hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars) |
---|---|---|---|
b732417b00f9dd6b50a424753272b67d56de84a1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
* JCudaVec - Vector operations for JCuda
* http://www.jcuda.org
*
* Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org
*/
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of log_e(1 + x).
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision floating-point value after argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision floating-point remainder.
extern "C"
__global__ void vec_erfcinvf (size_t n, float *result, float *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = erfcinvf(y[id]);
}
} | b732417b00f9dd6b50a424753272b67d56de84a1.cu | #include "includes.h"
/*
* JCudaVec - Vector operations for JCuda
* http://www.jcuda.org
*
* Copyright (c) 2013-2015 Marco Hutter - http://www.jcuda.org
*/
extern "C"
//=== Vector arithmetic ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector comparison ======================================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector-and-scalar comparison ===========================================
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
// Calculate the arc sine of the input argument.
extern "C"
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
// Calculate the arc tangent of the input argument.
extern "C"
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
// Calculate the cube root of the input argument.
extern "C"
// Calculate ceiling of the input argument.
extern "C"
// Calculate the cosine of the input argument.
extern "C"
// Calculate the hyperbolic cosine of the input argument.
extern "C"
// Calculate the cosine of the input argument × π.
extern "C"
// Calculate the complementary error function of the input argument.
extern "C"
// Calculate the inverse complementary error function of the input argument.
extern "C"
// Calculate the scaled complementary error function of the input argument.
extern "C"
// Calculate the error function of the input argument.
extern "C"
// Calculate the inverse error function of the input argument.
extern "C"
// Calculate the base 10 exponential of the input argument.
extern "C"
// Calculate the base 2 exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument.
extern "C"
// Calculate the base e exponential of the input argument, minus 1.
extern "C"
// Calculate the absolute value of its argument.
extern "C"
// Calculate the largest integer less than or equal to x.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
extern "C"
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
extern "C"
// Calculate the base 10 logarithm of the input argument.
extern "C"
// Calculate the value of log_e(1 + x).
extern "C"
// Calculate the base 2 logarithm of the input argument.
extern "C"
// Calculate the floating point representation of the exponent of the input argument.
extern "C"
// Calculate the natural logarithm of the input argument.
extern "C"
// Calculate the standard normal cumulative distribution function.
extern "C"
// Calculate the inverse of the standard normal cumulative distribution function.
extern "C"
// Calculate reciprocal cube root function.
extern "C"
// Round input to nearest integer value in floating-point.
extern "C"
// Round to nearest integer value in floating-point.
extern "C"
// Calculate the reciprocal of the square root of the input argument.
extern "C"
// Calculate the sine of the input argument.
extern "C"
// Calculate the hyperbolic sine of the input argument.
extern "C"
// Calculate the sine of the input argument × π.
extern "C"
// Calculate the square root of the input argument.
extern "C"
// Calculate the tangent of the input argument.
extern "C"
// Calculate the hyperbolic tangent of the input argument.
extern "C"
// Calculate the gamma function of the input argument.
extern "C"
// Truncate input argument to the integral part.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
extern "C"
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
extern "C"
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
extern "C"
// Compute the positive difference between x and y.
extern "C"
// Divide two floating point values.
extern "C"
// Determine the maximum numeric value of the arguments.
extern "C"
// Determine the minimum numeric value of the arguments.
extern "C"
// Calculate the floating-point remainder of x / y.
extern "C"
// Calculate the square root of the sum of squares of two arguments.
extern "C"
// Return next representable single-precision floating-point value after argument.
extern "C"
// Calculate the value of first argument to the power of second argument.
extern "C"
// Compute single-precision floating-point remainder.
extern "C"
__global__ void vec_erfcinvf (size_t n, float *result, float *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = erfcinvf(y[id]);
}
} |
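The pair above reduces to one element-wise kernel, `vec_erfcinvf`, which applies CUDA's `erfcinvf` to each of `n` floats. Below is a minimal, self-contained host-side sketch of how such a kernel is typically driven; the problem size, buffer names and the 256-thread block size are illustrative assumptions rather than anything taken from the original file.

```cuda
#include <cstdio>
#include <cuda_runtime.h>

// Element-wise inverse complementary error function, same pattern as above:
// one thread per element, guarded so a partial final block is safe.
__global__ void vec_erfcinvf(size_t n, float *result, float *y)
{
    size_t id = threadIdx.x + (size_t)blockIdx.x * blockDim.x;
    if (id < n)
        result[id] = erfcinvf(y[id]);
}

int main()
{
    const size_t n = 1 << 20;                     // assumed problem size
    float *h_y = new float[n], *h_r = new float[n];
    for (size_t i = 0; i < n; ++i) h_y[i] = 0.5f; // erfcinv(0.5) ~= 0.4769

    float *d_y = nullptr, *d_r = nullptr;
    cudaMalloc(&d_y, n * sizeof(float));
    cudaMalloc(&d_r, n * sizeof(float));
    cudaMemcpy(d_y, h_y, n * sizeof(float), cudaMemcpyHostToDevice);

    const int block = 256;                        // assumed block size
    const int grid  = (int)((n + block - 1) / block);
    vec_erfcinvf<<<grid, block>>>(n, d_r, d_y);

    cudaMemcpy(h_r, d_r, n * sizeof(float), cudaMemcpyDeviceToHost);
    printf("erfcinv(0.5) = %f\n", h_r[0]);

    cudaFree(d_y); cudaFree(d_r);
    delete[] h_y; delete[] h_r;
    return 0;
}
```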
f413f14e43e20a1f05418632c1ab483c844a13ec.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define THREADS_POR_BLOCO 1024
#define DIM_BLOCO 32
#define DIM_GRID 1024
__global__ void add(int *a, int *b, int *c, int N){
//int i = threadIdx.y + blockIdx.y*blockDim.y;
//int j = threadIdx.x + blockIdx.x*blockDim.x;
int index =
DIM_BLOCO*DIM_BLOCO*(DIM_GRID*blockIdx.x+blockIdx.y)+blockDim.y*threadIdx.x+threadIdx.y;
if(index < N)
c[index] = a[index] + b[index];
}
int main()
{
int *A, *B, *C;
int *d_A, *d_B, *d_C;
int i, j;
//Input
int linhas, colunas;
scanf("%d", &linhas);
scanf("%d", &colunas);
//Definindo tamanho dos arrays que representarão as matrizes
int N = linhas*colunas;
int size = sizeof(int)*N;
//Alocando memória na GPU
hipMalloc((void **)&d_A,size);
hipMalloc((void **)&d_B,size);
hipMalloc((void **)&d_C,size);
//Alocando memória na CPU
A = (int *)malloc(size);
B = (int *)malloc(size);
C = (int *)malloc(size);
//Inicializar
for(i = 0; i < linhas; i++){
for(j = 0; j < colunas; j++){
A[i*colunas+j] = B[i*colunas+j] = i+j;
//printf("%d ",A[i*colunas+j]);
}
//printf("\n");
}
//Transferir para a memória da GPU
hipMemcpy(d_A, A, size, hipMemcpyHostToDevice);
hipMemcpy(d_B, B, size, hipMemcpyHostToDevice);
hipMemcpy(d_C, C, size, hipMemcpyHostToDevice);
//Computacao que deverá ser movida para a GPU
// Número de blocos = Número de linhas
// threads por bloco = número de colunas
dim3 dimGrid(DIM_GRID,DIM_GRID);
dim3 dimBlock(DIM_BLOCO,DIM_BLOCO);
hipLaunchKernelGGL(( add), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A,d_B,d_C,N);
//add<<<(N+THREADS_POR_BLOCO-1)/THREADS_POR_BLOCO,THREADS_POR_BLOCO>>>(d_A,d_B,d_C,N);
hipMemcpy(C,d_C,size,hipMemcpyDeviceToHost);
long long int somador=0;
//Manter esta computação na CPU
for(i = 0; i < linhas; i++){
for(j = 0; j < colunas; j++){
somador+=C[i*colunas+j];
//printf("%d ",C[i*colunas+j]);
}
//printf("\n");
}
printf("%lli\n", somador);
free(A); free(B); free(C);
hipFree(d_A); hipFree(d_B); hipFree(d_C);
}
| f413f14e43e20a1f05418632c1ab483c844a13ec.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define THREADS_POR_BLOCO 1024
#define DIM_BLOCO 32
#define DIM_GRID 1024
__global__ void add(int *a, int *b, int *c, int N){
//int i = threadIdx.y + blockIdx.y*blockDim.y;
//int j = threadIdx.x + blockIdx.x*blockDim.x;
int index =
DIM_BLOCO*DIM_BLOCO*(DIM_GRID*blockIdx.x+blockIdx.y)+blockDim.y*threadIdx.x+threadIdx.y;
if(index < N)
c[index] = a[index] + b[index];
}
int main()
{
int *A, *B, *C;
int *d_A, *d_B, *d_C;
int i, j;
//Input
int linhas, colunas;
scanf("%d", &linhas);
scanf("%d", &colunas);
//Definindo tamanho dos arrays que representarão as matrizes
int N = linhas*colunas;
int size = sizeof(int)*N;
//Alocando memória na GPU
cudaMalloc((void **)&d_A,size);
cudaMalloc((void **)&d_B,size);
cudaMalloc((void **)&d_C,size);
//Alocando memória na CPU
A = (int *)malloc(size);
B = (int *)malloc(size);
C = (int *)malloc(size);
//Inicializar
for(i = 0; i < linhas; i++){
for(j = 0; j < colunas; j++){
A[i*colunas+j] = B[i*colunas+j] = i+j;
//printf("%d ",A[i*colunas+j]);
}
//printf("\n");
}
//Transferir para a memória da GPU
cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_C, C, size, cudaMemcpyHostToDevice);
//Computacao que deverá ser movida para a GPU
// Número de blocos = Número de linhas
// threads por bloco = número de colunas
dim3 dimGrid(DIM_GRID,DIM_GRID);
dim3 dimBlock(DIM_BLOCO,DIM_BLOCO);
add<<<dimGrid,dimBlock>>>(d_A,d_B,d_C,N);
//add<<<(N+THREADS_POR_BLOCO-1)/THREADS_POR_BLOCO,THREADS_POR_BLOCO>>>(d_A,d_B,d_C,N);
cudaMemcpy(C,d_C,size,cudaMemcpyDeviceToHost);
long long int somador=0;
//Manter esta computação na CPU
for(i = 0; i < linhas; i++){
for(j = 0; j < colunas; j++){
somador+=C[i*colunas+j];
//printf("%d ",C[i*colunas+j]);
}
//printf("\n");
}
printf("%lli\n", somador);
free(A); free(B); free(C);
cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
}
|
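The pair above launches a fixed 1024×1024 grid of 32×32 blocks and maps it onto the flat array with a block-major index formula; the commented-out launch line in the same file shows the simpler one-dimensional alternative. A standalone sketch of that 1D variant follows, with a fixed matrix shape in place of the `scanf` input; all sizes are illustrative assumptions.

```cuda
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define THREADS_PER_BLOCK 1024

// One-dimensional variant of the same element-wise addition:
// one thread per element, launched with a ceil-division grid.
__global__ void add1d(const int *a, const int *b, int *c, int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N)
        c[i] = a[i] + b[i];
}

int main()
{
    const int linhas = 1000, colunas = 1000;      // assumed shape (no scanf)
    const int N = linhas * colunas;
    const size_t size = (size_t)N * sizeof(int);

    int *A = (int *)malloc(size), *B = (int *)malloc(size), *C = (int *)malloc(size);
    for (int i = 0; i < linhas; i++)
        for (int j = 0; j < colunas; j++)
            A[i * colunas + j] = B[i * colunas + j] = i + j;

    int *d_A, *d_B, *d_C;
    cudaMalloc(&d_A, size); cudaMalloc(&d_B, size); cudaMalloc(&d_C, size);
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);

    // Enough blocks to cover N elements; the guard in the kernel handles the tail.
    int blocks = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    add1d<<<blocks, THREADS_PER_BLOCK>>>(d_A, d_B, d_C, N);

    cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);

    long long int somador = 0;
    for (int i = 0; i < N; i++) somador += C[i];
    printf("%lli\n", somador);

    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
    free(A); free(B); free(C);
    return 0;
}
```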
05f1180bcc874de096b389c9a7cf5b6bb04cd64d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#define COUNTERS 66
#define C_SIZE 64
#define C_STOP 65 // == C_SIZE+1
#define N 4224 // == COUNTERS*C_SIZE
#define N2 17842176 // == N*N
#define CUDA_ERROR_CHECK
#define cudaSafeCall(error) __cudaSafeCall(error, __FILE__, __LINE__)
#define cudaCheckErrors() __cudaCheckErrors(__FILE__, __LINE__)
__device__ inline int uniq(const int* M, int i, int* counters) {
for (int j = 1; j <= i - 1; j++) {
int a = (j - 1) * C_SIZE + counters[j-1];
int b = (i - 1) * C_SIZE + counters[i-1];
if (M[(a - 1) + N * (b - 1)] == 0)
return 1;
}
return 0;
}
__global__ void searcher(const int* M, int* res, size_t* itersNum) {
int partNumber = threadIdx.x + blockIdx.x * blockDim.x;
// initialize counters vector
int counters[COUNTERS];
for (int i = 0; i < COUNTERS; i++)
counters[i] = 1;
// go to selected part
counters[0] = 25;
counters[1] = 5;
counters[2] = 1;
counters[3] = 3;
counters[4] = 4;
counters[5] = 7;
counters[6] = 9;
counters[7] = 2;
counters[8] = 10;
counters[9] = 8;
counters[10] = (partNumber - 1) / 64 + 1;
counters[11] = (partNumber - 1) % 64 + 1;
size_t iter = 0;
size_t current = 1;
while (1) {
iter++;
// stop if search in the selected part is finished
if (counters[10] != (partNumber - 1) / 64 + 1 || counters[11] != (partNumber - 1) % 64 + 1) {
for (int i = 0; i < COUNTERS; i++)
res[partNumber * COUNTERS + i] = -1;
itersNum[partNumber] = iter;
break;
}
// first subspace is always good
if (current == 1)
current = 2;
// print intermediate state
// if (current == 13 && iter > 1000) {
// fprintf(f, "Current state of part number %d:", partNumber);
// for (int i = 0; i < COUNTERS; i++)
// fprintf(f, " %d", counters[i]);
// fprintf(f, "\nNumber of iterations: %f\n\n", iter);
// fflush(f);
// }
for (int i = current; i <= COUNTERS; i++) {
if (uniq(M, i, counters) == 1) {
counters[i-1]++;
current = i;
while (counters[current-1] == C_STOP) {
counters[current - 1] = 1;
counters[current - 2] = counters[current - 2] + 1;
current--;
}
break;
}
}
if (current == COUNTERS && uniq(M, current, counters) == 0) {
for (int i = 0; i < COUNTERS; i++)
res[partNumber * COUNTERS + i] = counters[i];
itersNum[partNumber] = iter;
break;
}
}
} | 05f1180bcc874de096b389c9a7cf5b6bb04cd64d.cu | #include "includes.h"
#define COUNTERS 66
#define C_SIZE 64
#define C_STOP 65 // == C_SIZE+1
#define N 4224 // == COUNTERS*C_SIZE
#define N2 17842176 // == N*N
#define CUDA_ERROR_CHECK
#define cudaSafeCall(error) __cudaSafeCall(error, __FILE__, __LINE__)
#define cudaCheckErrors() __cudaCheckErrors(__FILE__, __LINE__)
__device__ inline int uniq(const int* M, int i, int* counters) {
for (int j = 1; j <= i - 1; j++) {
int a = (j - 1) * C_SIZE + counters[j-1];
int b = (i - 1) * C_SIZE + counters[i-1];
if (M[(a - 1) + N * (b - 1)] == 0)
return 1;
}
return 0;
}
__global__ void searcher(const int* M, int* res, size_t* itersNum) {
int partNumber = threadIdx.x + blockIdx.x * blockDim.x;
// initialize counters vector
int counters[COUNTERS];
for (int i = 0; i < COUNTERS; i++)
counters[i] = 1;
// go to selected part
counters[0] = 25;
counters[1] = 5;
counters[2] = 1;
counters[3] = 3;
counters[4] = 4;
counters[5] = 7;
counters[6] = 9;
counters[7] = 2;
counters[8] = 10;
counters[9] = 8;
counters[10] = (partNumber - 1) / 64 + 1;
counters[11] = (partNumber - 1) % 64 + 1;
size_t iter = 0;
size_t current = 1;
while (1) {
iter++;
// stop if search in the selected part is finished
if (counters[10] != (partNumber - 1) / 64 + 1 || counters[11] != (partNumber - 1) % 64 + 1) {
for (int i = 0; i < COUNTERS; i++)
res[partNumber * COUNTERS + i] = -1;
itersNum[partNumber] = iter;
break;
}
// first subspace is always good
if (current == 1)
current = 2;
// print intermediate state
// if (current == 13 && iter > 1000) {
// fprintf(f, "Current state of part number %d:", partNumber);
// for (int i = 0; i < COUNTERS; i++)
// fprintf(f, " %d", counters[i]);
// fprintf(f, "\nNumber of iterations: %f\n\n", iter);
// fflush(f);
// }
for (int i = current; i <= COUNTERS; i++) {
if (uniq(M, i, counters) == 1) {
counters[i-1]++;
current = i;
while (counters[current-1] == C_STOP) {
counters[current - 1] = 1;
counters[current - 2] = counters[current - 2] + 1;
current--;
}
break;
}
}
if (current == COUNTERS && uniq(M, current, counters) == 0) {
for (int i = 0; i < COUNTERS; i++)
res[partNumber * COUNTERS + i] = counters[i];
itersNum[partNumber] = iter;
break;
}
}
} |
bac71527960956fb9622cb28455017da7a36812c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3)
// output: idx (b,m,nsample), pts_cnt (b,m)
__global__ void query_ball_point_gpu(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) {
int batch_index = blockIdx.x;
xyz1 += n*3*batch_index;
xyz2 += m*3*batch_index;
idx += m*nsample*batch_index;
pts_cnt += m*batch_index; // counting how many unique points selected in local region
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
int cnt = 0;
for (int k=0;k<n;++k) {
if (cnt == nsample)
break; // only pick the FIRST nsample points in the ball
float x2=xyz2[j*3+0];
float y2=xyz2[j*3+1];
float z2=xyz2[j*3+2];
float x1=xyz1[k*3+0];
float y1=xyz1[k*3+1];
float z1=xyz1[k*3+2];
float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f);
if (d<radius) {
if (cnt==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices
for (int l=0;l<nsample;++l)
idx[j*nsample+l] = k;
}
idx[j*nsample+cnt] = k;
cnt+=1;
}
}
pts_cnt[j] = cnt;
}
}
// input: points (b,n,c), idx (b,m,nsample)
// output: out (b,m,nsample,c)
__global__ void group_point_gpu(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out) {
int batch_index = blockIdx.x;
points += n*c*batch_index;
idx += m*nsample*batch_index;
out += m*nsample*c*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
for (int k=0;k<nsample;++k) {
int ii = idx[j*nsample+k];
for (int l=0;l<c;++l) {
out[j*nsample*c+k*c+l] = points[ii*c+l];
}
}
}
}
// input: grad_out (b,m,nsample,c), idx (b,m,nsample),
// output: grad_points (b,n,c)
__global__ void group_point_grad_gpu(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points) {
int batch_index = blockIdx.x;
idx += m*nsample*batch_index;
grad_out += m*nsample*c*batch_index;
grad_points += n*c*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
for (int k=0;k<nsample;++k) {
int ii = idx[j*nsample+k];
for (int l=0;l<c;++l) {
atomicAdd(&grad_points[ii*c+l], grad_out[j*nsample*c+k*c+l]);
}
}
}
}
// input: k (1), distance matrix dist (b,m,n)
// output: idx (b,m,n), dist_out (b,m,n)
// only the top k results within n are useful
__global__ void selection_sort_gpu(int b, int n, int m, int k, const float *dist, int *outi, float *out) {
int batch_index = blockIdx.x;
dist+=m*n*batch_index;
outi+=m*n*batch_index;
out+=m*n*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
// copy from dist to dist_out
for (int j=index;j<m;j+=stride) {
for (int s=0;s<n;++s) {
out[j*n+s] = dist[j*n+s];
outi[j*n+s] = s;
}
}
float *p_dist;
for (int j=index;j<m;j+=stride) {
p_dist = out+j*n;
// selection sort for the first k elements
for (int s=0;s<k;++s) {
int min=s;
// find the min
for (int t=s+1;t<n;++t) {
if (p_dist[t]<p_dist[min]) {
min = t;
}
}
// swap min-th and i-th element
if (min!=s) {
float tmp = p_dist[min];
p_dist[min] = p_dist[s];
p_dist[s] = tmp;
int tmpi = outi[j*n+min];
outi[j*n+min] = outi[j*n+s];
outi[j*n+s] = tmpi;
}
}
}
}
void queryBallPointLauncher(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) {
hipLaunchKernelGGL(( query_ball_point_gpu), dim3(b),dim3(256), 0, 0, b,n,m,radius,nsample,xyz1,xyz2,idx,pts_cnt);
//hipDeviceSynchronize();
}
void selectionSortLauncher(int b, int n, int m, int k, const float *dist, int *outi, float *out) {
hipLaunchKernelGGL(( selection_sort_gpu), dim3(b),dim3(256), 0, 0, b,n,m,k,dist,outi,out);
//hipDeviceSynchronize();
}
void groupPointLauncher(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out){
hipLaunchKernelGGL(( group_point_gpu), dim3(b),dim3(256), 0, 0, b,n,c,m,nsample,points,idx,out);
//hipDeviceSynchronize();
}
void groupPointGradLauncher(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points){
hipLaunchKernelGGL(( group_point_grad_gpu), dim3(b),dim3(256), 0, 0, b,n,c,m,nsample,grad_out,idx,grad_points);
//group_point_grad_gpu<<<1,1>>>(b,n,c,m,nsample,grad_out,idx,grad_points);
//hipDeviceSynchronize();
}
| bac71527960956fb9622cb28455017da7a36812c.cu | // input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3)
// output: idx (b,m,nsample), pts_cnt (b,m)
__global__ void query_ball_point_gpu(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) {
int batch_index = blockIdx.x;
xyz1 += n*3*batch_index;
xyz2 += m*3*batch_index;
idx += m*nsample*batch_index;
pts_cnt += m*batch_index; // counting how many unique points selected in local region
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
int cnt = 0;
for (int k=0;k<n;++k) {
if (cnt == nsample)
break; // only pick the FIRST nsample points in the ball
float x2=xyz2[j*3+0];
float y2=xyz2[j*3+1];
float z2=xyz2[j*3+2];
float x1=xyz1[k*3+0];
float y1=xyz1[k*3+1];
float z1=xyz1[k*3+2];
float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f);
if (d<radius) {
if (cnt==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices
for (int l=0;l<nsample;++l)
idx[j*nsample+l] = k;
}
idx[j*nsample+cnt] = k;
cnt+=1;
}
}
pts_cnt[j] = cnt;
}
}
// input: points (b,n,c), idx (b,m,nsample)
// output: out (b,m,nsample,c)
__global__ void group_point_gpu(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out) {
int batch_index = blockIdx.x;
points += n*c*batch_index;
idx += m*nsample*batch_index;
out += m*nsample*c*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
for (int k=0;k<nsample;++k) {
int ii = idx[j*nsample+k];
for (int l=0;l<c;++l) {
out[j*nsample*c+k*c+l] = points[ii*c+l];
}
}
}
}
// input: grad_out (b,m,nsample,c), idx (b,m,nsample),
// output: grad_points (b,n,c)
__global__ void group_point_grad_gpu(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points) {
int batch_index = blockIdx.x;
idx += m*nsample*batch_index;
grad_out += m*nsample*c*batch_index;
grad_points += n*c*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
for (int k=0;k<nsample;++k) {
int ii = idx[j*nsample+k];
for (int l=0;l<c;++l) {
atomicAdd(&grad_points[ii*c+l], grad_out[j*nsample*c+k*c+l]);
}
}
}
}
// input: k (1), distance matrix dist (b,m,n)
// output: idx (b,m,n), dist_out (b,m,n)
// only the top k results within n are useful
__global__ void selection_sort_gpu(int b, int n, int m, int k, const float *dist, int *outi, float *out) {
int batch_index = blockIdx.x;
dist+=m*n*batch_index;
outi+=m*n*batch_index;
out+=m*n*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
// copy from dist to dist_out
for (int j=index;j<m;j+=stride) {
for (int s=0;s<n;++s) {
out[j*n+s] = dist[j*n+s];
outi[j*n+s] = s;
}
}
float *p_dist;
for (int j=index;j<m;j+=stride) {
p_dist = out+j*n;
// selection sort for the first k elements
for (int s=0;s<k;++s) {
int min=s;
// find the min
for (int t=s+1;t<n;++t) {
if (p_dist[t]<p_dist[min]) {
min = t;
}
}
// swap min-th and i-th element
if (min!=s) {
float tmp = p_dist[min];
p_dist[min] = p_dist[s];
p_dist[s] = tmp;
int tmpi = outi[j*n+min];
outi[j*n+min] = outi[j*n+s];
outi[j*n+s] = tmpi;
}
}
}
}
void queryBallPointLauncher(int b, int n, int m, float radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) {
query_ball_point_gpu<<<b,256>>>(b,n,m,radius,nsample,xyz1,xyz2,idx,pts_cnt);
//cudaDeviceSynchronize();
}
void selectionSortLauncher(int b, int n, int m, int k, const float *dist, int *outi, float *out) {
selection_sort_gpu<<<b,256>>>(b,n,m,k,dist,outi,out);
//cudaDeviceSynchronize();
}
void groupPointLauncher(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out){
group_point_gpu<<<b,256>>>(b,n,c,m,nsample,points,idx,out);
//cudaDeviceSynchronize();
}
void groupPointGradLauncher(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points){
group_point_grad_gpu<<<b,256>>>(b,n,c,m,nsample,grad_out,idx,grad_points);
//group_point_grad_gpu<<<1,1>>>(b,n,c,m,nsample,grad_out,idx,grad_points);
//cudaDeviceSynchronize();
}
|
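The pair above provides the ball-query, grouping and gradient kernels plus their `*Launcher` wrappers (one block per batch element, 256 threads each). The sketch below shows one plausible host-side call sequence, meant to be compiled and linked together with the file above; the batch size, point counts, radius and channel count are illustrative assumptions, and the coordinate/feature tensors are presumed to already live on the device in the (b,n,3)/(b,m,3)/(b,n,c) row-major layouts the comments describe.

```cuda
#include <cuda_runtime.h>

// Launcher prototypes from the file above (one block per batch, 256 threads each).
void queryBallPointLauncher(int b, int n, int m, float radius, int nsample,
                            const float *xyz1, const float *xyz2, int *idx, int *pts_cnt);
void groupPointLauncher(int b, int n, int c, int m, int nsample,
                        const float *points, const int *idx, float *out);

// Assumed sizes: 8 point clouds, 1024 source points, 256 query centroids,
// up to 32 neighbours per ball, 64 feature channels.
void ball_query_and_group(const float *d_xyz1,    // (b, n, 3), already on device
                          const float *d_xyz2,    // (b, m, 3), already on device
                          const float *d_points)  // (b, n, c), already on device
{
    const int b = 8, n = 1024, m = 256, nsample = 32, c = 64;
    const float radius = 0.2f;

    int   *d_idx     = nullptr;   // (b, m, nsample) neighbour indices
    int   *d_pts_cnt = nullptr;   // (b, m) number of unique points found
    float *d_grouped = nullptr;   // (b, m, nsample, c) gathered features
    cudaMalloc(&d_idx,     (size_t)b * m * nsample * sizeof(int));
    cudaMalloc(&d_pts_cnt, (size_t)b * m * sizeof(int));
    cudaMalloc(&d_grouped, (size_t)b * m * nsample * c * sizeof(float));

    // 1) For each centroid, collect up to nsample neighbour indices within the radius.
    queryBallPointLauncher(b, n, m, radius, nsample, d_xyz1, d_xyz2, d_idx, d_pts_cnt);
    // 2) Gather the per-point features at those indices.
    groupPointLauncher(b, n, c, m, nsample, d_points, d_idx, d_grouped);
    cudaDeviceSynchronize();

    cudaFree(d_idx); cudaFree(d_pts_cnt); cudaFree(d_grouped);
}
```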
e03076f9f52ab18fdb078311f5223f660768c4c8.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef _BACKPROP_CUDA_KERNEL_H_
#define _BACKPROP_CUDA_KERNEL_H_
#include <stdio.h>
#include "backprop.h"
#include "hip/hip_runtime.h"
#include "math.h"
__global__ void bpnn_layerforward_CUDA(float *input_cuda,
float *output_hidden_cuda,
float *input_hidden_cuda,
float *hidden_partial_sum, int in,
int hid) {
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int index = (hid + 1) * HEIGHT * by + (hid + 1) * ty + tx + 1 + (hid + 1);
int index_in = HEIGHT * by + ty + 1;
__shared__ float input_node[HEIGHT];
__shared__ float weight_matrix[HEIGHT][WIDTH];
if (tx == 0) input_node[ty] = input_cuda[index_in];
__syncthreads();
weight_matrix[ty][tx] = input_hidden_cuda[index];
__syncthreads();
weight_matrix[ty][tx] = weight_matrix[ty][tx] * input_node[ty];
__syncthreads();
for (int i = 1; i <= __log2f(HEIGHT); i++) {
int power_two = __powf(2, i);
if (ty % power_two == 0)
weight_matrix[ty][tx] =
weight_matrix[ty][tx] + weight_matrix[ty + power_two / 2][tx];
__syncthreads();
}
//__syncthreads();
input_hidden_cuda[index] = weight_matrix[ty][tx];
/*
for ( unsigned int i = 2 ; i <= HEIGHT ; i *= 2){
unsigned int power_two = i - 1;
if( (ty & power_two) == 0 ) {
weight_matrix[ty][tx] = weight_matrix[ty][tx] +
weight_matrix[ty + power_two/2][tx];
}
}
*/
__syncthreads();
if (tx == 0) {
hidden_partial_sum[by * hid + ty] = weight_matrix[tx][ty];
}
}
__global__ void bpnn_adjust_weights_cuda(float *delta, int hid, float *ly,
int in, float *w, float *oldw) {
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int index = (hid + 1) * HEIGHT * by + (hid + 1) * ty + tx + 1 + (hid + 1);
int index_y = HEIGHT * by + ty + 1;
int index_x = tx + 1;
// eta = 0.3;
// momentum = 0.3;
w[index] += ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));
oldw[index] =
((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));
__syncthreads();
if (ty == 0 && by == 0) {
w[index_x] += ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
oldw[index_x] = ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
}
}
#endif
| e03076f9f52ab18fdb078311f5223f660768c4c8.cu |
#ifndef _BACKPROP_CUDA_KERNEL_H_
#define _BACKPROP_CUDA_KERNEL_H_
#include <stdio.h>
#include "backprop.h"
#include "cuda.h"
#include "math.h"
__global__ void bpnn_layerforward_CUDA(float *input_cuda,
float *output_hidden_cuda,
float *input_hidden_cuda,
float *hidden_partial_sum, int in,
int hid) {
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int index = (hid + 1) * HEIGHT * by + (hid + 1) * ty + tx + 1 + (hid + 1);
int index_in = HEIGHT * by + ty + 1;
__shared__ float input_node[HEIGHT];
__shared__ float weight_matrix[HEIGHT][WIDTH];
if (tx == 0) input_node[ty] = input_cuda[index_in];
__syncthreads();
weight_matrix[ty][tx] = input_hidden_cuda[index];
__syncthreads();
weight_matrix[ty][tx] = weight_matrix[ty][tx] * input_node[ty];
__syncthreads();
for (int i = 1; i <= __log2f(HEIGHT); i++) {
int power_two = __powf(2, i);
if (ty % power_two == 0)
weight_matrix[ty][tx] =
weight_matrix[ty][tx] + weight_matrix[ty + power_two / 2][tx];
__syncthreads();
}
//__syncthreads();
input_hidden_cuda[index] = weight_matrix[ty][tx];
/*
for ( unsigned int i = 2 ; i <= HEIGHT ; i *= 2){
unsigned int power_two = i - 1;
if( (ty & power_two) == 0 ) {
weight_matrix[ty][tx] = weight_matrix[ty][tx] +
weight_matrix[ty + power_two/2][tx];
}
}
*/
__syncthreads();
if (tx == 0) {
hidden_partial_sum[by * hid + ty] = weight_matrix[tx][ty];
}
}
__global__ void bpnn_adjust_weights_cuda(float *delta, int hid, float *ly,
int in, float *w, float *oldw) {
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int index = (hid + 1) * HEIGHT * by + (hid + 1) * ty + tx + 1 + (hid + 1);
int index_y = HEIGHT * by + ty + 1;
int index_x = tx + 1;
// eta = 0.3;
// momentum = 0.3;
w[index] += ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));
oldw[index] =
((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));
__syncthreads();
if (ty == 0 && by == 0) {
w[index_x] += ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
oldw[index_x] = ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
}
}
#endif
|
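The forward kernel above expects a (WIDTH × HEIGHT) thread block and a grid with one block row per HEIGHT-sized slice of the input layer; HEIGHT, WIDTH, ETA and MOMENTUM come from `backprop.h`, which is not shown here. The sketch below illustrates one way the launch could look, assuming the common Rodinia values HEIGHT = WIDTH = 16 and an input size that is a multiple of 16; it links against the kernel in the file above and is not the original harness.

```cuda
#include <cuda_runtime.h>

// Tile size normally defined in backprop.h; 16 is assumed here.
#define HEIGHT 16
#define WIDTH  16

// Kernel defined in the file above.
__global__ void bpnn_layerforward_CUDA(float *input_cuda, float *output_hidden_cuda,
                                       float *input_hidden_cuda, float *hidden_partial_sum,
                                       int in, int hid);

// Sketch of the forward-pass launch: the `in`-element input layer is split into
// in/HEIGHT row blocks, each processed by one WIDTH x HEIGHT thread block
// (so `in` is assumed to be a multiple of HEIGHT).
void launch_layerforward(float *d_input, float *d_hidden, float *d_weights,
                         float *d_partial_sum, int in, int hid)
{
    int num_blocks = in / HEIGHT;
    dim3 grid(1, num_blocks);         // the kernel indexes blocks via blockIdx.y
    dim3 block(WIDTH, HEIGHT);
    bpnn_layerforward_CUDA<<<grid, block>>>(d_input, d_hidden, d_weights,
                                            d_partial_sum, in, hid);
    cudaDeviceSynchronize();
}
```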
c7a889cede1e3d7ea6c507ae08068b077103c899.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <Timer.hpp>
#include <cmath>
#include <iomanip>
using std::cout;
using std::cerr;
using std::endl;
using std::fixed;
using std::setprecision;
using LOFAR::NSTimer;
const unsigned int DIM = 200000;
//const unsigned int DIM = 200;
const unsigned int nrThreads = 256;
__global__ void vectorAdd(const unsigned int DIM, float *a, float *b, float *c) {
unsigned int item = (blockIdx.x * blockDim.x) + threadIdx.x;
c[item] = a[item] + b[item];
}
int main(void) {
hipError_t devRetVal = hipSuccess;
float *a = new float [DIM];
float *b = new float [DIM];
float *c = new float [DIM];
float *devA = 0;
float *devB = 0;
float *devC = 0;
NSTimer globalTimer("GlobalTimer", false, false);
NSTimer kernelTimer("KernelTimer", false, false);
NSTimer memoryTimer("MemoryTimer", false, false);
// Prepare input and output data structures
for ( unsigned int i = 0; i < DIM; i++ ) {
a[i] = static_cast< float >(i);
b[i] = static_cast< float >(i + 1);
c[i] = 0.0f;
}
cout << "Starting execution" << endl;
// Start of the computation
globalTimer.start();
// Allocate CUDA memory
if ( (devRetVal = hipMalloc(reinterpret_cast< void ** >(&devA), DIM * sizeof(float))) != hipSuccess ) {
cerr << "Impossible to allocate device memory for a." << endl;
return 1;
}
if ( (devRetVal = hipMalloc(reinterpret_cast< void ** >(&devB), DIM * sizeof(float))) != hipSuccess ) {
cerr << "Impossible to allocate device memory for b." << endl;
return 1;
}
if ( (devRetVal = hipMalloc(reinterpret_cast< void ** >(&devC), DIM * sizeof(float))) != hipSuccess ) {
cerr << "Impossible to allocate device memory for c." << endl;
return 1;
}
// Copy input to device
memoryTimer.start();
if ( (devRetVal = hipMemcpy(devA, reinterpret_cast< void * >(a), DIM * sizeof(float), hipMemcpyHostToDevice)) != hipSuccess ) {
cerr << "Impossible to copy devA to device." << endl;
return 1;
}
if ( (devRetVal = hipMemcpy(devB, reinterpret_cast< void * >(b), DIM * sizeof(float), hipMemcpyHostToDevice)) != hipSuccess ) {
cerr << "Impossible to copy devB to device." << endl;
return 1;
}
memoryTimer.stop();
// Execute the kernel
dim3 gridSize(static_cast< unsigned int >(ceil(DIM / static_cast< float >(nrThreads))));
dim3 blockSize(nrThreads);
kernelTimer.start();
hipLaunchKernelGGL(( vectorAdd), dim3(gridSize), dim3(blockSize) , 0, 0, DIM, devA, devB, devC);
hipDeviceSynchronize();
kernelTimer.stop();
// Check if the kernel returned an error
if ( (devRetVal = hipGetLastError()) != hipSuccess ) {
cerr << "Uh, the kernel had some kind of issue." << endl;
return 1;
}
// Copy the output back to host
memoryTimer.start();
if ( (devRetVal = hipMemcpy(reinterpret_cast< void * >(c), devC, DIM * sizeof(float), hipMemcpyDeviceToHost)) != hipSuccess ) {
cerr << "Impossible to copy devC to host." << endl;
return 1;
}
memoryTimer.stop();
// End of the computation
globalTimer.stop();
// Check the correctness
for ( unsigned int i = 0; i < DIM; i++ ) {
// Not the best floating point comparison, but this is just a CUDA example
if ( (c[i] - (a[i] + b[i])) > 0 ) {
cerr << "This result (" << i << ") looks wrong: " << c[i] << " != " << a[i] + b[i] << endl;
return 1;
}
}
// Print the timers
cout << fixed << setprecision(6);
cout << endl;
cout << "Total (s): \t" << globalTimer.getElapsed() << endl;
cout << "Kernel (s): \t" << kernelTimer.getElapsed() << endl;
cout << "Memory (s): \t" << memoryTimer.getElapsed() << endl;
cout << endl;
hipFree(devA);
hipFree(devB);
hipFree(devC);
return 0;
}
| c7a889cede1e3d7ea6c507ae08068b077103c899.cu |
#include <iostream>
#include <Timer.hpp>
#include <cmath>
#include <iomanip>
using std::cout;
using std::cerr;
using std::endl;
using std::fixed;
using std::setprecision;
using LOFAR::NSTimer;
const unsigned int DIM = 200000;
//const unsigned int DIM = 200;
const unsigned int nrThreads = 256;
__global__ void vectorAdd(const unsigned int DIM, float *a, float *b, float *c) {
unsigned int item = (blockIdx.x * blockDim.x) + threadIdx.x;
c[item] = a[item] + b[item];
}
int main(void) {
cudaError_t devRetVal = cudaSuccess;
float *a = new float [DIM];
float *b = new float [DIM];
float *c = new float [DIM];
float *devA = 0;
float *devB = 0;
float *devC = 0;
NSTimer globalTimer("GlobalTimer", false, false);
NSTimer kernelTimer("KernelTimer", false, false);
NSTimer memoryTimer("MemoryTimer", false, false);
// Prepare input and output data structures
for ( unsigned int i = 0; i < DIM; i++ ) {
a[i] = static_cast< float >(i);
b[i] = static_cast< float >(i + 1);
c[i] = 0.0f;
}
cout << "Starting execution" << endl;
// Start of the computation
globalTimer.start();
// Allocate CUDA memory
if ( (devRetVal = cudaMalloc(reinterpret_cast< void ** >(&devA), DIM * sizeof(float))) != cudaSuccess ) {
cerr << "Impossible to allocate device memory for a." << endl;
return 1;
}
if ( (devRetVal = cudaMalloc(reinterpret_cast< void ** >(&devB), DIM * sizeof(float))) != cudaSuccess ) {
cerr << "Impossible to allocate device memory for b." << endl;
return 1;
}
if ( (devRetVal = cudaMalloc(reinterpret_cast< void ** >(&devC), DIM * sizeof(float))) != cudaSuccess ) {
cerr << "Impossible to allocate device memory for c." << endl;
return 1;
}
// Copy input to device
memoryTimer.start();
if ( (devRetVal = cudaMemcpy(devA, reinterpret_cast< void * >(a), DIM * sizeof(float), cudaMemcpyHostToDevice)) != cudaSuccess ) {
cerr << "Impossible to copy devA to device." << endl;
return 1;
}
if ( (devRetVal = cudaMemcpy(devB, reinterpret_cast< void * >(b), DIM * sizeof(float), cudaMemcpyHostToDevice)) != cudaSuccess ) {
cerr << "Impossible to copy devB to device." << endl;
return 1;
}
memoryTimer.stop();
// Execute the kernel
dim3 gridSize(static_cast< unsigned int >(ceil(DIM / static_cast< float >(nrThreads))));
dim3 blockSize(nrThreads);
kernelTimer.start();
vectorAdd<<< gridSize, blockSize >>>(DIM, devA, devB, devC);
cudaDeviceSynchronize();
kernelTimer.stop();
// Check if the kernel returned an error
if ( (devRetVal = cudaGetLastError()) != cudaSuccess ) {
cerr << "Uh, the kernel had some kind of issue." << endl;
return 1;
}
// Copy the output back to host
memoryTimer.start();
if ( (devRetVal = cudaMemcpy(reinterpret_cast< void * >(c), devC, DIM * sizeof(float), cudaMemcpyDeviceToHost)) != cudaSuccess ) {
cerr << "Impossible to copy devC to host." << endl;
return 1;
}
memoryTimer.stop();
// End of the computation
globalTimer.stop();
// Check the correctness
for ( unsigned int i = 0; i < DIM; i++ ) {
// Not the best floating point comparison, but this is just a CUDA example
if ( (c[i] - (a[i] + b[i])) > 0 ) {
cerr << "This result (" << i << ") looks wrong: " << c[i] << " != " << a[i] + b[i] << endl;
return 1;
}
}
// Print the timers
cout << fixed << setprecision(6);
cout << endl;
cout << "Total (s): \t" << globalTimer.getElapsed() << endl;
cout << "Kernel (s): \t" << kernelTimer.getElapsed() << endl;
cout << "Memory (s): \t" << memoryTimer.getElapsed() << endl;
cout << endl;
cudaFree(devA);
cudaFree(devB);
cudaFree(devC);
return 0;
}
|
081dbc2196631a4d7169ef295e38c4e373690ab7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@author Mark Gates
@precisions normal z -> s d c
*/
#include "magma_internal.h"
#define NB 64
/* Vector is divided into ceil(n/nb) blocks.
Each thread swaps one element, x[tid] <---> y[tid].
*/
__global__ void zswap_kernel(
int n,
magmaDoubleComplex *x, int incx,
magmaDoubleComplex *y, int incy )
{
magmaDoubleComplex tmp;
int ind = threadIdx.x + blockDim.x*blockIdx.x;
if ( ind < n ) {
x += ind*incx;
y += ind*incy;
tmp = *x;
*x = *y;
*y = tmp;
}
}
/***************************************************************************//**
Purpose:
=============
Swap vector x and y; \f$ x <-> y \f$.
@param[in]
n Number of elements in vector x and y. n >= 0.
@param[in,out]
dx COMPLEX_16 array on GPU device.
The n element vector x of dimension (1 + (n-1)*incx).
@param[in]
incx Stride between consecutive elements of dx. incx != 0.
@param[in,out]
dy COMPLEX_16 array on GPU device.
The n element vector y of dimension (1 + (n-1)*incy).
@param[in]
incy Stride between consecutive elements of dy. incy != 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_swap
*******************************************************************************/
extern "C" void
magmablas_zswap(
magma_int_t n,
magmaDoubleComplex_ptr dx, magma_int_t incx,
magmaDoubleComplex_ptr dy, magma_int_t incy,
magma_queue_t queue )
{
dim3 threads( NB );
dim3 grid( magma_ceildiv( n, NB ) );
hipLaunchKernelGGL(( zswap_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dx, incx, dy, incy );
}
| 081dbc2196631a4d7169ef295e38c4e373690ab7.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@author Mark Gates
@precisions normal z -> s d c
*/
#include "magma_internal.h"
#define NB 64
/* Vector is divided into ceil(n/nb) blocks.
Each thread swaps one element, x[tid] <---> y[tid].
*/
__global__ void zswap_kernel(
int n,
magmaDoubleComplex *x, int incx,
magmaDoubleComplex *y, int incy )
{
magmaDoubleComplex tmp;
int ind = threadIdx.x + blockDim.x*blockIdx.x;
if ( ind < n ) {
x += ind*incx;
y += ind*incy;
tmp = *x;
*x = *y;
*y = tmp;
}
}
/***************************************************************************//**
Purpose:
=============
Swap vector x and y; \f$ x <-> y \f$.
@param[in]
n Number of elements in vector x and y. n >= 0.
@param[in,out]
dx COMPLEX_16 array on GPU device.
The n element vector x of dimension (1 + (n-1)*incx).
@param[in]
incx Stride between consecutive elements of dx. incx != 0.
@param[in,out]
dy COMPLEX_16 array on GPU device.
The n element vector y of dimension (1 + (n-1)*incy).
@param[in]
incy Stride between consecutive elements of dy. incy != 0.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_swap
*******************************************************************************/
extern "C" void
magmablas_zswap(
magma_int_t n,
magmaDoubleComplex_ptr dx, magma_int_t incx,
magmaDoubleComplex_ptr dy, magma_int_t incy,
magma_queue_t queue )
{
dim3 threads( NB );
dim3 grid( magma_ceildiv( n, NB ) );
zswap_kernel<<< grid, threads, 0, queue->cuda_stream() >>>( n, dx, incx, dy, incy );
}
|
eb92ca619fe272ed13c09193fc67cdf4d16cafa6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "defines.cu"
/// @brief Updates backpropagated error by activation derivative.
/// @details err_y *= 1.0 - exp(-y)
extern "C"
__global__ void err_y_update(dtype *err_y, const dtype *y) {
size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < ERR_OUTPUT_SIZE) {
err_y[idx] *= (dtype)1.0 - exp(-y[idx]);
}
}
| eb92ca619fe272ed13c09193fc67cdf4d16cafa6.cu | #include "defines.cu"
/// @brief Updates backpropagated error by activation derivative.
/// @details err_y *= 1.0 - exp(-y)
extern "C"
__global__ void err_y_update(dtype *err_y, const dtype *y) {
size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < ERR_OUTPUT_SIZE) {
err_y[idx] *= (dtype)1.0 - exp(-y[idx]);
}
}
|
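The kernel above leans on `defines.cu` for both `dtype` and `ERR_OUTPUT_SIZE`, neither of which is shown. A standalone sketch with those two filled in as explicit assumptions (float elements, a 4096-element error buffer) makes the launch shape concrete:

```cuda
#include <cuda_runtime.h>

// Stand-ins for what defines.cu would normally provide (assumptions).
typedef float dtype;
#define ERR_OUTPUT_SIZE 4096

// Same update as above: scale the backpropagated error by 1 - exp(-y).
__global__ void err_y_update(dtype *err_y, const dtype *y)
{
    size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < ERR_OUTPUT_SIZE)
        err_y[idx] *= (dtype)1.0 - exp(-y[idx]);
}

void launch_err_y_update(dtype *d_err_y, const dtype *d_y)
{
    const int block = 256;                                  // assumed block size
    const int grid  = (ERR_OUTPUT_SIZE + block - 1) / block;
    err_y_update<<<grid, block>>>(d_err_y, d_y);
}
```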
7aa033a7fa665e7e515380382dcaff436130ce51.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
__global__ void
magma_zlobpcg_maxpy_kernel(
magma_int_t num_rows,
magma_int_t num_vecs,
magmaDoubleComplex * X,
magmaDoubleComplex * Y)
{
int row = blockIdx.x * blockDim.x + threadIdx.x; // global row index
if ( row < num_rows ) {
for( int i=0; i < num_vecs; i++ ) {
Y[ row + i*num_rows ] += X[ row + i*num_rows ];
}
}
}
/**
Purpose
-------
This routine computes a axpy for a mxn matrix:
Y = X + Y
It replaces:
magma_zaxpy(m*n, c_one, Y, 1, X, 1);
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
X = | x1[2] x2[2] x3[2] | = x1[0] x1[1] x1[2] x1[3] x1[4] x2[0] x2[1] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
num_vecs magma_int_t
number of vectors
@param[in]
X magmaDoubleComplex_ptr
input vector X
@param[in,out]
Y magmaDoubleComplex_ptr
input/output vector Y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zlobpcg_maxpy(
magma_int_t num_rows,
magma_int_t num_vecs,
magmaDoubleComplex_ptr X,
magmaDoubleComplex_ptr Y,
magma_queue_t queue )
{
// every thread handles one row
magma_int_t block_size = BLOCK_SIZE;
magma_int_t threads = BLOCK_SIZE;
dim3 block( block_size );
dim3 grid( magma_ceildiv( num_rows, block_size ) );
hipLaunchKernelGGL(( magma_zlobpcg_maxpy_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
num_rows, num_vecs, X, Y );
return MAGMA_SUCCESS;
}
| 7aa033a7fa665e7e515380382dcaff436130ce51.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> c d s
*/
#include "magmasparse_internal.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
__global__ void
magma_zlobpcg_maxpy_kernel(
magma_int_t num_rows,
magma_int_t num_vecs,
magmaDoubleComplex * X,
magmaDoubleComplex * Y)
{
int row = blockIdx.x * blockDim.x + threadIdx.x; // global row index
if ( row < num_rows ) {
for( int i=0; i < num_vecs; i++ ) {
Y[ row + i*num_rows ] += X[ row + i*num_rows ];
}
}
}
/**
Purpose
-------
This routine computes a axpy for a mxn matrix:
Y = X + Y
It replaces:
magma_zaxpy(m*n, c_one, Y, 1, X, 1);
/ x1[0] x2[0] x3[0] \
| x1[1] x2[1] x3[1] |
X = | x1[2] x2[2] x3[2] | = x1[0] x1[1] x1[2] x1[3] x1[4] x2[0] x2[1] .
| x1[3] x2[3] x3[3] |
\ x1[4] x2[4] x3[4] /
Arguments
---------
@param[in]
num_rows magma_int_t
number of rows
@param[in]
num_vecs magma_int_t
number of vectors
@param[in]
X magmaDoubleComplex_ptr
input vector X
@param[in,out]
Y magmaDoubleComplex_ptr
input/output vector Y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zlobpcg_maxpy(
magma_int_t num_rows,
magma_int_t num_vecs,
magmaDoubleComplex_ptr X,
magmaDoubleComplex_ptr Y,
magma_queue_t queue )
{
// every thread handles one row
magma_int_t block_size = BLOCK_SIZE;
magma_int_t threads = BLOCK_SIZE;
dim3 block( block_size );
dim3 grid( magma_ceildiv( num_rows, block_size ) );
magma_zlobpcg_maxpy_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( num_rows, num_vecs, X, Y );
return MAGMA_SUCCESS;
}
|
4ca9185945230621e62390152cd89b511686396f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cstdio>
#define ITER 100
#define TILE_WIDTH 4
__global__ void MatrixMulKernel(float* d_M, float* d_N, float* d_P, int Width)
{
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
// Identify the row and column of the d_P element to work on
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
// Loop over the d_M and d_N tiles required to compute d_P element
for (int ph = 0; ph < Width/TILE_WIDTH; ++ph)
{
// Collaborative loading of d_M and d_N tiles into shared memory
Mds[ty][tx] = d_M[Row*Width + ph*TILE_WIDTH + tx];
Nds[ty][tx] = d_N[(ph*TILE_WIDTH + ty)*Width + Col];
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k)
{
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
d_P[Row*Width + Col] = Pvalue;
}
int main()
{
float *h_a, *h_b, *h_c;
h_a = (float *)malloc(ITER * sizeof(float));
h_b = (float *)malloc(ITER * sizeof(float));
h_c = (float *)malloc(ITER * sizeof(float));
for (int i = 0; i < ITER; ++i)
{
h_a[i] = i;
h_b[i] = i;
h_c[i] = i;
}
hipLaunchKernelGGL(( MatrixMulKernel), dim3(ceil(ITER/256.0)), dim3(256), 0, 0, h_a, h_b, h_c, ITER);
free(h_a);
free(h_b);
free(h_c);
return 0;
}
| 4ca9185945230621e62390152cd89b511686396f.cu | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cstdio>
#define ITER 100
#define TILE_WIDTH 4
__global__ void MatrixMulKernel(float* d_M, float* d_N, float* d_P, int Width)
{
__shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
__shared__ float Nds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
// Identify the row and column of the d_P element to work on
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float Pvalue = 0;
// Loop over the d_M and d_N tiles required to compute d_P element
for (int ph = 0; ph < Width/TILE_WIDTH; ++ph)
{
// Collaborative loading of d_M and d_N tiles into shared memory
Mds[ty][tx] = d_M[Row*Width + ph*TILE_WIDTH + tx];
Nds[ty][tx] = d_N[(ph*TILE_WIDTH + ty)*Width + Col];
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k)
{
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads();
}
d_P[Row*Width + Col] = Pvalue;
}
int main()
{
float *h_a, *h_b, *h_c;
h_a = (float *)malloc(ITER * sizeof(float));
h_b = (float *)malloc(ITER * sizeof(float));
h_c = (float *)malloc(ITER * sizeof(float));
for (int i = 0; i < ITER; ++i)
{
h_a[i] = i;
h_b[i] = i;
h_c[i] = i;
}
MatrixMulKernel<<<ceil(ITER/256.0), 256>>>(h_a, h_b, h_c, ITER);
free(h_a);
free(h_b);
free(h_c);
return 0;
}
|
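Note that the `main()` in the pair above passes the host buffers `h_a`, `h_b`, `h_c` straight to `MatrixMulKernel` and uses a one-dimensional launch, neither of which matches what the tiled kernel expects. The sketch below shows a corrected host-side driver under stated assumptions (device allocations, a `Width` that is a multiple of `TILE_WIDTH`, constant-filled inputs); it is illustrative, not the original author's harness, and links against the kernel definition above.

```cuda
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define TILE_WIDTH 4

// Tiled kernel defined in the file above.
__global__ void MatrixMulKernel(float *d_M, float *d_N, float *d_P, int Width);

int main()
{
    const int Width = 64;                 // assumed; must be a multiple of TILE_WIDTH
    const size_t bytes = (size_t)Width * Width * sizeof(float);

    float *h_M = (float *)malloc(bytes), *h_N = (float *)malloc(bytes), *h_P = (float *)malloc(bytes);
    for (int i = 0; i < Width * Width; ++i) { h_M[i] = 1.0f; h_N[i] = 2.0f; }

    // The kernel needs device pointers, not the host buffers.
    float *d_M, *d_N, *d_P;
    cudaMalloc(&d_M, bytes); cudaMalloc(&d_N, bytes); cudaMalloc(&d_P, bytes);
    cudaMemcpy(d_M, h_M, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_N, h_N, bytes, cudaMemcpyHostToDevice);

    // One TILE_WIDTH x TILE_WIDTH thread block per output tile.
    dim3 block(TILE_WIDTH, TILE_WIDTH);
    dim3 grid(Width / TILE_WIDTH, Width / TILE_WIDTH);
    MatrixMulKernel<<<grid, block>>>(d_M, d_N, d_P, Width);

    cudaMemcpy(h_P, d_P, bytes, cudaMemcpyDeviceToHost);
    printf("P[0][0] = %f (expected %f)\n", h_P[0], 2.0f * Width);

    cudaFree(d_M); cudaFree(d_N); cudaFree(d_P);
    free(h_M); free(h_N); free(h_P);
    return 0;
}
```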
90b1f28a4dee919d6f14f41aaf8d60c48d5be6c4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void forward_avgpool_layer_kernel(int n, int w, int h, int c, float *input, float *output)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int k = id % c;
id /= c;
int b = id;
int i;
int out_index = (k + c*b);
output[out_index] = 0;
for(i = 0; i < w*h; ++i){
int in_index = i + h*w*(k + b*c);
output[out_index] += input[in_index];
}
output[out_index] /= w*h;
} | 90b1f28a4dee919d6f14f41aaf8d60c48d5be6c4.cu | #include "includes.h"
__global__ void forward_avgpool_layer_kernel(int n, int w, int h, int c, float *input, float *output)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(id >= n) return;
int k = id % c;
id /= c;
int b = id;
int i;
int out_index = (k + c*b);
output[out_index] = 0;
for(i = 0; i < w*h; ++i){
int in_index = i + h*w*(k + b*c);
output[out_index] += input[in_index];
}
output[out_index] /= w*h;
} |
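The average-pooling kernel above assigns one thread per (batch, channel) pair, each summing and averaging its own w×h feature map. A small launch sketch follows; the 512-thread block and the tensor sizes are assumptions, and the wrapper links against the kernel in the file above.

```cuda
#include <cuda_runtime.h>

// Kernel defined in the file above.
__global__ void forward_avgpool_layer_kernel(int n, int w, int h, int c,
                                             float *input, float *output);

// One thread per (batch, channel) pair; each thread averages its own w*h map.
void forward_avgpool(float *d_input, float *d_output, int batch, int w, int h, int c)
{
    int n = batch * c;                              // total output elements
    const int block = 512;                          // assumed block size
    int grid = (n + block - 1) / block;             // ceil-division grid
    forward_avgpool_layer_kernel<<<grid, block>>>(n, w, h, c, d_input, d_output);
}
```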
aa17578883859ed2da00cefafd8a9d59a5a29109.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// includes, system
#include <stdio.h>
#include <math.h>
#include "KernelUtils.h"
#include "HostUtils.h"
#include "GraphKernel.h"
__global__ void kernelLabelComponents( long long int *graph,
unsigned long long int *components,
int *hasChange,
long long int sizeComponents,
long long offset ) {
unsigned long long int tid = blockDim.x * blockIdx.x + threadIdx.x;
unsigned long long int idx = tid;
if( idx < sizeComponents ) {
unsigned long long int ev1 = idx + offset;
unsigned long long int ev2 = graph[idx];
unsigned long long int cv1 = components[ev1];
unsigned long long int cv2 = components[ev2];
if( cv1 < cv2 ) {
// atomicMin( &components[e.v2], cv1 );
components[ev2] = cv1;
hasChange[0] = 1;
} else if( cv1 > cv2 ) {
// atomicMin( &components[e.v1], cv2 );
components[ev1] = cv2;
hasChange[0] = 1;
}
}
}
__global__ void kernelInitializeConectedComponents( unsigned long long int *components,
long long int sizeComponents,
long long int offset ) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
long long int idx = tid + offset;
if( idx < sizeComponents ) {
// comentar (somente para kernelSharedMemory)
// idx = (idx ^ (idx >> 1));
components[idx] = idx;
}
}
unsigned long long int * executeKernelLabelComponents( long long int *graph,
unsigned long long int nVertices ) {
printf( "....Start: executeKernelLabelComponents \n\n" );
unsigned long long int *components = NULL;
int *hasChangeHost = NULL;
long long int *graphDev = NULL;
unsigned long long int *componentsDev;
int *hasChangeDev = NULL;
long long int numThreadsPerBlock;
long long int numBlocksOnGrid;
long long int restThreadsToExecute;
long long int numIterations;
// Part 1 of 6: define kernel configuration
// Number of threads per block
// Part 3 of 6: allocate device memory
long long int memComponentsSize = nVertices
* (long long int) sizeof(long long int);
int memHasChangeSize = sizeof(int);
hasChangeHost = getPointerToMatrix( 1 );
hipMalloc( (void **) &hasChangeDev, memHasChangeSize );
if( checkCUDAError( "GraphKernel::hipMalloc. Aborting..." ) ) {
return NULL;
}
//------- Allocate Zero Copy memory -------
hipHostMalloc( (void **) &components, memComponentsSize,
hipHostMallocMapped );
if( !components || checkCUDAError(
"GraphKernel::hipHostMalloc. Aborting..." ) ) {
if( !components ) {
printf(
"GraphKernel::hipHostMalloc. Cannot allocate memory of size: %lld.\n",
memComponentsSize );
}
return NULL;
}
hipHostGetDevicePointer( (void **) &componentsDev, (void *) components, 0 );
if( !componentsDev || checkCUDAError(
"GraphKernel::hipHostGetDevicePointer. Aborting..." ) ) {
printf(
"GraphKernel::hipHostGetDevicePointer. Cannot allocate memory of size: %lld.\n",
memComponentsSize );
return NULL;
}
//-------
//------- Kernel Initialization Execution
hipEvent_t start, stop;
float time;
hipEventCreate( &start );
hipEventCreate( &stop );
hipEventRecord( start, 0 );
calculateKernelLaunchConfiguration( (long long int) nVertices,
(long long int *) &numThreadsPerBlock,
(long long int *) &numBlocksOnGrid,
(long long int *) &numIterations,
(long long int *) &restThreadsToExecute );
dim3 dimBlockInitVertices( numThreadsPerBlock );
dim3 dimGridInitVertices( numBlocksOnGrid );
long long int offset = 0;
int numIterationsKernelInit = numIterations
+ (restThreadsToExecute <= 0 ? 0 : 1);
for (int i = 0; i < numIterationsKernelInit; i++) {
hipLaunchKernelGGL(( kernelInitializeConectedComponents) , dim3(dimGridInitVertices), dim3(dimBlockInitVertices), 0, 0, componentsDev, nVertices, offset );
offset += dimBlockInitVertices.x * dimGridInitVertices.x;
}
hipDeviceSynchronize();
checkCUDAError( "Error: kernelInitializeConectedComponents" );
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
hipEventDestroy( start );
hipEventDestroy( stop );
float timeKernelInitialize = (time / 1000);
int steps = 0;
hipEvent_t startTotalTime, stopTotalTime;
hipEventCreate( &startTotalTime );
hipEventCreate( &stopTotalTime );
hipEventRecord( startTotalTime, 0 );
//-------
//------- Kernel label components execution
calculateKernelLaunchConfiguration( (long long int) nVertices,
&numThreadsPerBlock, &numBlocksOnGrid, &numIterations,
&restThreadsToExecute );
dim3 dimBlockLabelComp( numThreadsPerBlock );
dim3 dimGridLabelComp( numBlocksOnGrid );
printf( "Number of blocks used: %lld \n", numBlocksOnGrid );
printf( "Number of threads used: %lld \n", numThreadsPerBlock );
printf( "Internal iterations: %lld \n", numIterations );
long long int previousMemGraphSize = -1;
do {
if( steps % 1000 == 0 ) {
hipEventCreate( &start );
hipEventCreate( &stop );
hipEventRecord( start, 0 );
}
steps++;
hasChangeHost[0] = 0;
hipMemcpy( hasChangeDev, hasChangeHost, memHasChangeSize,
hipMemcpyHostToDevice );
offset = 0;
for (int i = 0; i < numIterationsKernelInit; i++) {
// Part 4 of 6: host to device copy
int graphDevSize;
if( i == (numIterationsKernelInit - 1) && restThreadsToExecute > 0 ) {
graphDevSize = restThreadsToExecute;
calculateKernelLaunchConfiguration( graphDevSize,
&numThreadsPerBlock, &numBlocksOnGrid, &numIterations,
&restThreadsToExecute );
dim3 dimBlock2( numThreadsPerBlock );
dim3 dimGrid2( numBlocksOnGrid );
dimBlockLabelComp = dimBlock2;
dimGridLabelComp = dimGrid2;
} else {
graphDevSize = (dimGridLabelComp.x * dimBlockLabelComp.x);
}
long long int memGraphSize = graphDevSize
* (long long int) sizeof(long long int);
long long int *partialGraph = (graph + offset);
if( previousMemGraphSize != memGraphSize ) {
if( graphDev != NULL ) {
hipFree( graphDev );
checkCUDAError( "edgesDev [partial] Free" );
}
printf( "GraphPartial size [MEM: %f MB] \n",
((double) (memGraphSize) / (1024.0 * 1024.0)) );
hipMalloc( (void **) &graphDev, memGraphSize );
previousMemGraphSize = memGraphSize;
}
hipMemcpy( graphDev, partialGraph, memGraphSize,
hipMemcpyHostToDevice );
checkCUDAError( "edgesDev Memory Allocation" );
hipLaunchKernelGGL(( kernelLabelComponents) , dim3(dimGridLabelComp), dim3(dimBlockLabelComp), 0, 0, graphDev, componentsDev, hasChangeDev, graphDevSize, offset);
offset += dimGridLabelComp.x * dimBlockLabelComp.x;
hipDeviceSynchronize();
checkCUDAError( "Kernel execution" );
}
hipMemcpy( hasChangeHost, hasChangeDev, memHasChangeSize,
hipMemcpyDeviceToHost );
checkCUDAError( "Memory copy" );
if( steps % 1000 == 0 ) {
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &time, start, stop );
hipEventDestroy( start );
hipEventDestroy( stop );
// printf( "step: %d - kernelLabelComponents time %f s \n", steps,
// (time / 1000) );
}
} while( hasChangeHost[0] == 1 );
offset += dimGridLabelComp.x * dimBlockLabelComp.x;
hipDeviceSynchronize();
hipMemcpy( components, componentsDev, memComponentsSize,
hipMemcpyDeviceToHost );
checkCUDAError( "Memory copy verticesComponentOut " );
// Calculate total time for algorithm execution
hipEventRecord( stopTotalTime, 0 );
hipEventSynchronize( stopTotalTime );
hipEventElapsedTime( &time, startTotalTime, stopTotalTime );
hipEventDestroy( startTotalTime );
hipEventDestroy( stopTotalTime );
float timeKernelLabelComponents = (time / 1000);
printf( "kernelInitializeConectedComponents time (s): %f \n",
timeKernelInitialize );
printf( "kernelLabelComponents time (s): %f \n", timeKernelLabelComponents );
printf( "Number of steps: %d \n", steps );
// free device memory
if( graphDev != NULL ) {
hipFree( graphDev );
checkCUDAError( "edgesDev [end] Free" );
}
hipFree( hasChangeDev );
checkCUDAError( "hasChangeDev Free" );
printf( "\n\n....End: executeKernelLabelComponents\n\n" );
return components;
}
| aa17578883859ed2da00cefafd8a9d59a5a29109.cu | // includes, system
#include <stdio.h>
#include <math.h>
#include "KernelUtils.h"
#include "HostUtils.h"
#include "GraphKernel.h"
__global__ void kernelLabelComponents( long long int *graph,
unsigned long long int *components,
int *hasChange,
long long int sizeComponents,
long long offset ) {
unsigned long long int tid = blockDim.x * blockIdx.x + threadIdx.x;
unsigned long long int idx = tid;
if( idx < sizeComponents ) {
unsigned long long int ev1 = idx + offset;
unsigned long long int ev2 = graph[idx];
unsigned long long int cv1 = components[ev1];
unsigned long long int cv2 = components[ev2];
if( cv1 < cv2 ) {
// atomicMin( &components[e.v2], cv1 );
components[ev2] = cv1;
hasChange[0] = 1;
} else if( cv1 > cv2 ) {
// atomicMin( &components[e.v1], cv2 );
components[ev1] = cv2;
hasChange[0] = 1;
}
}
}
__global__ void kernelInitializeConectedComponents( unsigned long long int *components,
long long int sizeComponents,
long long int offset ) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
long long int idx = tid + offset;
if( idx < sizeComponents ) {
// comentar (somente para kernelSharedMemory)
// idx = (idx ^ (idx >> 1));
components[idx] = idx;
}
}
unsigned long long int * executeKernelLabelComponents( long long int *graph,
unsigned long long int nVertices ) {
printf( "....Start: executeKernelLabelComponents \n\n" );
unsigned long long int *components = NULL;
int *hasChangeHost = NULL;
long long int *graphDev = NULL;
unsigned long long int *componentsDev;
int *hasChangeDev = NULL;
long long int numThreadsPerBlock;
long long int numBlocksOnGrid;
long long int restThreadsToExecute;
long long int numIterations;
// Part 1 of 6: define kernel configuration
// Number of threads per block
// Part 3 of 6: allocate device memory
long long int memComponentsSize = nVertices
* (long long int) sizeof(long long int);
int memHasChangeSize = sizeof(int);
hasChangeHost = getPointerToMatrix( 1 );
cudaMalloc( (void **) &hasChangeDev, memHasChangeSize );
if( checkCUDAError( "GraphKernel::cudaMalloc. Aborting..." ) ) {
return NULL;
}
//------- Allocate Zero Copy memory -------
cudaHostAlloc( (void **) &components, memComponentsSize,
cudaHostAllocMapped );
if( !components || checkCUDAError(
"GraphKernel::cudaHostAlloc. Aborting..." ) ) {
if( !components ) {
printf(
"GraphKernel::cudaHostAlloc. Cannot allocate memory of size: %lld.\n",
memComponentsSize );
}
return NULL;
}
cudaHostGetDevicePointer( (void **) &componentsDev, (void *) components, 0 );
if( !componentsDev || checkCUDAError(
"GraphKernel::cudaHostGetDevicePointer. Aborting..." ) ) {
printf(
"GraphKernel::cudaHostGetDevicePointer. Cannot allocate memory of size: %lld.\n",
memComponentsSize );
return NULL;
}
//-------
//------- Kernel Initialization Execution
cudaEvent_t start, stop;
float time;
cudaEventCreate( &start );
cudaEventCreate( &stop );
cudaEventRecord( start, 0 );
calculateKernelLaunchConfiguration( (long long int) nVertices,
(long long int *) &numThreadsPerBlock,
(long long int *) &numBlocksOnGrid,
(long long int *) &numIterations,
(long long int *) &restThreadsToExecute );
dim3 dimBlockInitVertices( numThreadsPerBlock );
dim3 dimGridInitVertices( numBlocksOnGrid );
long long int offset = 0;
int numIterationsKernelInit = numIterations
+ (restThreadsToExecute <= 0 ? 0 : 1);
for (int i = 0; i < numIterationsKernelInit; i++) {
kernelInitializeConectedComponents <<< dimGridInitVertices, dimBlockInitVertices>>> ( componentsDev, nVertices, offset );
offset += dimBlockInitVertices.x * dimGridInitVertices.x;
}
cudaThreadSynchronize();
checkCUDAError( "Error: kernelInitializeConectedComponents" );
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
cudaEventDestroy( start );
cudaEventDestroy( stop );
float timeKernelInitialize = (time / 1000);
int steps = 0;
cudaEvent_t startTotalTime, stopTotalTime;
cudaEventCreate( &startTotalTime );
cudaEventCreate( &stopTotalTime );
cudaEventRecord( startTotalTime, 0 );
//-------
//------- Kernel label components execution
calculateKernelLaunchConfiguration( (long long int) nVertices,
&numThreadsPerBlock, &numBlocksOnGrid, &numIterations,
&restThreadsToExecute );
dim3 dimBlockLabelComp( numThreadsPerBlock );
dim3 dimGridLabelComp( numBlocksOnGrid );
printf( "Number of blocks used: %lld \n", numBlocksOnGrid );
printf( "Number of threads used: %lld \n", numThreadsPerBlock );
printf( "Internal iterations: %lld \n", numIterations );
long long int previousMemGraphSize = -1;
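// Label propagation: each pass copies the graph to the device chunk by chunk and runs kernelLabelComponents; repeat until a pass finishes with no label change (hasChangeHost[0] stays 0).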
do {
if( steps % 1000 == 0 ) {
cudaEventCreate( &start );
cudaEventCreate( &stop );
cudaEventRecord( start, 0 );
}
steps++;
hasChangeHost[0] = 0;
cudaMemcpy( hasChangeDev, hasChangeHost, memHasChangeSize,
cudaMemcpyHostToDevice );
offset = 0;
for (int i = 0; i < numIterationsKernelInit; i++) {
// Part 4 of 6: host to device copy
int graphDevSize;
if( i == (numIterationsKernelInit - 1) && restThreadsToExecute > 0 ) {
graphDevSize = restThreadsToExecute;
calculateKernelLaunchConfiguration( graphDevSize,
&numThreadsPerBlock, &numBlocksOnGrid, &numIterations,
&restThreadsToExecute );
dim3 dimBlock2( numThreadsPerBlock );
dim3 dimGrid2( numBlocksOnGrid );
dimBlockLabelComp = dimBlock2;
dimGridLabelComp = dimGrid2;
} else {
graphDevSize = (dimGridLabelComp.x * dimBlockLabelComp.x);
}
long long int memGraphSize = graphDevSize
* (long long int) sizeof(long long int);
long long int *partialGraph = (graph + offset);
if( previousMemGraphSize != memGraphSize ) {
if( graphDev != NULL ) {
cudaFree( graphDev );
checkCUDAError( "edgesDev [partial] Free" );
}
printf( "GraphPartial size [MEM: %f MB] \n",
((double) (memGraphSize) / (1024.0 * 1024.0)) );
cudaMalloc( (void **) &graphDev, memGraphSize );
previousMemGraphSize = memGraphSize;
}
cudaMemcpy( graphDev, partialGraph, memGraphSize,
cudaMemcpyHostToDevice );
checkCUDAError( "edgesDev Memory Allocation" );
kernelLabelComponents <<< dimGridLabelComp, dimBlockLabelComp>>>(graphDev, componentsDev, hasChangeDev, graphDevSize, offset);
offset += dimGridLabelComp.x * dimBlockLabelComp.x;
cudaThreadSynchronize();
checkCUDAError( "Kernel execution" );
}
cudaMemcpy( hasChangeHost, hasChangeDev, memHasChangeSize,
cudaMemcpyDeviceToHost );
checkCUDAError( "Memory copy" );
if( steps % 1000 == 0 ) {
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
cudaEventDestroy( start );
cudaEventDestroy( stop );
// printf( "step: %d - kernelLabelComponents time %f s \n", steps,
// (time / 1000) );
}
} while( hasChangeHost[0] == 1 );
offset += dimGridLabelComp.x * dimBlockLabelComp.x;
cudaThreadSynchronize();
cudaMemcpy( components, componentsDev, memComponentsSize,
cudaMemcpyDeviceToHost );
checkCUDAError( "Memory copy verticesComponentOut " );
// Calculate total time for algorithm execution
cudaEventRecord( stopTotalTime, 0 );
cudaEventSynchronize( stopTotalTime );
cudaEventElapsedTime( &time, startTotalTime, stopTotalTime );
cudaEventDestroy( startTotalTime );
cudaEventDestroy( stopTotalTime );
float timeKernelLabelComponents = (time / 1000);
printf( "kernelInitializeConectedComponents time (s): %f \n",
timeKernelInitialize );
printf( "kernelLabelComponents time (s): %f \n", timeKernelLabelComponents );
printf( "Number of steps: %d \n", steps );
// free device memory
if( graphDev != NULL ) {
cudaFree( graphDev );
checkCUDAError( "edgesDev [end] Free" );
}
cudaFree( hasChangeDev );
checkCUDAError( "hasChangeDev Free" );
printf( "\n\n....End: executeKernelLabelComponents\n\n" );
return components;
}
|
39542ed0311ca24ed80734fe0c34a4414dc0afab.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "split_pairwise.cuh"
#include <catboost/cuda/cuda_lib/kernel/kernel.cuh>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
namespace NKernel {
//System size <= ROW_SIZE number of rows for decompose,
// in pfound and pair classification we don't need last line
template <int BLOCK_SIZE>
__launch_bounds__(BLOCK_SIZE)
__global__ void ExtractMatricesAndTargetsImpl(const float* linearSystem,
const int matCount,
const int rowSize,
float* matrices,
float* targets) {
const int lineSize = 32;
const int matricesPerBlock = BLOCK_SIZE / lineSize;
const int localMatrixIdx = threadIdx.x / lineSize;
int matrixIdx = blockIdx.x * matricesPerBlock + localMatrixIdx;
if (matrixIdx >= matCount) {
return;
}
linearSystem += ((size_t)matrixIdx) * (rowSize * (rowSize + 1) / 2 + rowSize);
matrices += ((size_t)matrixIdx) * (rowSize * (rowSize + 1) / 2);
targets += ((size_t)matrixIdx) * rowSize;
const int x = threadIdx.x & (lineSize - 1);
#pragma unroll 8
for (int i = x; i < rowSize * (rowSize + 1) / 2; i += lineSize) {
matrices[i] = linearSystem[i];
}
#pragma unroll 8
for (int i = x; i < rowSize; i += lineSize) {
targets[i] = linearSystem[rowSize * (rowSize + 1) / 2 + i];
}
}
void ExtractMatricesAndTargets(const float* linearSystem, int matCount, int rowSize, float* matrices, float* targets, TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = (matCount * 32 + blockSize - 1) / blockSize;
if (numBlocks > 0) {
ExtractMatricesAndTargetsImpl<blockSize> << < numBlocks, blockSize, 0, stream >> > (linearSystem, matCount, rowSize, matrices, targets);
}
}
//System size <= ROW_SIZE number of rows for decompose,
// in pfound and pair classification we don't need last line
template <int BLOCK_SIZE, int ROW_SIZE, int SYSTEM_SIZE>
__launch_bounds__(BLOCK_SIZE)
__global__ void CholeskyDecompositionImpl(float* lower, int matCount) {
const int lineSize = (ROW_SIZE < 32 ? ROW_SIZE : 32);
const int matricesPerBlock = BLOCK_SIZE / lineSize;
const int localMatrixIdx = threadIdx.x / lineSize;
int matrixIdx = blockIdx.x * matricesPerBlock + localMatrixIdx;
if (matrixIdx >= matCount)
return;
lower += ((size_t)matrixIdx) * (ROW_SIZE * (ROW_SIZE + 1) / 2);
const int x = threadIdx.x & (lineSize - 1);
__shared__ float currentLineData[matricesPerBlock * ROW_SIZE];
volatile float* currentLine = &currentLineData[localMatrixIdx * ROW_SIZE];
__shared__ float dotRowData[BLOCK_SIZE];
volatile float* dotRow = &dotRowData[localMatrixIdx * lineSize];
__shared__ float LjjData[matricesPerBlock];
volatile float* Ljj = &LjjData[localMatrixIdx];
if (x == 0) {
const float l00 = __ldg(lower);
lower[0] = sqrtf(l00);
}
__syncthreads();
// #pragma unroll
for (int row = 1; row < SYSTEM_SIZE; ++row) {
//we don't modify this value in matrix, so it's pretty safe to load it with ldg.
#pragma unroll
for (int col = x; col < SYSTEM_SIZE; col += 32) {
currentLine[col] = col <= row ? LdgWithFallback(lower, row * (row + 1) / 2 + col) : 0.0f;
}
__syncwarp();
int reduceSize = 1;
#pragma unroll
for (int col = 0; col < row; ++col) {
if (col & reduceSize) {
reduceSize <<= 1;
}
{
float tmp = 0;
#pragma unroll
for (int colIdx = x; colIdx <= col; colIdx += 32) {
const float val = lower[col * (col + 1) / 2 + colIdx];
tmp += colIdx < col ? val * currentLine[colIdx] : 0;
if (colIdx == col) {
Ljj[0] = val;
}
}
dotRow[x] = tmp;
}
const float sum = WarpReduce(x, dotRow, min(reduceSize, 32));
if (x == 0) {
currentLine[col] = Ljj[0] > 0 ? (currentLine[col] - sum) / (Ljj[0] + 1e-9f) : 0.0f;
}
__syncwarp();
}
{
{
float tmp = 0;
#pragma unroll
for (int col = x; col < row; col += 32) {
tmp += currentLine[col] * currentLine[col];
}
__syncwarp();
dotRow[x] = tmp;
}
const float sum = WarpReduce(x, dotRow, min(reduceSize, 32));
if (x == 0) {
const float tmp = currentLine[row] - sum;
currentLine[row] = tmp > 1e-4f ? sqrtf(tmp) : 1e-2f;
}
__syncwarp();
}
#pragma unroll
for (int colIdx = x; colIdx <= row; colIdx += 32) {
lower[row * (row + 1) / 2 + colIdx] = currentLine[colIdx];
}
__syncthreads();
}
}
class TDirectSystem {
private:
const float* Data;
float* Target;
public:
__device__ TDirectSystem(const float* data, float* target, int rowSize)
: Data(data)
, Target(target)
{
(void)rowSize;
}
__forceinline__ __device__ float Get(int row, int col) const {
return LdgWithFallback(Data, row * (row + 1) / 2 + col);
}
__forceinline__ __device__ float GetTarget(int row) const {
return LdgWithFallback(Target, row);
}
__forceinline__ __device__ void WriteSolution(int row, float solution) const {
Target[row] = solution;
}
};
class TTransposedSystem {
private:
const float* Data;
float* Target;
int RowSize;
public:
__device__ TTransposedSystem(const float* data, float* target, int rowSize)
: Data(data)
, Target(target)
, RowSize(rowSize) {
}
__forceinline__ __device__ float Get(int row, int col) const {
row = RowSize - row - 1;
col = RowSize - col - 1;
return LdgWithFallback(Data, col * (col + 1) / 2 + row);
}
__forceinline__ __device__ float GetTarget(int row) const {
return LdgWithFallback(Target, RowSize - row - 1);
}
__forceinline__ __device__ void WriteSolution(int row, float solution) const {
Target[RowSize - row - 1] = solution;
}
};
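// Forward substitution over the lower-triangular factor, one matrix per group of rowSize threads; instantiated with TTransposedSystem the same kernel traverses the system in reverse and performs the backward (L^T) solve.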
template <class TLowerMatrixSystem, int BLOCK_SIZE>
__global__ void SolveForwardImpl(const float* lower, int rowSize, int systemSize, int matCount, float* targets) {
const int matricesPerBlock = BLOCK_SIZE / rowSize;
int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / rowSize;
const int col = threadIdx.x & (rowSize - 1);
const int inBlockOffset = threadIdx.x / rowSize;
__shared__ float solutionsData[BLOCK_SIZE];
__shared__ float dotProductCacheData[BLOCK_SIZE];
if (matrixIdx >= matCount) {
return;
}
lower += ((size_t)matrixIdx) * rowSize * (rowSize + 1) / 2;
targets += matrixIdx * rowSize;
float* solutions = &solutionsData[inBlockOffset * rowSize];
float* dotProductCache = &dotProductCacheData[inBlockOffset * rowSize];
TLowerMatrixSystem system(lower, targets, systemSize);
solutions[col] = col < systemSize ? system.GetTarget(col) : 0;
__syncthreads();
int reduceSize = 1;
#pragma unroll
for (int row = 0; row < systemSize; ++row) {
if (row & reduceSize) {
reduceSize <<= 1;
}
dotProductCache[col] = col <= row ? system.Get(row, col) : 0.0f;
__syncthreads();
float lastCoeff = 0.0f;
if (col == 0) {
lastCoeff = dotProductCache[row];
dotProductCache[row] = 0;
}
__syncthreads();
dotProductCache[col] *= solutions[col];
__syncthreads();
const float sum = FastInBlockReduce(col, dotProductCache, reduceSize);
if (col == 0) {
solutions[row] = lastCoeff > 1e-20f ? (solutions[row] - sum) / (lastCoeff + 1e-20f) : 0;
}
__syncthreads();
}
if (col < systemSize) {
system.WriteSolution(col, solutions[col]);
}
}
template <int BLOCK_SIZE>
__global__ void RegularizeImpl(float* lower, int rowSize,
int matCount, float lambda0, float lambda1) {
const int matricesPerBlock = BLOCK_SIZE / rowSize;
int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / rowSize;
lower += ((size_t)matrixIdx) * rowSize * (rowSize + 1) / 2;
const int col = threadIdx.x & (rowSize - 1);
if (matrixIdx >= matCount) {
return;
}
const float cellPrior = 1.0f / rowSize;
for (int row = 0; row < rowSize; ++row) {
//beta prior (uniform). Makes rank(lower) = rowSize - 1
if (col <= row) {
float val = lower[row * (row + 1) / 2 + col];
if (col == row && val <= 1e-9f) {
val += 1;
}
val += col < row ? -lambda0 * cellPrior : (lambda0 * (1 - cellPrior) + lambda1);
lower[row * (row + 1) / 2 + col] = val;
}
}
}
void Regularize(float* matrices, int rowSize, int matCount, double lambdaNonDiag, double lambdaDiag, TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = (matCount * rowSize + blockSize - 1) / blockSize;
if (numBlocks > 0) {
hipLaunchKernelGGL(( RegularizeImpl<blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, matrices, rowSize, matCount, lambdaNonDiag, lambdaDiag);
}
}
template <int BLOCK_SIZE>
__global__ void ZeroMeanImpl(float* solutions, int rowSize, int matCount) {
const int matricesPerBlock = BLOCK_SIZE / rowSize;
const int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / rowSize;
const int tid = threadIdx.x;
const int col = threadIdx.x & (rowSize - 1);
const int inBlockOffset = threadIdx.x / rowSize;
__shared__ double beta[BLOCK_SIZE];
__shared__ double line[BLOCK_SIZE];
if (matrixIdx >= matCount) {
return;
}
solutions += matrixIdx * rowSize;
beta[tid] = col != (rowSize - 1) ? solutions[col] : 0;
line[tid] = beta[tid];
__syncthreads();
for (int s = rowSize >> 1; s > 0; s >>= 1) {
if (col < s) {
line[tid] += line[tid + s];
}
__syncthreads();
}
beta[tid] -= line[rowSize * inBlockOffset] / rowSize;
solutions[col] = beta[tid];
}
template <int BLOCK_SIZE>
__global__ void CalcScoresCholeskyImpl(const float* linearSystem,
const float* solutions,
int rowSize,
int matCount,
float* scores) {
const int matricesPerBlock = BLOCK_SIZE / rowSize;
const int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / rowSize;
const int tid = threadIdx.x;
const int col = threadIdx.x & (rowSize - 1);
const int inBlockOffset = threadIdx.x / rowSize;
__shared__ float beta[BLOCK_SIZE];
__shared__ float line[BLOCK_SIZE];
if (matrixIdx >= matCount) {
return;
}
linearSystem += ((size_t)matrixIdx) * (rowSize * (rowSize + 1) / 2 + rowSize);
solutions += matrixIdx * rowSize;
scores += matrixIdx;
beta[tid] = solutions[col];
line[tid] = beta[tid];
const float tidTarget = linearSystem[rowSize * (rowSize + 1) / 2 + col];
__syncthreads();
//we store matrix cholesky-decomposition. For score we need to maximize ||beta^{T}L||^2 - 2 <beta, y> (1)
//score to minimize: (A\beta - y)^{T}W(A\beta - y) + \beta^{T} J \beta, where J some positive-defined matrix
//we don't need square sum, so we maximize (1)
{
float partb1 = 0;
#pragma unroll 4
for (int row = 0; row < rowSize; ++row) {
double val = col <= row ? LdgWithFallback(linearSystem, row * (row + 1) / 2 + col)
: LdgWithFallback(linearSystem, col * (col + 1) / 2 + row);
val *= beta[rowSize * inBlockOffset + row];
partb1 += val;
}
line[tid] = beta[tid] * (tidTarget - 0.5 * partb1);
}
__syncthreads();
for (int s = rowSize >> 1; s > 0; s >>= 1) {
if (col < s) {
line[tid] += line[tid + s];
}
__syncthreads();
}
if (col == 0) {
scores[0] = line[tid];
}
}
//Inplace solver
template<int BLOCK_SIZE, int SOLVER_BLOCK_SIZE, int REMOVE_LAST>
inline void RunCholeskySolver(float* matrices, float* solutions,
int rowSize, int matCount,
TCudaStream stream) {
const int numBlocksCholesky = (matCount * min(rowSize, 32) + BLOCK_SIZE - 1) / BLOCK_SIZE;
if (numBlocksCholesky > 0) {
#define CHOLESKY_DECOMPOSITION(ROW_SIZE) \
const int SYSTEM_SIZE = ROW_SIZE - REMOVE_LAST; \
hipLaunchKernelGGL(( CholeskyDecompositionImpl<BLOCK_SIZE, ROW_SIZE, SYSTEM_SIZE>) , dim3(numBlocksCholesky), dim3(BLOCK_SIZE), 0, stream, matrices, matCount); \
break;
switch (rowSize) {
case 1: {
CHOLESKY_DECOMPOSITION(1);
}
case 2: {
CHOLESKY_DECOMPOSITION(2);
}
case 4: {
CHOLESKY_DECOMPOSITION(4);
}
case 8: {
CHOLESKY_DECOMPOSITION(8);
}
case 16: {
CHOLESKY_DECOMPOSITION(16);
}
case 32: {
CHOLESKY_DECOMPOSITION(32);
}
case 64: {
CHOLESKY_DECOMPOSITION(64);
}
case 128: {
CHOLESKY_DECOMPOSITION(128);
}
case 256: {
CHOLESKY_DECOMPOSITION(256);
}
}
const int solverNumBlocks = (matCount * rowSize + SOLVER_BLOCK_SIZE - 1) / SOLVER_BLOCK_SIZE;
if (solverNumBlocks) {
SolveForwardImpl<TDirectSystem, SOLVER_BLOCK_SIZE> << < solverNumBlocks, SOLVER_BLOCK_SIZE, 0, stream >> > (matrices, rowSize, rowSize - REMOVE_LAST, matCount, solutions);
SolveForwardImpl<TTransposedSystem, SOLVER_BLOCK_SIZE> << < solverNumBlocks, SOLVER_BLOCK_SIZE, 0, stream >> > (matrices, rowSize, rowSize - REMOVE_LAST, matCount, solutions);
}
}
}
template<int BLOCK_SIZE>
inline void RunCalcScores(const float* linearSystem, const float* solutions, int rowSize, float* scores,
int matCount, TCudaStream stream) {
const int numBlocks = (matCount * BLOCK_SIZE + BLOCK_SIZE - 1) / BLOCK_SIZE;
CalcScoresCholeskyImpl<BLOCK_SIZE> << < numBlocks, BLOCK_SIZE, 0, stream >> >(linearSystem, solutions, rowSize, matCount, scores);
}
void ZeroMean(float* solutions, int rowSize, int matCount, TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = (matCount * rowSize + blockSize - 1) / blockSize;
if (numBlocks > 0) {
ZeroMeanImpl<blockSize> << < numBlocks, blockSize, 0, stream >> > (solutions, rowSize, matCount);
}
}
void CalcScores(const float* linearSystem, const float* solutions,
float* scores, int rowSize, int matCount, TCudaStream stream)
{
if (rowSize == 256) {
RunCalcScores<256>(linearSystem, solutions, rowSize, scores, matCount, stream);
} else {
RunCalcScores<128>(linearSystem, solutions, rowSize, scores, matCount, stream);
}
}
void CholeskySolver(float* matrices, float* solutions, int rowSize, int matCount, bool removeLast, TCudaStream stream)
{
if (TArchProps::GetMajorVersion() == 2) {
if (removeLast) {
RunCholeskySolver<192, 256, 1>(matrices, solutions, rowSize, matCount, stream);
} else {
RunCholeskySolver<192, 256, 0>(matrices, solutions, rowSize, matCount, stream);
}
} else {
if (removeLast) {
RunCholeskySolver<128, 256, 1>(matrices, solutions, rowSize, matCount, stream);
} else {
RunCholeskySolver<128, 256, 0>(matrices, solutions, rowSize, matCount, stream);
}
}
}
void SolverForward(float* matrices, float* solutions, int rowSize, int matCount, TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = (matCount * rowSize + blockSize - 1) / blockSize;
if (numBlocks > 0) {
hipLaunchKernelGGL(( SolveForwardImpl<TDirectSystem, blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, matrices, rowSize, rowSize - 1, matCount, solutions);
}
}
void SolverBackward(float* matrices, float* solutions, int rowSize, int matCount, TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = (matCount * rowSize + blockSize - 1) / blockSize;
if (numBlocks > 0) {
hipLaunchKernelGGL(( SolveForwardImpl<TTransposedSystem, blockSize>), dim3(numBlocks), dim3(blockSize), 0, stream, matrices, rowSize, rowSize - 1, matCount, solutions);
}
}
}
| 39542ed0311ca24ed80734fe0c34a4414dc0afab.cu | #include "split_pairwise.cuh"
#include <catboost/cuda/cuda_lib/kernel/kernel.cuh>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
namespace NKernel {
//System size <= ROW_SIZE — number of rows for decompose,
// in pfound and pair classification we don't need last line
template <int BLOCK_SIZE>
__launch_bounds__(BLOCK_SIZE)
__global__ void ExtractMatricesAndTargetsImpl(const float* linearSystem,
const int matCount,
const int rowSize,
float* matrices,
float* targets) {
const int lineSize = 32;
const int matricesPerBlock = BLOCK_SIZE / lineSize;
const int localMatrixIdx = threadIdx.x / lineSize;
int matrixIdx = blockIdx.x * matricesPerBlock + localMatrixIdx;
if (matrixIdx >= matCount) {
return;
}
linearSystem += ((size_t)matrixIdx) * (rowSize * (rowSize + 1) / 2 + rowSize);
matrices += ((size_t)matrixIdx) * (rowSize * (rowSize + 1) / 2);
targets += ((size_t)matrixIdx) * rowSize;
const int x = threadIdx.x & (lineSize - 1);
#pragma unroll 8
for (int i = x; i < rowSize * (rowSize + 1) / 2; i += lineSize) {
matrices[i] = linearSystem[i];
}
#pragma unroll 8
for (int i = x; i < rowSize; i += lineSize) {
targets[i] = linearSystem[rowSize * (rowSize + 1) / 2 + i];
}
}
void ExtractMatricesAndTargets(const float* linearSystem, int matCount, int rowSize, float* matrices, float* targets, TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = (matCount * 32 + blockSize - 1) / blockSize;
if (numBlocks > 0) {
ExtractMatricesAndTargetsImpl<blockSize> << < numBlocks, blockSize, 0, stream >> > (linearSystem, matCount, rowSize, matrices, targets);
}
}
//System size <= ROW_SIZE — number of rows for decompose,
// in pfound and pair classification we don't need last line
template <int BLOCK_SIZE, int ROW_SIZE, int SYSTEM_SIZE>
__launch_bounds__(BLOCK_SIZE)
__global__ void CholeskyDecompositionImpl(float* lower, int matCount) {
const int lineSize = (ROW_SIZE < 32 ? ROW_SIZE : 32);
const int matricesPerBlock = BLOCK_SIZE / lineSize;
const int localMatrixIdx = threadIdx.x / lineSize;
int matrixIdx = blockIdx.x * matricesPerBlock + localMatrixIdx;
if (matrixIdx >= matCount)
return;
lower += ((size_t)matrixIdx) * (ROW_SIZE * (ROW_SIZE + 1) / 2);
const int x = threadIdx.x & (lineSize - 1);
__shared__ float currentLineData[matricesPerBlock * ROW_SIZE];
volatile float* currentLine = &currentLineData[localMatrixIdx * ROW_SIZE];
__shared__ float dotRowData[BLOCK_SIZE];
volatile float* dotRow = &dotRowData[localMatrixIdx * lineSize];
__shared__ float LjjData[matricesPerBlock];
volatile float* Ljj = &LjjData[localMatrixIdx];
if (x == 0) {
const float l00 = __ldg(lower);
lower[0] = sqrtf(l00);
}
__syncthreads();
// #pragma unroll
for (int row = 1; row < SYSTEM_SIZE; ++row) {
//we don't modify this value in matrix, so it's pretty safe to load it with ldg.
#pragma unroll
for (int col = x; col < SYSTEM_SIZE; col += 32) {
currentLine[col] = col <= row ? LdgWithFallback(lower, row * (row + 1) / 2 + col) : 0.0f;
}
__syncwarp();
int reduceSize = 1;
#pragma unroll
for (int col = 0; col < row; ++col) {
if (col & reduceSize) {
reduceSize <<= 1;
}
{
float tmp = 0;
#pragma unroll
for (int colIdx = x; colIdx <= col; colIdx += 32) {
const float val = lower[col * (col + 1) / 2 + colIdx];
tmp += colIdx < col ? val * currentLine[colIdx] : 0;
if (colIdx == col) {
Ljj[0] = val;
}
}
dotRow[x] = tmp;
}
const float sum = WarpReduce(x, dotRow, min(reduceSize, 32));
if (x == 0) {
currentLine[col] = Ljj[0] > 0 ? (currentLine[col] - sum) / (Ljj[0] + 1e-9f) : 0.0f;
}
__syncwarp();
}
{
{
float tmp = 0;
#pragma unroll
for (int col = x; col < row; col += 32) {
tmp += currentLine[col] * currentLine[col];
}
__syncwarp();
dotRow[x] = tmp;
}
const float sum = WarpReduce(x, dotRow, min(reduceSize, 32));
if (x == 0) {
const float tmp = currentLine[row] - sum;
currentLine[row] = tmp > 1e-4f ? sqrtf(tmp) : 1e-2f;
}
__syncwarp();
}
#pragma unroll
for (int colIdx = x; colIdx <= row; colIdx += 32) {
lower[row * (row + 1) / 2 + colIdx] = currentLine[colIdx];
}
__syncthreads();
}
}
class TDirectSystem {
private:
const float* Data;
float* Target;
public:
__device__ TDirectSystem(const float* data, float* target, int rowSize)
: Data(data)
, Target(target)
{
(void)rowSize;
}
__forceinline__ __device__ float Get(int row, int col) const {
return LdgWithFallback(Data, row * (row + 1) / 2 + col);
}
__forceinline__ __device__ float GetTarget(int row) const {
return LdgWithFallback(Target, row);
}
__forceinline__ __device__ void WriteSolution(int row, float solution) const {
Target[row] = solution;
}
};
class TTransposedSystem {
private:
const float* Data;
float* Target;
int RowSize;
public:
__device__ TTransposedSystem(const float* data, float* target, int rowSize)
: Data(data)
, Target(target)
, RowSize(rowSize) {
}
__forceinline__ __device__ float Get(int row, int col) const {
row = RowSize - row - 1;
col = RowSize - col - 1;
return LdgWithFallback(Data, col * (col + 1) / 2 + row);
}
__forceinline__ __device__ float GetTarget(int row) const {
return LdgWithFallback(Target, RowSize - row - 1);
}
__forceinline__ __device__ void WriteSolution(int row, float solution) const {
Target[RowSize - row - 1] = solution;
}
};
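// Forward substitution over the lower-triangular factor, one matrix per group of rowSize threads; instantiated with TTransposedSystem the same kernel traverses the system in reverse and performs the backward (L^T) solve.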
template <class TLowerMatrixSystem, int BLOCK_SIZE>
__global__ void SolveForwardImpl(const float* lower, int rowSize, int systemSize, int matCount, float* targets) {
const int matricesPerBlock = BLOCK_SIZE / rowSize;
int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / rowSize;
const int col = threadIdx.x & (rowSize - 1);
const int inBlockOffset = threadIdx.x / rowSize;
__shared__ float solutionsData[BLOCK_SIZE];
__shared__ float dotProductCacheData[BLOCK_SIZE];
if (matrixIdx >= matCount) {
return;
}
lower += ((size_t)matrixIdx) * rowSize * (rowSize + 1) / 2;
targets += matrixIdx * rowSize;
float* solutions = &solutionsData[inBlockOffset * rowSize];
float* dotProductCache = &dotProductCacheData[inBlockOffset * rowSize];
TLowerMatrixSystem system(lower, targets, systemSize);
solutions[col] = col < systemSize ? system.GetTarget(col) : 0;
__syncthreads();
int reduceSize = 1;
#pragma unroll
for (int row = 0; row < systemSize; ++row) {
if (row & reduceSize) {
reduceSize <<= 1;
}
dotProductCache[col] = col <= row ? system.Get(row, col) : 0.0f;
__syncthreads();
float lastCoeff = 0.0f;
if (col == 0) {
lastCoeff = dotProductCache[row];
dotProductCache[row] = 0;
}
__syncthreads();
dotProductCache[col] *= solutions[col];
__syncthreads();
const float sum = FastInBlockReduce(col, dotProductCache, reduceSize);
if (col == 0) {
solutions[row] = lastCoeff > 1e-20f ? (solutions[row] - sum) / (lastCoeff + 1e-20f) : 0;
}
__syncthreads();
}
if (col < systemSize) {
system.WriteSolution(col, solutions[col]);
}
}
template <int BLOCK_SIZE>
__global__ void RegularizeImpl(float* lower, int rowSize,
int matCount, float lambda0, float lambda1) {
const int matricesPerBlock = BLOCK_SIZE / rowSize;
int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / rowSize;
lower += ((size_t)matrixIdx) * rowSize * (rowSize + 1) / 2;
const int col = threadIdx.x & (rowSize - 1);
if (matrixIdx >= matCount) {
return;
}
const float cellPrior = 1.0f / rowSize;
for (int row = 0; row < rowSize; ++row) {
//beta prior (uniform). Makes rank(lower) = rowSize - 1
if (col <= row) {
float val = lower[row * (row + 1) / 2 + col];
if (col == row && val <= 1e-9f) {
val += 1;
}
val += col < row ? -lambda0 * cellPrior : (lambda0 * (1 - cellPrior) + lambda1);
lower[row * (row + 1) / 2 + col] = val;
}
}
}
void Regularize(float* matrices, int rowSize, int matCount, double lambdaNonDiag, double lambdaDiag, TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = (matCount * rowSize + blockSize - 1) / blockSize;
if (numBlocks > 0) {
RegularizeImpl<blockSize><<<numBlocks, blockSize, 0, stream>>>(matrices, rowSize, matCount, lambdaNonDiag, lambdaDiag);
}
}
template <int BLOCK_SIZE>
__global__ void ZeroMeanImpl(float* solutions, int rowSize, int matCount) {
const int matricesPerBlock = BLOCK_SIZE / rowSize;
const int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / rowSize;
const int tid = threadIdx.x;
const int col = threadIdx.x & (rowSize - 1);
const int inBlockOffset = threadIdx.x / rowSize;
__shared__ double beta[BLOCK_SIZE];
__shared__ double line[BLOCK_SIZE];
if (matrixIdx >= matCount) {
return;
}
solutions += matrixIdx * rowSize;
beta[tid] = col != (rowSize - 1) ? solutions[col] : 0;
line[tid] = beta[tid];
__syncthreads();
for (int s = rowSize >> 1; s > 0; s >>= 1) {
if (col < s) {
line[tid] += line[tid + s];
}
__syncthreads();
}
beta[tid] -= line[rowSize * inBlockOffset] / rowSize;
solutions[col] = beta[tid];
}
template <int BLOCK_SIZE>
__global__ void CalcScoresCholeskyImpl(const float* linearSystem,
const float* solutions,
int rowSize,
int matCount,
float* scores) {
const int matricesPerBlock = BLOCK_SIZE / rowSize;
const int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / rowSize;
const int tid = threadIdx.x;
const int col = threadIdx.x & (rowSize - 1);
const int inBlockOffset = threadIdx.x / rowSize;
__shared__ float beta[BLOCK_SIZE];
__shared__ float line[BLOCK_SIZE];
if (matrixIdx >= matCount) {
return;
}
linearSystem += ((size_t)matrixIdx) * (rowSize * (rowSize + 1) / 2 + rowSize);
solutions += matrixIdx * rowSize;
scores += matrixIdx;
beta[tid] = solutions[col];
line[tid] = beta[tid];
const float tidTarget = linearSystem[rowSize * (rowSize + 1) / 2 + col];
__syncthreads();
//we store matrix cholesky-decomposition. For score we need to maximize ||beta^{T}L||^2 - 2 <beta, y> (1)
//score to minimize: (A\beta - y)^{T}W(A\beta - y) + \beta^{T} J \beta, where J — some positive-defined matrix
//we don't need square sum, so we maximize (1)
{
float partb1 = 0;
#pragma unroll 4
for (int row = 0; row < rowSize; ++row) {
double val = col <= row ? LdgWithFallback(linearSystem, row * (row + 1) / 2 + col)
: LdgWithFallback(linearSystem, col * (col + 1) / 2 + row);
val *= beta[rowSize * inBlockOffset + row];
partb1 += val;
}
line[tid] = beta[tid] * (tidTarget - 0.5 * partb1);
}
__syncthreads();
for (int s = rowSize >> 1; s > 0; s >>= 1) {
if (col < s) {
line[tid] += line[tid + s];
}
__syncthreads();
}
if (col == 0) {
scores[0] = line[tid];
}
}
//Inplace solver
template<int BLOCK_SIZE, int SOLVER_BLOCK_SIZE, int REMOVE_LAST>
inline void RunCholeskySolver(float* matrices, float* solutions,
int rowSize, int matCount,
TCudaStream stream) {
const int numBlocksCholesky = (matCount * min(rowSize, 32) + BLOCK_SIZE - 1) / BLOCK_SIZE;
if (numBlocksCholesky > 0) {
#define CHOLESKY_DECOMPOSITION(ROW_SIZE) \
const int SYSTEM_SIZE = ROW_SIZE - REMOVE_LAST; \
CholeskyDecompositionImpl<BLOCK_SIZE, ROW_SIZE, SYSTEM_SIZE> <<< numBlocksCholesky, BLOCK_SIZE, 0, stream>>> (matrices, matCount); \
break;
switch (rowSize) {
case 1: {
CHOLESKY_DECOMPOSITION(1);
}
case 2: {
CHOLESKY_DECOMPOSITION(2);
}
case 4: {
CHOLESKY_DECOMPOSITION(4);
}
case 8: {
CHOLESKY_DECOMPOSITION(8);
}
case 16: {
CHOLESKY_DECOMPOSITION(16);
}
case 32: {
CHOLESKY_DECOMPOSITION(32);
}
case 64: {
CHOLESKY_DECOMPOSITION(64);
}
case 128: {
CHOLESKY_DECOMPOSITION(128);
}
case 256: {
CHOLESKY_DECOMPOSITION(256);
}
}
const int solverNumBlocks = (matCount * rowSize + SOLVER_BLOCK_SIZE - 1) / SOLVER_BLOCK_SIZE;
if (solverNumBlocks) {
SolveForwardImpl<TDirectSystem, SOLVER_BLOCK_SIZE> << < solverNumBlocks, SOLVER_BLOCK_SIZE, 0, stream >> > (matrices, rowSize, rowSize - REMOVE_LAST, matCount, solutions);
SolveForwardImpl<TTransposedSystem, SOLVER_BLOCK_SIZE> << < solverNumBlocks, SOLVER_BLOCK_SIZE, 0, stream >> > (matrices, rowSize, rowSize - REMOVE_LAST, matCount, solutions);
}
}
}
template<int BLOCK_SIZE>
inline void RunCalcScores(const float* linearSystem, const float* solutions, int rowSize, float* scores,
int matCount, TCudaStream stream) {
const int numBlocks = (matCount * BLOCK_SIZE + BLOCK_SIZE - 1) / BLOCK_SIZE;
CalcScoresCholeskyImpl<BLOCK_SIZE> << < numBlocks, BLOCK_SIZE, 0, stream >> >(linearSystem, solutions, rowSize, matCount, scores);
}
void ZeroMean(float* solutions, int rowSize, int matCount, TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = (matCount * rowSize + blockSize - 1) / blockSize;
if (numBlocks > 0) {
ZeroMeanImpl<blockSize> << < numBlocks, blockSize, 0, stream >> > (solutions, rowSize, matCount);
}
}
void CalcScores(const float* linearSystem, const float* solutions,
float* scores, int rowSize, int matCount, TCudaStream stream)
{
if (rowSize == 256) {
RunCalcScores<256>(linearSystem, solutions, rowSize, scores, matCount, stream);
} else {
RunCalcScores<128>(linearSystem, solutions, rowSize, scores, matCount, stream);
}
}
void CholeskySolver(float* matrices, float* solutions, int rowSize, int matCount, bool removeLast, TCudaStream stream)
{
if (TArchProps::GetMajorVersion() == 2) {
if (removeLast) {
RunCholeskySolver<192, 256, 1>(matrices, solutions, rowSize, matCount, stream);
} else {
RunCholeskySolver<192, 256, 0>(matrices, solutions, rowSize, matCount, stream);
}
} else {
if (removeLast) {
RunCholeskySolver<128, 256, 1>(matrices, solutions, rowSize, matCount, stream);
} else {
RunCholeskySolver<128, 256, 0>(matrices, solutions, rowSize, matCount, stream);
}
}
}
void SolverForward(float* matrices, float* solutions, int rowSize, int matCount, TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = (matCount * rowSize + blockSize - 1) / blockSize;
if (numBlocks > 0) {
SolveForwardImpl<TDirectSystem, blockSize><<<numBlocks, blockSize, 0, stream>>>(matrices, rowSize, rowSize - 1, matCount, solutions);
}
}
void SolverBackward(float* matrices, float* solutions, int rowSize, int matCount, TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = (matCount * rowSize + blockSize - 1) / blockSize;
if (numBlocks > 0) {
SolveForwardImpl<TTransposedSystem, blockSize><<<numBlocks, blockSize, 0, stream>>>(matrices, rowSize, rowSize - 1, matCount, solutions);
}
}
}
|
644ad9def4a7f01054d83612d2791ba00402b328.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernel_sub(char* newB, char* first, char* second, int size_biggest, int diff, int * size_newB) {
int tmp = 0;
int i = threadIdx.x;
#if __CUDA_ARCH__>=200
//printf("#threadIdx.x = %d\n", threadIdx.x);
#endif
if (i == 0) return;
//for (int i = size_biggest - 1; i >= 0; i--) {
if (i - 1 - diff >= 0 && (second[i - 1 - diff] != '+' && second[i - 1 - diff] != '-')) {
tmp = first[i - 1] - second[i-1-diff];
} else if (first[i - 1] != '+' && first[i - 1] != '-') {
tmp = first[i - 1];
}
if (tmp < 0) {
// warning 10 - tmp ?
newB[i - 1]--;
tmp += 10;
}
if (i != 0)
newB[i] += tmp;
//}
} | 644ad9def4a7f01054d83612d2791ba00402b328.cu | #include "includes.h"
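// Digit-wise subtraction: each thread handles one character position; 'diff' offsets the shorter operand, and a negative partial digit borrows by decrementing newB[i-1] and adding 10.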
__global__ void kernel_sub(char* newB, char* first, char* second, int size_biggest, int diff, int * size_newB) {
int tmp = 0;
int i = threadIdx.x;
#if __CUDA_ARCH__>=200
//printf("#threadIdx.x = %d\n", threadIdx.x);
#endif
if (i == 0) return;
//for (int i = size_biggest - 1; i >= 0; i--) {
if (i - 1 - diff >= 0 && (second[i - 1 - diff] != '+' && second[i - 1 - diff] != '-')) {
tmp = first[i - 1] - second[i-1-diff];
} else if (first[i - 1] != '+' && first[i - 1] != '-') {
tmp = first[i - 1];
}
if (tmp < 0) {
// warning 10 - tmp ?
newB[i - 1]--;
tmp += 10;
}
if (i != 0)
newB[i] += tmp;
//}
} |
a024d2945ede492687dc16c47e80236206b883e6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
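// Launched as 4 blocks of ARRAY_SIZE/4 (512) threads, so blockIdx.x*(2048/4) + threadIdx.x indexes each of the 2048 elements exactly once; every thread squares one element.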
__global__ void square(float * d_out, float * d_in){
int idx = threadIdx.x + blockIdx.x*(2048/4);
float f = d_in[idx];
d_out[idx] = f * f;
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 2048;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// allocate GPU memory
hipMalloc((void**) &d_in, ARRAY_BYTES);
hipMalloc((void**) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
// launch the kernel
hipLaunchKernelGGL(( square), dim3(4), dim3(ARRAY_SIZE/4), 0, 0, d_out, d_in);
// copy back the result array to the CPU
hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
// print out the resulting array
for (int i =0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
hipFree(d_in);
hipFree(d_out);
hipError_t err;
if ( hipSuccess != (err = hipGetLastError()) ){
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString( err ) );
exit(-2);
}
return 0;
} | a024d2945ede492687dc16c47e80236206b883e6.cu | #include <stdio.h>
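// Launched as 4 blocks of ARRAY_SIZE/4 (512) threads, so blockIdx.x*(2048/4) + threadIdx.x indexes each of the 2048 elements exactly once; every thread squares one element.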
__global__ void square(float * d_out, float * d_in){
int idx = threadIdx.x + blockIdx.x*(2048/4);
float f = d_in[idx];
d_out[idx] = f * f;
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 2048;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// allocate GPU memory
cudaMalloc((void**) &d_in, ARRAY_BYTES);
cudaMalloc((void**) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
// launch the kernel
square<<<4, ARRAY_SIZE/4>>>(d_out, d_in);
// copy back the result array to the CPU
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// print out the resulting array
for (int i =0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
cudaFree(d_in);
cudaFree(d_out);
cudaError err;
if ( cudaSuccess != (err = cudaGetLastError()) ){
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString( err ) );
exit(-2);
}
return 0;
} |
c6a9ecfe6392c0cb42907ab64dde4d72da947f63.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__
void initWith(float num, float *a, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
a[i] = num;
}
}
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
result[i] = a[i] + b[i];
}
}
void checkElementsAre(float target, float *vector, int N)
{
for(int i = 0; i < N; i++)
{
if(vector[i] != target)
{
printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
exit(1);
}
}
printf("Success! All values calculated correctly.\n");
}
int main()
{
int deviceId;
int numberOfSMs;
hipGetDevice(&deviceId);
hipDeviceGetAttribute(&numberOfSMs, hipDeviceAttributeMultiprocessorCount, deviceId);
const int N = 2<<24;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
float *h_c;
hipMalloc(&a, size);
hipMalloc(&b, size);
hipMalloc(&c, size);
hipHostMalloc(&h_c, size);
size_t threadsPerBlock;
size_t numberOfBlocks;
threadsPerBlock = 256;
numberOfBlocks = 32 * numberOfSMs;
hipError_t addVectorsErr;
hipError_t asyncErr;
/*
* Create 3 streams to run initialize the 3 data vectors in parallel.
*/
hipStream_t stream1, stream2, stream3;
hipStreamCreate(&stream1);
hipStreamCreate(&stream2);
hipStreamCreate(&stream3);
/*
* Give each `initWith` launch its own non-standard stream.
*/
hipLaunchKernelGGL(( initWith), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, stream1, 3, a, N);
hipLaunchKernelGGL(( initWith), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, stream2, 4, b, N);
hipLaunchKernelGGL(( initWith), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, stream3, 0, c, N);
const int numberOfSegments = 4;
int segmentN = N / numberOfSegments;
size_t segmentSize = size / numberOfSegments;
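// Process the vector in segments, each with its own stream, so kernel execution and the asynchronous copy back of different segments can overlap.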
for(int i = 0; i < numberOfSegments; i++){
int segmentOffset = i * segmentN;
hipStream_t stream;
hipStreamCreate(&stream);
hipLaunchKernelGGL(( addVectorsInto), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, stream, &c[segmentOffset], &a[segmentOffset], &b[segmentOffset], segmentN);
hipMemcpyAsync(&h_c[segmentOffset], &c[segmentOffset], segmentSize, hipMemcpyDeviceToHost, stream);
hipStreamDestroy(stream);
}
asyncErr = hipDeviceSynchronize();
if(asyncErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(asyncErr));
checkElementsAre(7, h_c, N);
/*
* Destroy streams when they are no longer needed.
*/
hipStreamDestroy(stream1);
hipStreamDestroy(stream2);
hipStreamDestroy(stream3);
hipFree(a);
hipFree(b);
hipFree(c);
hipHostFree(h_c);
}
| c6a9ecfe6392c0cb42907ab64dde4d72da947f63.cu | #include <stdio.h>
__global__
void initWith(float num, float *a, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
a[i] = num;
}
}
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < N; i += stride)
{
result[i] = a[i] + b[i];
}
}
void checkElementsAre(float target, float *vector, int N)
{
for(int i = 0; i < N; i++)
{
if(vector[i] != target)
{
printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
exit(1);
}
}
printf("Success! All values calculated correctly.\n");
}
int main()
{
int deviceId;
int numberOfSMs;
cudaGetDevice(&deviceId);
cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId);
const int N = 2<<24;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
float *h_c;
cudaMalloc(&a, size);
cudaMalloc(&b, size);
cudaMalloc(&c, size);
cudaMallocHost(&h_c, size);
size_t threadsPerBlock;
size_t numberOfBlocks;
threadsPerBlock = 256;
numberOfBlocks = 32 * numberOfSMs;
cudaError_t addVectorsErr;
cudaError_t asyncErr;
/*
* Create 3 streams to run initialize the 3 data vectors in parallel.
*/
cudaStream_t stream1, stream2, stream3;
cudaStreamCreate(&stream1);
cudaStreamCreate(&stream2);
cudaStreamCreate(&stream3);
/*
* Give each `initWith` launch its own non-standard stream.
*/
initWith<<<numberOfBlocks, threadsPerBlock, 0, stream1>>>(3, a, N);
initWith<<<numberOfBlocks, threadsPerBlock, 0, stream2>>>(4, b, N);
initWith<<<numberOfBlocks, threadsPerBlock, 0, stream3>>>(0, c, N);
const int numberOfSegments = 4;
int segmentN = N / numberOfSegments;
size_t segmentSize = size / numberOfSegments;
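// Process the vector in segments, each with its own stream, so kernel execution and the asynchronous copy back of different segments can overlap.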
for(int i = 0; i < numberOfSegments; i++){
int segmentOffset = i * segmentN;
cudaStream_t stream;
cudaStreamCreate(&stream);
addVectorsInto<<<numberOfBlocks, threadsPerBlock, 0, stream>>>(&c[segmentOffset], &a[segmentOffset], &b[segmentOffset], segmentN);
cudaMemcpyAsync(&h_c[segmentOffset], &c[segmentOffset], segmentSize, cudaMemcpyDeviceToHost, stream);
cudaStreamDestroy(stream);
}
asyncErr = cudaDeviceSynchronize();
if(asyncErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(asyncErr));
checkElementsAre(7, h_c, N);
/*
* Destroy streams when they are no longer needed.
*/
cudaStreamDestroy(stream1);
cudaStreamDestroy(stream2);
cudaStreamDestroy(stream3);
cudaFree(a);
cudaFree(b);
cudaFree(c);
cudaFreeHost(h_c);
}
|
8e7cd28173cb38bef0d7f3b6908f5ebf13dea07a.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
namespace at { namespace native {
namespace {
void zeta_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "zeta_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t x, scalar_t q) -> scalar_t {
return zeta<scalar_t, /*is_cuda=*/true>(x, q);
});
});
}
} // namespace (anonymous)
REGISTER_DISPATCH(zeta_stub, &zeta_kernel_cuda);
}} // namespace at::native
| 8e7cd28173cb38bef0d7f3b6908f5ebf13dea07a.cu | #include <ATen/Dispatch.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/BinaryOps.h>
#include <ATen/native/Math.h>
namespace at { namespace native {
namespace {
void zeta_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES(iter.common_dtype(), "zeta_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t x, scalar_t q) -> scalar_t {
return zeta<scalar_t, /*is_cuda=*/true>(x, q);
});
});
}
} // namespace (anonymous)
REGISTER_DISPATCH(zeta_stub, &zeta_kernel_cuda);
}} // namespace at::native
|
32444080e175a59cde31e5bc88f70677e882780b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <opencv2/highgui/highgui.hpp>
#include "gcube.h"
#include "gpu_util.h"
gcube::gcube(void) {
this->d_pixels = NULL;
this->create(0, 0, 0, gfill::none);
}
gcube::gcube(size_t n_rows, size_t n_cols, size_t n_slices, uint8_t fill_type) {
this->d_pixels = NULL;
this->create(n_rows, n_cols, n_slices, fill_type);
}
gcube::gcube(const gcube &gpucube) {
this->d_pixels = NULL;
this->create(gpucube.n_rows, gpucube.n_cols, gpucube.n_slices, gfill::none);
checkCudaErrors(hipMemcpy(this->d_pixels, gpucube.d_pixels, this->n_elem * sizeof(float), hipMemcpyDeviceToDevice));
}
gcube::gcube(const std::string &fname) {
this->d_pixels = NULL;
this->load(fname);
}
//gcube::gcube(const std::vector<float> list) {
// this->d_pixels = NULL;
// this->create(list);
//}
gcube::~gcube(void) {
if (this->d_pixels) {
checkCudaErrors(hipFree(this->d_pixels));
}
}
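// Writes 'val' into every element of F; create() launches this to implement gfill::ones.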
__global__ void GPU_map_assign(float *F, float val, size_t n_elems) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n_elems) {
return;
}
F[idx] = val;
}
void gcube::create(size_t n_rows, size_t n_cols, size_t n_slices, uint8_t fill_type) {
if (this->d_pixels) {
checkCudaErrors(hipFree(d_pixels));
}
this->n_rows = n_rows;
this->n_cols = n_cols;
this->n_slices = n_slices;
this->n_elem = n_rows * n_cols * n_slices;
if (this->n_elem == 0) {
this->d_pixels = NULL;
} else {
checkCudaErrors(hipMalloc(&this->d_pixels, this->n_elem * sizeof(float)));
switch (fill_type) {
case gfill::none:
break;
case gfill::zeros:
checkCudaErrors(hipMemset(this->d_pixels, 0, this->n_elem * sizeof(float)));
break;
case gfill::ones:
hipLaunchKernelGGL(( GPU_map_assign), dim3((this->n_elem-1) / 128 + 1), dim3(128), 0, 0, this->d_pixels, 1, this->n_elem);
checkCudaErrors(hipGetLastError());
break;
default:
break;
}
}
}
gcube &gcube::operator=(const gcube &gpucube) {
this->create(gpucube.n_rows, gpucube.n_cols, gpucube.n_slices, gfill::none);
checkCudaErrors(hipMemcpy(this->d_pixels, gpucube.d_pixels, this->n_elem * sizeof(float), hipMemcpyDeviceToDevice));
return *this;
}
void gcube::load(const std::string &fname) {
this->create(cv::imread(fname));
}
void gcube::save(const std::string &fname) {
cv::imwrite(fname, this->cv_mat());
}
// Specific OpenCV interaction (to make sure that they are backwards compatible)
gcube::gcube(cv::Mat &cvMat) {
this->d_pixels = NULL;
this->create(cvMat);
}
void gcube::create(const cv::Mat &cvMat) {
this->create(cvMat.rows, cvMat.cols, cvMat.channels(), gfill::none);
float *h_pixels = new float[this->n_elem];
for (int i = 0; i < this->n_rows; i++) {
for (int j = 0; j < this->n_cols; j++) {
cv::Vec3b color = cvMat.at<cv::Vec3b>(i, j);
for (int k = 0; k < this->n_slices; k++) {
h_pixels[IJK2C(i, j, k, this->n_rows, this->n_cols)] = (float)color[k] / 255.0f;
}
}
}
checkCudaErrors(hipMemcpy(this->d_pixels, h_pixels, this->n_elem * sizeof(float), hipMemcpyHostToDevice));
free(h_pixels);
}
void gcube::create(const cv::Mat &cvMat, int x1, int x2, int y1, int y2) {
assert(x1 <= x2 && y1 <= y2 && x2 <= cvMat.cols && y2 <= cvMat.rows);
this->create(y2 - y1, x2 - x1, cvMat.channels(), gfill::none);
float *h_pixels = new float[this->n_elem];
for (int i = y1; i < y2; i++) {
for (int j = x1; j < x2; j++) {
cv::Vec3b color = cvMat.at<cv::Vec3b>(i, j);
for (int k = 0; k < this->n_slices; k++) {
h_pixels[IJK2C(i - y1, j - x1, k, this->n_rows, this->n_cols)] = (float)color[k] / 255.0f;
}
}
}
checkCudaErrors(hipMemcpy(this->d_pixels, h_pixels, this->n_elem * sizeof(float), hipMemcpyHostToDevice));
free(h_pixels);
}
cv::Mat gcube::cv_mat(void) {
cv::Mat cv_image(this->n_rows, this->n_cols, CV_8UC3);
float *h_pixels = new float[this->n_elem];
checkCudaErrors(hipMemcpy(h_pixels, this->d_pixels, this->n_elem * sizeof(float), hipMemcpyDeviceToHost));
for (int i = 0; i < this->n_rows; i++) {
for (int j = 0; j < this->n_cols; j++) {
if (this->n_slices == 1) {
cv_image.at<cv::Vec3b>(i, j) =
cv::Vec3b((int)(h_pixels[IJ2C(i, j, this->n_rows)] * 255.0f),
(int)(h_pixels[IJ2C(i, j, this->n_rows)] * 255.0f),
(int)(h_pixels[IJ2C(i, j, this->n_rows)] * 255.0f));
} else if (this->n_slices == 3) {
cv_image.at<cv::Vec3b>(i, j) =
cv::Vec3b((int)(h_pixels[IJK2C(i, j, 0, this->n_rows, this->n_cols)] * 255.0f),
(int)(h_pixels[IJK2C(i, j, 1, this->n_rows, this->n_cols)] * 255.0f),
(int)(h_pixels[IJK2C(i, j, 2, this->n_rows, this->n_cols)] * 255.0f));
}
}
}
free(h_pixels);
return cv_image;
}
gcube &gcube::operator=(const cv::Mat &cvMat) {
this->create(cvMat);
return *this;
}
| 32444080e175a59cde31e5bc88f70677e882780b.cu | #include <opencv2/highgui/highgui.hpp>
#include "gcube.h"
#include "gpu_util.h"
gcube::gcube(void) {
this->d_pixels = NULL;
this->create(0, 0, 0, gfill::none);
}
gcube::gcube(size_t n_rows, size_t n_cols, size_t n_slices, uint8_t fill_type) {
this->d_pixels = NULL;
this->create(n_rows, n_cols, n_slices, fill_type);
}
gcube::gcube(const gcube &gpucube) {
this->d_pixels = NULL;
this->create(gpucube.n_rows, gpucube.n_cols, gpucube.n_slices, gfill::none);
checkCudaErrors(cudaMemcpy(this->d_pixels, gpucube.d_pixels, this->n_elem * sizeof(float), cudaMemcpyDeviceToDevice));
}
gcube::gcube(const std::string &fname) {
this->d_pixels = NULL;
this->load(fname);
}
//gcube::gcube(const std::vector<float> list) {
// this->d_pixels = NULL;
// this->create(list);
//}
gcube::~gcube(void) {
if (this->d_pixels) {
checkCudaErrors(cudaFree(this->d_pixels));
}
}
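// Writes 'val' into every element of F; create() launches this to implement gfill::ones.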
__global__ void GPU_map_assign(float *F, float val, size_t n_elems) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= n_elems) {
return;
}
F[idx] = val;
}
void gcube::create(size_t n_rows, size_t n_cols, size_t n_slices, uint8_t fill_type) {
if (this->d_pixels) {
checkCudaErrors(cudaFree(d_pixels));
}
this->n_rows = n_rows;
this->n_cols = n_cols;
this->n_slices = n_slices;
this->n_elem = n_rows * n_cols * n_slices;
if (this->n_elem == 0) {
this->d_pixels = NULL;
} else {
checkCudaErrors(cudaMalloc(&this->d_pixels, this->n_elem * sizeof(float)));
switch (fill_type) {
case gfill::none:
break;
case gfill::zeros:
checkCudaErrors(cudaMemset(this->d_pixels, 0, this->n_elem * sizeof(float)));
break;
case gfill::ones:
GPU_map_assign<<<(this->n_elem-1) / 128 + 1, 128>>>(this->d_pixels, 1, this->n_elem);
checkCudaErrors(cudaGetLastError());
break;
default:
break;
}
}
}
gcube &gcube::operator=(const gcube &gpucube) {
this->create(gpucube.n_rows, gpucube.n_cols, gpucube.n_slices, gfill::none);
checkCudaErrors(cudaMemcpy(this->d_pixels, gpucube.d_pixels, this->n_elem * sizeof(float), cudaMemcpyDeviceToDevice));
return *this;
}
void gcube::load(const std::string &fname) {
this->create(cv::imread(fname));
}
void gcube::save(const std::string &fname) {
cv::imwrite(fname, this->cv_mat());
}
// Specific OpenCV interaction (to make sure that they are backwards compatible)
gcube::gcube(cv::Mat &cvMat) {
this->d_pixels = NULL;
this->create(cvMat);
}
void gcube::create(const cv::Mat &cvMat) {
this->create(cvMat.rows, cvMat.cols, cvMat.channels(), gfill::none);
float *h_pixels = new float[this->n_elem];
for (int i = 0; i < this->n_rows; i++) {
for (int j = 0; j < this->n_cols; j++) {
cv::Vec3b color = cvMat.at<cv::Vec3b>(i, j);
for (int k = 0; k < this->n_slices; k++) {
h_pixels[IJK2C(i, j, k, this->n_rows, this->n_cols)] = (float)color[k] / 255.0f;
}
}
}
checkCudaErrors(cudaMemcpy(this->d_pixels, h_pixels, this->n_elem * sizeof(float), cudaMemcpyHostToDevice));
free(h_pixels);
}
void gcube::create(const cv::Mat &cvMat, int x1, int x2, int y1, int y2) {
assert(x1 <= x2 && y1 <= y2 && x2 <= cvMat.cols && y2 <= cvMat.rows);
this->create(y2 - y1, x2 - x1, cvMat.channels(), gfill::none);
float *h_pixels = new float[this->n_elem];
for (int i = y1; i < y2; i++) {
for (int j = x1; j < x2; j++) {
cv::Vec3b color = cvMat.at<cv::Vec3b>(i, j);
for (int k = 0; k < this->n_slices; k++) {
h_pixels[IJK2C(i - y1, j - x1, k, this->n_rows, this->n_cols)] = (float)color[k] / 255.0f;
}
}
}
checkCudaErrors(cudaMemcpy(this->d_pixels, h_pixels, this->n_elem * sizeof(float), cudaMemcpyHostToDevice));
free(h_pixels);
}
cv::Mat gcube::cv_mat(void) {
cv::Mat cv_image(this->n_rows, this->n_cols, CV_8UC3);
float *h_pixels = new float[this->n_elem];
checkCudaErrors(cudaMemcpy(h_pixels, this->d_pixels, this->n_elem * sizeof(float), cudaMemcpyDeviceToHost));
for (int i = 0; i < this->n_rows; i++) {
for (int j = 0; j < this->n_cols; j++) {
if (this->n_slices == 1) {
cv_image.at<cv::Vec3b>(i, j) =
cv::Vec3b((int)(h_pixels[IJ2C(i, j, this->n_rows)] * 255.0f),
(int)(h_pixels[IJ2C(i, j, this->n_rows)] * 255.0f),
(int)(h_pixels[IJ2C(i, j, this->n_rows)] * 255.0f));
} else if (this->n_slices == 3) {
cv_image.at<cv::Vec3b>(i, j) =
cv::Vec3b((int)(h_pixels[IJK2C(i, j, 0, this->n_rows, this->n_cols)] * 255.0f),
(int)(h_pixels[IJK2C(i, j, 1, this->n_rows, this->n_cols)] * 255.0f),
(int)(h_pixels[IJK2C(i, j, 2, this->n_rows, this->n_cols)] * 255.0f));
}
}
}
delete[] h_pixels;
return cv_image;
}
gcube &gcube::operator=(const cv::Mat &cvMat) {
this->create(cvMat);
return *this;
}
|
b6b476afe10618420a589d67ee482e8d3470d5d0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include<sys/time.h>
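// Squares each input element through a 32-way switch on idx%32; every case performs the same f*f and the mod parameter is unused.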
__global__ void square(float * d_out, float * d_in,int mod){
int idx = threadIdx.x;
float f = d_in[idx];
switch(idx%32){
case 0: d_out[idx] = f*f;break;
case 1: d_out[idx] = f*f;break;
case 2: d_out[idx] = f*f;break;
case 3: d_out[idx] = f*f;break;
case 4: d_out[idx] = f*f;break;
case 5: d_out[idx] = f*f;break;
case 6: d_out[idx] = f*f;break;
case 7: d_out[idx] = f*f;break;
case 8: d_out[idx] = f*f;break;
case 9: d_out[idx] = f*f;break;
case 10: d_out[idx] = f*f;break;
case 11: d_out[idx] = f*f;break;
case 12: d_out[idx] = f*f;break;
case 13: d_out[idx] = f*f;break;
case 14: d_out[idx] = f*f;break;
case 15: d_out[idx] = f*f;break;
case 16: d_out[idx] = f*f;break;
case 17: d_out[idx] = f*f;break;
case 18: d_out[idx] = f*f;break;
case 19: d_out[idx] = f*f;break;
case 20: d_out[idx] = f*f;break;
case 21: d_out[idx] = f*f;break;
case 22: d_out[idx] = f*f;break;
case 23: d_out[idx] = f*f;break;
case 24: d_out[idx] = f*f;break;
case 25: d_out[idx] = f*f;break;
case 26: d_out[idx] = f*f;break;
case 27: d_out[idx] = f*f;break;
case 28: d_out[idx] = f*f;break;
case 29: d_out[idx] = f*f;break;
case 30: d_out[idx] = f*f;break;
case 31: d_out[idx] = f*f;break;
}
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// allocate GPU memory
hipMalloc((void**) &d_in, ARRAY_BYTES);
hipMalloc((void**) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
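// Sweeps grid sizes 1..99 and times 10,000,000 back-to-back asynchronous launches per size; note that no device synchronization is performed before reading the end time.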
for(int i=1;i<100;i++){
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
for(long long int j=0;j<10000000;j++)
hipLaunchKernelGGL(( square), dim3(i), dim3(ARRAY_SIZE), 0, 0, d_out, d_in,32);
gettimeofday(&tv2, NULL);
printf ("%d\t%f\n",i,
(double) (tv2.tv_usec - tv1.tv_usec) / 1000000 +
(double) (tv2.tv_sec - tv1.tv_sec));
}
// copy back the result array to the CPU
hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
hipFree(d_in);
hipFree(d_out);
return 0;
}
| b6b476afe10618420a589d67ee482e8d3470d5d0.cu | #include <stdio.h>
#include<sys/time.h>
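// Squares each input element through a 32-way switch on idx%32; every case performs the same f*f and the mod parameter is unused.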
__global__ void square(float * d_out, float * d_in,int mod){
int idx = threadIdx.x;
float f = d_in[idx];
switch(idx%32){
case 0: d_out[idx] = f*f;break;
case 1: d_out[idx] = f*f;break;
case 2: d_out[idx] = f*f;break;
case 3: d_out[idx] = f*f;break;
case 4: d_out[idx] = f*f;break;
case 5: d_out[idx] = f*f;break;
case 6: d_out[idx] = f*f;break;
case 7: d_out[idx] = f*f;break;
case 8: d_out[idx] = f*f;break;
case 9: d_out[idx] = f*f;break;
case 10: d_out[idx] = f*f;break;
case 11: d_out[idx] = f*f;break;
case 12: d_out[idx] = f*f;break;
case 13: d_out[idx] = f*f;break;
case 14: d_out[idx] = f*f;break;
case 15: d_out[idx] = f*f;break;
case 16: d_out[idx] = f*f;break;
case 17: d_out[idx] = f*f;break;
case 18: d_out[idx] = f*f;break;
case 19: d_out[idx] = f*f;break;
case 20: d_out[idx] = f*f;break;
case 21: d_out[idx] = f*f;break;
case 22: d_out[idx] = f*f;break;
case 23: d_out[idx] = f*f;break;
case 24: d_out[idx] = f*f;break;
case 25: d_out[idx] = f*f;break;
case 26: d_out[idx] = f*f;break;
case 27: d_out[idx] = f*f;break;
case 28: d_out[idx] = f*f;break;
case 29: d_out[idx] = f*f;break;
case 30: d_out[idx] = f*f;break;
case 31: d_out[idx] = f*f;break;
}
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// allocate GPU memory
cudaMalloc((void**) &d_in, ARRAY_BYTES);
cudaMalloc((void**) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
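// Sweeps grid sizes 1..99 and times 10,000,000 back-to-back asynchronous launches per size; note that no device synchronization is performed before reading the end time.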
for(int i=1;i<100;i++){
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
for(long long int j=0;j<10000000;j++)
square<<<i, ARRAY_SIZE>>>(d_out, d_in,32);
gettimeofday(&tv2, NULL);
printf ("%d\t%f\n",i,
(double) (tv2.tv_usec - tv1.tv_usec) / 1000000 +
(double) (tv2.tv_sec - tv1.tv_sec));
}
// copy back the result array to the CPU
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
2a20ced04f525d7edac22b8c6d28607fc514014c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
#include <addTest.h>
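// Transpose kernels: the *Diagonal variants reorder block coordinates along grid diagonals (a common trick to spread accesses across memory partitions), while transRow/transCol below use the straightforward mapping.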
__global__ void transformNaiveRowDiagonal(float * MatA,float * MatB,int nx,int ny)
{
int block_y=blockIdx.x;
int block_x=(blockIdx.x+blockIdx.y)%gridDim.x;
int ix=threadIdx.x+blockDim.x*block_x;
int iy=threadIdx.y+blockDim.y*block_y;
int idx_row=ix+iy*nx;
int idx_col=ix*ny+iy;
if (ix<nx && iy<ny) {
MatB[idx_col]=MatA[idx_row];
}
}
__global__ void transformNaiveColDiagonal(float * MatA,float * MatB,int nx,int ny)
{
int block_y=blockIdx.x;
int block_x=(blockIdx.x+blockIdx.y)%gridDim.x;
int ix=threadIdx.x+blockDim.x*block_x;
int iy=threadIdx.y+blockDim.y*block_y;
int idx_row=ix+iy*nx;
int idx_col=ix*ny+iy;
if (ix<nx && iy<ny) {
MatB[idx_row]=MatA[idx_col];
}
}
void transformMatrix2D_CPU(float * matA, float * matB, int nx, int ny) {
for(int i = 0; i < ny; i++) {
for(int j = 0; j < nx; j++) {
matB[i + j * ny] = matA[j + i * nx];
}
}
}
__global__ void transRow(float* matA, float *matB, int nx, int ny) {
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
int idx = iy * nx + ix;
int idy = ix * ny + iy;
if (ix < nx && iy < ny)
matB[idy] = matA[idx];
}
__global__ void transCol(float * matA, float *matB, int nx, int ny) {
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
int idx = iy * nx + ix;
int idy = ix * ny + iy;
if (ix < nx && iy < ny)
matB[idx] = matA[idy];
}
int main(void) {
// set num of input , offset of src and dst, and loop_num and kernelType
int nx = 1 << 10;
int ny = 1 << 10;
size_t N = 1 << 20;
int dev = 0;
hipSetDevice(dev);
// malloc for host
float *src_cpu = (float *)malloc(N * sizeof(float));
float *dst_cpu = (float *)malloc(N * sizeof(float));
float *dst_dev_cpu = (float *)malloc(N * sizeof(float));
memset(dst_cpu, 0, N * sizeof(float));
memset(dst_dev_cpu, 0, N * sizeof(float));
initialData(src_cpu, N);
float *src_dev = NULL;
float *dst_dev;
hipMalloc((float**)&src_dev, N * sizeof(float));
hipMalloc((float**)&dst_dev, N * sizeof(float));
// memcpy from host to device
hipMemcpy(src_dev, src_cpu, N * sizeof(float), hipMemcpyHostToDevice);
// update N;
dim3 block(32, 32);
dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
// gpu compute
hipLaunchKernelGGL(( transRow), dim3(grid), dim3(block), 0, 0, src_dev, dst_dev, nx, ny);
hipLaunchKernelGGL(( transCol), dim3(grid), dim3(block), 0, 0, src_dev, dst_dev, nx, ny);
hipLaunchKernelGGL(( transformNaiveRowDiagonal), dim3(grid), dim3(block), 0, 0, src_dev, dst_dev, nx, ny);
hipLaunchKernelGGL(( transformNaiveColDiagonal), dim3(grid), dim3(block), 0, 0, src_dev, dst_dev, nx, ny);
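// All four kernels write into the same dst_dev buffer, so the check below only verifies the contents left by the last launch.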
hipDeviceSynchronize();
hipMemcpy(dst_dev_cpu, dst_dev, N * sizeof(float), hipMemcpyDeviceToHost);
// cpu compute
transformMatrix2D_CPU(src_cpu, dst_cpu, nx, ny);
checkResult(dst_cpu, dst_dev_cpu, N);
// free device
hipFree(src_dev);
hipFree(dst_dev);
// free cpu
free(src_cpu);
free(dst_cpu);
free(dst_dev_cpu);
return 0;
}
| 2a20ced04f525d7edac22b8c6d28607fc514014c.cu | #include <iostream>
#include <math.h>
#include <addTest.h>
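// Transpose kernels: the *Diagonal variants reorder block coordinates along grid diagonals (a common trick to spread accesses across memory partitions), while transRow/transCol below use the straightforward mapping.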
__global__ void transformNaiveRowDiagonal(float * MatA,float * MatB,int nx,int ny)
{
int block_y=blockIdx.x;
int block_x=(blockIdx.x+blockIdx.y)%gridDim.x;
int ix=threadIdx.x+blockDim.x*block_x;
int iy=threadIdx.y+blockDim.y*block_y;
int idx_row=ix+iy*nx;
int idx_col=ix*ny+iy;
if (ix<nx && iy<ny) {
MatB[idx_col]=MatA[idx_row];
}
}
__global__ void transformNaiveColDiagonal(float * MatA,float * MatB,int nx,int ny)
{
int block_y=blockIdx.x;
int block_x=(blockIdx.x+blockIdx.y)%gridDim.x;
int ix=threadIdx.x+blockDim.x*block_x;
int iy=threadIdx.y+blockDim.y*block_y;
int idx_row=ix+iy*nx;
int idx_col=ix*ny+iy;
if (ix<nx && iy<ny) {
MatB[idx_row]=MatA[idx_col];
}
}
void transformMatrix2D_CPU(float * matA, float * matB, int nx, int ny) {
for(int i = 0; i < ny; i++) {
for(int j = 0; j < nx; j++) {
matB[i + j * ny] = matA[j + i * nx];
}
}
}
__global__ void transRow(float* matA, float *matB, int nx, int ny) {
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
int idx = iy * nx + ix;
int idy = ix * ny + iy;
if (ix < nx && iy < ny)
matB[idy] = matA[idx];
}
__global__ void transCol(float * matA, float *matB, int nx, int ny) {
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
int idx = iy * nx + ix;
int idy = ix * ny + iy;
if (ix < nx && iy < ny)
matB[idx] = matA[idy];
}
int main(void) {
// set num of input , offset of src and dst, and loop_num and kernelType
int nx = 1 << 10;
int ny = 1 << 10;
size_t N = 1 << 20;
int dev = 0;
cudaSetDevice(dev);
// malloc for host
float *src_cpu = (float *)malloc(N * sizeof(float));
float *dst_cpu = (float *)malloc(N * sizeof(float));
float *dst_dev_cpu = (float *)malloc(N * sizeof(float));
memset(dst_cpu, 0, N * sizeof(float));
memset(dst_dev_cpu, 0, N * sizeof(float));
initialData(src_cpu, N);
float *src_dev = NULL;
float *dst_dev;
cudaMalloc((float**)&src_dev, N * sizeof(float));
cudaMalloc((float**)&dst_dev, N * sizeof(float));
// memcpy from host to device
cudaMemcpy(src_dev, src_cpu, N * sizeof(float), cudaMemcpyHostToDevice);
// update N;
dim3 block(32, 32);
dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
// gpu compute
transRow<<<grid, block>>>(src_dev, dst_dev, nx, ny);
transCol<<<grid, block>>>(src_dev, dst_dev, nx, ny);
transformNaiveRowDiagonal<<<grid, block>>>(src_dev, dst_dev, nx, ny);
transformNaiveColDiagonal<<<grid, block>>>(src_dev, dst_dev, nx, ny);
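// All four kernels write into the same dst_dev buffer, so the check below only verifies the contents left by the last launch.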
cudaDeviceSynchronize();
cudaMemcpy(dst_dev_cpu, dst_dev, N * sizeof(float), cudaMemcpyDeviceToHost);
// cpu compute
transformMatrix2D_CPU(src_cpu, dst_cpu, nx, ny);
checkResult(dst_cpu, dst_dev_cpu, N);
// free device
cudaFree(src_dev);
cudaFree(dst_dev);
// free cpu
free(src_cpu);
free(dst_cpu);
free(dst_dev_cpu);
return 0;
}
|
50a2beb8548d6106cc05b6b7f2b322b40ba3bc0b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "sphereTransform.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *data = NULL;
hipMalloc(&data, XSIZE*YSIZE);
const unsigned int N = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((sphereTransform), dim3(gridBlock), dim3(threadBlock), 0, 0, data, N);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((sphereTransform), dim3(gridBlock), dim3(threadBlock), 0, 0, data, N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((sphereTransform), dim3(gridBlock), dim3(threadBlock), 0, 0, data, N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 50a2beb8548d6106cc05b6b7f2b322b40ba3bc0b.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "sphereTransform.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *data = NULL;
cudaMalloc(&data, XSIZE*YSIZE);
const unsigned int N = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
sphereTransform<<<gridBlock,threadBlock>>>(data,N);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
sphereTransform<<<gridBlock,threadBlock>>>(data,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
sphereTransform<<<gridBlock,threadBlock>>>(data,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
3bcc6705207899988bb17957e9eee5456373ba74.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/zmgeelltmv.cu, normal z -> d, Mon Jun 25 18:24:25 2018
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
template<bool betazero>
__global__ void
dmgeelltmv_kernel(
int num_rows,
int num_cols,
int num_vecs,
int num_cols_per_row,
double alpha,
double * dval,
magma_index_t * dcolind,
double * dx,
double beta,
double * dy)
{
extern __shared__ double dot[];
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ) {
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_D_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row; n++ ) {
int col = dcolind [ num_rows * n + row ];
double val = dval [ num_rows * n + row ];
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x + i*blockDim.x ] +=
val * dx[col + i * num_cols ];
}
for( int i=0; i<num_vecs; i++ ) {
if (betazero) {
dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] *alpha;
} else {
dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ]
* alpha + beta * dy [ row + i*num_cols ];
}
}
}
}
/**
Purpose
-------
This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
num_vec vectors on the GPU. Input format is ELL.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
num_vecs magma_int_t
number of vectors
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha double
scalar multiplier
@param[in]
dval magmaDouble_ptr
array containing values of A in ELL
@param[in]
dcolind magmaIndex_ptr
column indices of A in ELL
@param[in]
dx magmaDouble_ptr
input vector x
@param[in]
beta double
scalar multiplier
@param[out]
dy magmaDouble_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dblas
********************************************************************/
extern "C" magma_int_t
magma_dmgeelltmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magma_int_t nnz_per_row,
double alpha,
magmaDouble_ptr dval,
magmaIndex_ptr dcolind,
magmaDouble_ptr dx,
double beta,
magmaDouble_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE
* sizeof( double ); // num_vecs vectors
if (beta == MAGMA_D_ZERO) {
hipLaunchKernelGGL(( dmgeelltmv_kernel<true>), dim3(grid), dim3(threads), MEM_SIZE, queue->cuda_stream() ,
m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
} else {
hipLaunchKernelGGL(( dmgeelltmv_kernel<false>), dim3(grid), dim3(threads), MEM_SIZE, queue->cuda_stream() ,
m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
}
return MAGMA_SUCCESS;
}
| 3bcc6705207899988bb17957e9eee5456373ba74.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from sparse/blas/zmgeelltmv.cu, normal z -> d, Mon Jun 25 18:24:25 2018
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
template<bool betazero>
__global__ void
dmgeelltmv_kernel(
int num_rows,
int num_cols,
int num_vecs,
int num_cols_per_row,
double alpha,
double * dval,
magma_index_t * dcolind,
double * dx,
double beta,
double * dy)
{
extern __shared__ double dot[];
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < num_rows ) {
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_D_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row; n++ ) {
int col = dcolind [ num_rows * n + row ];
double val = dval [ num_rows * n + row ];
for( int i=0; i<num_vecs; i++ )
dot[ threadIdx.x + i*blockDim.x ] +=
val * dx[col + i * num_cols ];
}
for( int i=0; i<num_vecs; i++ ) {
if (betazero) {
dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] *alpha;
} else {
dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ]
* alpha + beta * dy [ row + i*num_cols ];
}
}
}
}
/**
Purpose
-------
This routine computes Y = alpha * A * X + beta * Y for X and Y sets of
num_vec vectors on the GPU. Input format is ELL.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
num_vecs magma_int_t
number of vectors
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha double
scalar multiplier
@param[in]
dval magmaDouble_ptr
array containing values of A in ELL
@param[in]
dcolind magmaIndex_ptr
columnindices of A in ELL
@param[in]
dx magmaDouble_ptr
input vector x
@param[in]
beta double
scalar multiplier
@param[out]
dy magmaDouble_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dblas
********************************************************************/
extern "C" magma_int_t
magma_dmgeelltmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t num_vecs,
magma_int_t nnz_per_row,
double alpha,
magmaDouble_ptr dval,
magmaIndex_ptr dcolind,
magmaDouble_ptr dx,
double beta,
magmaDouble_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE
* sizeof( double ); // num_vecs vectors
if (beta == MAGMA_D_ZERO) {
dmgeelltmv_kernel<true><<< grid, threads, MEM_SIZE, queue->cuda_stream() >>>
( m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
} else {
dmgeelltmv_kernel<false><<< grid, threads, MEM_SIZE, queue->cuda_stream() >>>
( m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
}
return MAGMA_SUCCESS;
}
|
9265b6c96e73560ee7c27bd437f0c8c47271222c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* This software is Copyright (c) 2011 Lukas Odzioba
* <lukas dot odzioba at gmail dot com>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification, are permitted.
* Based on Alain Espinosa implementation http://openwall.info/wiki/john/MSCash
*/
#include <stdio.h>
#include "../cuda_mscash.h"
#include "cuda_common.cuh"
extern "C" void cuda_mscash(mscash_password *, mscash_hash *, mscash_salt *);
__constant__ mscash_salt cuda_salt[1];
__device__ static void md4_crypt(uint32_t * output, uint32_t * nt_buffer)
{
unsigned int a = INIT_A;
unsigned int b = INIT_B;
unsigned int c = INIT_C;
unsigned int d = INIT_D;
/* Round 1 */
a += (d ^ (b & (c ^ d))) + nt_buffer[0];
a = (a << 3) | (a >> 29);
d += (c ^ (a & (b ^ c))) + nt_buffer[1];
d = (d << 7) | (d >> 25);
c += (b ^ (d & (a ^ b))) + nt_buffer[2];
c = (c << 11) | (c >> 21);
b += (a ^ (c & (d ^ a))) + nt_buffer[3];
b = (b << 19) | (b >> 13);
a += (d ^ (b & (c ^ d))) + nt_buffer[4];
a = (a << 3) | (a >> 29);
d += (c ^ (a & (b ^ c))) + nt_buffer[5];
d = (d << 7) | (d >> 25);
c += (b ^ (d & (a ^ b))) + nt_buffer[6];
c = (c << 11) | (c >> 21);
b += (a ^ (c & (d ^ a))) + nt_buffer[7];
b = (b << 19) | (b >> 13);
a += (d ^ (b & (c ^ d))) + nt_buffer[8];
a = (a << 3) | (a >> 29);
d += (c ^ (a & (b ^ c))) + nt_buffer[9];
d = (d << 7) | (d >> 25);
c += (b ^ (d & (a ^ b))) + nt_buffer[10];
c = (c << 11) | (c >> 21);
b += (a ^ (c & (d ^ a))) + nt_buffer[11];
b = (b << 19) | (b >> 13);
a += (d ^ (b & (c ^ d))) + nt_buffer[12];
a = (a << 3) | (a >> 29);
d += (c ^ (a & (b ^ c))) + nt_buffer[13];
d = (d << 7) | (d >> 25);
c += (b ^ (d & (a ^ b))) + nt_buffer[14];
c = (c << 11) | (c >> 21);
b += (a ^ (c & (d ^ a))) + nt_buffer[15];
b = (b << 19) | (b >> 13);
/* Round 2 */
a += ((b & (c | d)) | (c & d)) + nt_buffer[0] + SQRT_2;
a = (a << 3) | (a >> 29);
d += ((a & (b | c)) | (b & c)) + nt_buffer[4] + SQRT_2;
d = (d << 5) | (d >> 27);
c += ((d & (a | b)) | (a & b)) + nt_buffer[8] + SQRT_2;
c = (c << 9) | (c >> 23);
b += ((c & (d | a)) | (d & a)) + nt_buffer[12] + SQRT_2;
b = (b << 13) | (b >> 19);
a += ((b & (c | d)) | (c & d)) + nt_buffer[1] + SQRT_2;
a = (a << 3) | (a >> 29);
d += ((a & (b | c)) | (b & c)) + nt_buffer[5] + SQRT_2;
d = (d << 5) | (d >> 27);
c += ((d & (a | b)) | (a & b)) + nt_buffer[9] + SQRT_2;
c = (c << 9) | (c >> 23);
b += ((c & (d | a)) | (d & a)) + nt_buffer[13] + SQRT_2;
b = (b << 13) | (b >> 19);
a += ((b & (c | d)) | (c & d)) + nt_buffer[2] + SQRT_2;
a = (a << 3) | (a >> 29);
d += ((a & (b | c)) | (b & c)) + nt_buffer[6] + SQRT_2;
d = (d << 5) | (d >> 27);
c += ((d & (a | b)) | (a & b)) + nt_buffer[10] + SQRT_2;
c = (c << 9) | (c >> 23);
b += ((c & (d | a)) | (d & a)) + nt_buffer[14] + SQRT_2;
b = (b << 13) | (b >> 19);
a += ((b & (c | d)) | (c & d)) + nt_buffer[3] + SQRT_2;
a = (a << 3) | (a >> 29);
d += ((a & (b | c)) | (b & c)) + nt_buffer[7] + SQRT_2;
d = (d << 5) | (d >> 27);
c += ((d & (a | b)) | (a & b)) + nt_buffer[11] + SQRT_2;
c = (c << 9) | (c >> 23);
b += ((c & (d | a)) | (d & a)) + nt_buffer[15] + SQRT_2;
b = (b << 13) | (b >> 19);
/* Round 3 */
a += (d ^ c ^ b) + nt_buffer[0] + SQRT_3;
a = (a << 3) | (a >> 29);
d += (c ^ b ^ a) + nt_buffer[8] + SQRT_3;
d = (d << 9) | (d >> 23);
c += (b ^ a ^ d) + nt_buffer[4] + SQRT_3;
c = (c << 11) | (c >> 21);
b += (a ^ d ^ c) + nt_buffer[12] + SQRT_3;
b = (b << 15) | (b >> 17);
a += (d ^ c ^ b) + nt_buffer[2] + SQRT_3;
a = (a << 3) | (a >> 29);
d += (c ^ b ^ a) + nt_buffer[10] + SQRT_3;
d = (d << 9) | (d >> 23);
c += (b ^ a ^ d) + nt_buffer[6] + SQRT_3;
c = (c << 11) | (c >> 21);
b += (a ^ d ^ c) + nt_buffer[14] + SQRT_3;
b = (b << 15) | (b >> 17);
a += (d ^ c ^ b) + nt_buffer[1] + SQRT_3;
a = (a << 3) | (a >> 29);
d += (c ^ b ^ a) + nt_buffer[9] + SQRT_3;
d = (d << 9) | (d >> 23);
c += (b ^ a ^ d) + nt_buffer[5] + SQRT_3;
c = (c << 11) | (c >> 21);
b += (a ^ d ^ c) + nt_buffer[13] + SQRT_3;
b = (b << 15) | (b >> 17);
a += (d ^ c ^ b) + nt_buffer[3] + SQRT_3;
a = (a << 3) | (a >> 29);
d += (c ^ b ^ a) + nt_buffer[11] + SQRT_3;
d = (d << 9) | (d >> 23);
c += (b ^ a ^ d) + nt_buffer[7] + SQRT_3;
c = (c << 11) | (c >> 21);
b += (a ^ d ^ c) + nt_buffer[15] + SQRT_3;
b = (b << 15) | (b >> 17);
output[0] = a + INIT_A;
output[1] = b + INIT_B;
output[2] = c + INIT_C;
output[3] = d + INIT_D;
}
__device__ void prepare_key(uint8_t * key, int length, uint32_t * nt_buffer)
{
int i = 0;
for (i = 0; i < 16; i++)
nt_buffer[i] = 0;
for (i = 0; i < length / 2; i++)
nt_buffer[i] = key[2 * i] | (key[2 * i + 1] << 16);
if (length % 2 == 1)
nt_buffer[i] = key[length - 1] | 0x800000;
else
nt_buffer[i] = 0x80;
nt_buffer[14] = length << 4;
}
__device__ void prepare_login(uint8_t * login, int length,
uint32_t * login_buffer)
{
int i = 0;
for (i = 0; i < 12; i++)
login_buffer[i] = 0;
for (i = 0; i < length / 2; i++)
login_buffer[i] = login[2 * i] | (login[2 * i + 1] << 16);
if (length % 2 == 1)
login_buffer[i] = login[length - 1] | 0x800000;
else
login_buffer[i] = 0x80;
login_buffer[10] = (length << 4) + 128;
}
__global__ void mscash_kernel(mscash_password * inbuffer,
mscash_hash * outbuffer)
{
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
uint8_t *login =(uint8_t*) cuda_salt[0].salt;
uint8_t loginlength = cuda_salt[0].length;
uint8_t *password = inbuffer[idx].v;
uint8_t passwordlength = inbuffer[idx].length;
int i;
uint32_t nt_buffer[16];
uint32_t login_buffer[12];
uint32_t output[4];
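// MS Cache scheme: the first MD4 over the prepared password yields the NT hash; the second MD4 is taken over that hash followed by the prepared user name (the salt).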
prepare_key(password, passwordlength, nt_buffer);
md4_crypt(output, nt_buffer);
memcpy(nt_buffer, output, 4 * 4);
prepare_login(login, loginlength, login_buffer);
memcpy(nt_buffer + 4, login_buffer, 12 * 4);
md4_crypt(output, nt_buffer);
for (i = 0; i < 4; i++)
outbuffer[idx].v[i] = output[i];
}
__host__ void cuda_mscash(mscash_password * inbuffer, mscash_hash * outbuffer,
mscash_salt * host_salt)
{
HANDLE_ERROR(hipMemcpyToSymbol(cuda_salt, host_salt,
sizeof(mscash_salt)));
mscash_password *cuda_inbuffer;
mscash_hash *cuda_outbuffer;
size_t insize = sizeof(mscash_password) * KEYS_PER_CRYPT;
size_t outsize = sizeof(mscash_hash) * KEYS_PER_CRYPT;
HANDLE_ERROR(hipMalloc(&cuda_inbuffer, insize));
HANDLE_ERROR(hipMalloc(&cuda_outbuffer, outsize));
HANDLE_ERROR(hipMemcpy(cuda_inbuffer, inbuffer, insize,
hipMemcpyHostToDevice));
hipLaunchKernelGGL(( mscash_kernel) , dim3(BLOCKS), dim3(THREADS) , 0, 0, cuda_inbuffer, cuda_outbuffer);
HANDLE_ERROR(hipGetLastError());
HANDLE_ERROR(hipMemcpy(outbuffer, cuda_outbuffer, outsize,
hipMemcpyDeviceToHost));
HANDLE_ERROR(hipFree(cuda_inbuffer));
HANDLE_ERROR(hipFree(cuda_outbuffer));
}
| 9265b6c96e73560ee7c27bd437f0c8c47271222c.cu | /*
* This software is Copyright (c) 2011 Lukas Odzioba
* <lukas dot odzioba at gmail dot com>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification, are permitted.
* Based on Alain Espinosa implementation http://openwall.info/wiki/john/MSCash
*/
#include <stdio.h>
#include "../cuda_mscash.h"
#include "cuda_common.cuh"
extern "C" void cuda_mscash(mscash_password *, mscash_hash *, mscash_salt *);
__constant__ mscash_salt cuda_salt[1];
__device__ static void md4_crypt(uint32_t * output, uint32_t * nt_buffer)
{
unsigned int a = INIT_A;
unsigned int b = INIT_B;
unsigned int c = INIT_C;
unsigned int d = INIT_D;
/* Round 1 */
a += (d ^ (b & (c ^ d))) + nt_buffer[0];
a = (a << 3) | (a >> 29);
d += (c ^ (a & (b ^ c))) + nt_buffer[1];
d = (d << 7) | (d >> 25);
c += (b ^ (d & (a ^ b))) + nt_buffer[2];
c = (c << 11) | (c >> 21);
b += (a ^ (c & (d ^ a))) + nt_buffer[3];
b = (b << 19) | (b >> 13);
a += (d ^ (b & (c ^ d))) + nt_buffer[4];
a = (a << 3) | (a >> 29);
d += (c ^ (a & (b ^ c))) + nt_buffer[5];
d = (d << 7) | (d >> 25);
c += (b ^ (d & (a ^ b))) + nt_buffer[6];
c = (c << 11) | (c >> 21);
b += (a ^ (c & (d ^ a))) + nt_buffer[7];
b = (b << 19) | (b >> 13);
a += (d ^ (b & (c ^ d))) + nt_buffer[8];
a = (a << 3) | (a >> 29);
d += (c ^ (a & (b ^ c))) + nt_buffer[9];
d = (d << 7) | (d >> 25);
c += (b ^ (d & (a ^ b))) + nt_buffer[10];
c = (c << 11) | (c >> 21);
b += (a ^ (c & (d ^ a))) + nt_buffer[11];
b = (b << 19) | (b >> 13);
a += (d ^ (b & (c ^ d))) + nt_buffer[12];
a = (a << 3) | (a >> 29);
d += (c ^ (a & (b ^ c))) + nt_buffer[13];
d = (d << 7) | (d >> 25);
c += (b ^ (d & (a ^ b))) + nt_buffer[14];
c = (c << 11) | (c >> 21);
b += (a ^ (c & (d ^ a))) + nt_buffer[15];
b = (b << 19) | (b >> 13);
/* Round 2 */
a += ((b & (c | d)) | (c & d)) + nt_buffer[0] + SQRT_2;
a = (a << 3) | (a >> 29);
d += ((a & (b | c)) | (b & c)) + nt_buffer[4] + SQRT_2;
d = (d << 5) | (d >> 27);
c += ((d & (a | b)) | (a & b)) + nt_buffer[8] + SQRT_2;
c = (c << 9) | (c >> 23);
b += ((c & (d | a)) | (d & a)) + nt_buffer[12] + SQRT_2;
b = (b << 13) | (b >> 19);
a += ((b & (c | d)) | (c & d)) + nt_buffer[1] + SQRT_2;
a = (a << 3) | (a >> 29);
d += ((a & (b | c)) | (b & c)) + nt_buffer[5] + SQRT_2;
d = (d << 5) | (d >> 27);
c += ((d & (a | b)) | (a & b)) + nt_buffer[9] + SQRT_2;
c = (c << 9) | (c >> 23);
b += ((c & (d | a)) | (d & a)) + nt_buffer[13] + SQRT_2;
b = (b << 13) | (b >> 19);
a += ((b & (c | d)) | (c & d)) + nt_buffer[2] + SQRT_2;
a = (a << 3) | (a >> 29);
d += ((a & (b | c)) | (b & c)) + nt_buffer[6] + SQRT_2;
d = (d << 5) | (d >> 27);
c += ((d & (a | b)) | (a & b)) + nt_buffer[10] + SQRT_2;
c = (c << 9) | (c >> 23);
b += ((c & (d | a)) | (d & a)) + nt_buffer[14] + SQRT_2;
b = (b << 13) | (b >> 19);
a += ((b & (c | d)) | (c & d)) + nt_buffer[3] + SQRT_2;
a = (a << 3) | (a >> 29);
d += ((a & (b | c)) | (b & c)) + nt_buffer[7] + SQRT_2;
d = (d << 5) | (d >> 27);
c += ((d & (a | b)) | (a & b)) + nt_buffer[11] + SQRT_2;
c = (c << 9) | (c >> 23);
b += ((c & (d | a)) | (d & a)) + nt_buffer[15] + SQRT_2;
b = (b << 13) | (b >> 19);
/* Round 3 */
a += (d ^ c ^ b) + nt_buffer[0] + SQRT_3;
a = (a << 3) | (a >> 29);
d += (c ^ b ^ a) + nt_buffer[8] + SQRT_3;
d = (d << 9) | (d >> 23);
c += (b ^ a ^ d) + nt_buffer[4] + SQRT_3;
c = (c << 11) | (c >> 21);
b += (a ^ d ^ c) + nt_buffer[12] + SQRT_3;
b = (b << 15) | (b >> 17);
a += (d ^ c ^ b) + nt_buffer[2] + SQRT_3;
a = (a << 3) | (a >> 29);
d += (c ^ b ^ a) + nt_buffer[10] + SQRT_3;
d = (d << 9) | (d >> 23);
c += (b ^ a ^ d) + nt_buffer[6] + SQRT_3;
c = (c << 11) | (c >> 21);
b += (a ^ d ^ c) + nt_buffer[14] + SQRT_3;
b = (b << 15) | (b >> 17);
a += (d ^ c ^ b) + nt_buffer[1] + SQRT_3;
a = (a << 3) | (a >> 29);
d += (c ^ b ^ a) + nt_buffer[9] + SQRT_3;
d = (d << 9) | (d >> 23);
c += (b ^ a ^ d) + nt_buffer[5] + SQRT_3;
c = (c << 11) | (c >> 21);
b += (a ^ d ^ c) + nt_buffer[13] + SQRT_3;
b = (b << 15) | (b >> 17);
a += (d ^ c ^ b) + nt_buffer[3] + SQRT_3;
a = (a << 3) | (a >> 29);
d += (c ^ b ^ a) + nt_buffer[11] + SQRT_3;
d = (d << 9) | (d >> 23);
c += (b ^ a ^ d) + nt_buffer[7] + SQRT_3;
c = (c << 11) | (c >> 21);
b += (a ^ d ^ c) + nt_buffer[15] + SQRT_3;
b = (b << 15) | (b >> 17);
output[0] = a + INIT_A;
output[1] = b + INIT_B;
output[2] = c + INIT_C;
output[3] = d + INIT_D;
}
__device__ void prepare_key(uint8_t * key, int length, uint32_t * nt_buffer)
{
int i = 0;
for (i = 0; i < 16; i++)
nt_buffer[i] = 0;
for (i = 0; i < length / 2; i++)
nt_buffer[i] = key[2 * i] | (key[2 * i + 1] << 16);
if (length % 2 == 1)
nt_buffer[i] = key[length - 1] | 0x800000;
else
nt_buffer[i] = 0x80;
nt_buffer[14] = length << 4;
}
__device__ void prepare_login(uint8_t * login, int length,
uint32_t * login_buffer)
{
int i = 0;
for (i = 0; i < 12; i++)
login_buffer[i] = 0;
for (i = 0; i < length / 2; i++)
login_buffer[i] = login[2 * i] | (login[2 * i + 1] << 16);
if (length % 2 == 1)
login_buffer[i] = login[length - 1] | 0x800000;
else
login_buffer[i] = 0x80;
login_buffer[10] = (length << 4) + 128;
}
__global__ void mscash_kernel(mscash_password * inbuffer,
mscash_hash * outbuffer)
{
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
uint8_t *login =(uint8_t*) cuda_salt[0].salt;
uint8_t loginlength = cuda_salt[0].length;
uint8_t *password = inbuffer[idx].v;
uint8_t passwordlength = inbuffer[idx].length;
int i;
uint32_t nt_buffer[16];
uint32_t login_buffer[12];
uint32_t output[4];
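// MS Cache scheme: the first MD4 over the prepared password yields the NT hash; the second MD4 is taken over that hash followed by the prepared user name (the salt).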
prepare_key(password, passwordlength, nt_buffer);
md4_crypt(output, nt_buffer);
memcpy(nt_buffer, output, 4 * 4);
prepare_login(login, loginlength, login_buffer);
memcpy(nt_buffer + 4, login_buffer, 12 * 4);
md4_crypt(output, nt_buffer);
for (i = 0; i < 4; i++)
outbuffer[idx].v[i] = output[i];
}
__host__ void cuda_mscash(mscash_password * inbuffer, mscash_hash * outbuffer,
mscash_salt * host_salt)
{
HANDLE_ERROR(cudaMemcpyToSymbol(cuda_salt, host_salt,
sizeof(mscash_salt)));
mscash_password *cuda_inbuffer;
mscash_hash *cuda_outbuffer;
size_t insize = sizeof(mscash_password) * KEYS_PER_CRYPT;
size_t outsize = sizeof(mscash_hash) * KEYS_PER_CRYPT;
HANDLE_ERROR(cudaMalloc(&cuda_inbuffer, insize));
HANDLE_ERROR(cudaMalloc(&cuda_outbuffer, outsize));
HANDLE_ERROR(cudaMemcpy(cuda_inbuffer, inbuffer, insize,
cudaMemcpyHostToDevice));
mscash_kernel <<< BLOCKS, THREADS >>> (cuda_inbuffer, cuda_outbuffer);
HANDLE_ERROR(cudaGetLastError());
HANDLE_ERROR(cudaMemcpy(outbuffer, cuda_outbuffer, outsize,
cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaFree(cuda_inbuffer));
HANDLE_ERROR(cudaFree(cuda_outbuffer));
}
|
ec7166f8f1bf6e7c0201cf0b0946758f02f3ce91.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include <iostream>
#include <sstream>
#include <string>
#include <iterator>
#include <math.h>
#include <iomanip>
#include <stdlib.h>
#include "time.h"
#include <algorithm>
#include <iterator>
#include <fstream>
#include <numeric>
// user defined headers
#include <Mesh.h>
#include <Element.h>
#include <dynaInput.h>
#include <Surface.h>
#include <BCManager.h>
#include <HeatSolverManager.h>
#include <vtuWriter.h>
#include <FluxManager.h>
#include <ConvManager.h>
#include <RadManager.h>
#include <DomainManager.h>
#include <MaterialManager.h>
#include <ThermalIsoManager.h>
#include <vtuBinWriter.h>
#include <deviceHeader.h>
using namespace std;
int main(int arg, char *argv[])
{
if (arg != 2)
{
cout << "Missing parameter: Mesh file name" << endl;
exit(1);
return 0;
}
//for clocking
float starttime, endtime, preprocessTimer;
int numSteps, numOut, numOutSteps;
string meshName = argv[1];
Mesh meshObj(meshName);
// Vtk indentation rule
const std::string SecondEleWidth_ = " ";
const std::string ThirdEleWidth_ = " ";
const std::string FourthEleWidth_ = " ";
const std::string FifthEleWidth_ = " ";
const std::string SixthEleWidth_ = " ";
preprocessTimer =0.0;
// get mesh
meshObj.getDomainInfo();
// get user parameters
meshObj.assignParameters();
// Title output
cout << "|==================================================================|\n";
cout << "| Northwestern University Finite Element Software |\n";
cout << "| G.A.M.M.A Software |\n";
cout << "| Generalized Analysis of Multiscale and Multiphysics Applications |\n";
cout << "| |\n";
cout << "| Developers: |\n";
cout << "| Stephen Lin |\n";
cout << "| Jacob Smith |\n";
cout << "| Kevontrez Jones |\n";
cout << "|==================================================================|\n";
cout << "\n\n";
cout << "===============================================================\n";
cout << "\tUser Input Information\n\n";
cout << SecondEleWidth_ << "Reading in Mesh File: " << argv[1] << endl;
cout << SecondEleWidth_ << "Writing out to output database name: " << meshObj.outFileName_ << endl;
cout << SecondEleWidth_ << "Number of probe points selected: " << meshObj.probeNames_.size() << endl;
if (meshObj.isLENS_)
{
cout << SecondEleWidth_ << "Toolpath file name: " << meshObj.toolFileName_ << endl;
}
if (meshObj.calcEnergy_)
{
cout << SecondEleWidth_ << "Energy evaluator file name: " << meshObj.energyFileName_ << endl;
}
cout << SecondEleWidth_ << "Input parameter list: " << endl;
for (map<string,float>::iterator it = meshObj.paramValues_.begin();
it != meshObj.paramValues_.end(); it++)
{
string paramName = it->first;
float value = it->second;
cout << ThirdEleWidth_ << paramName << " \t: " << value << endl;
}//end for(it)
cout << "\n\n";
// Set up domain (connectivity etc.)
DomainManager *domainMgr = new DomainManager(&meshObj);
domainMgr->initializeDomain();
cout << "\n";
cout << "\n";
///////////////////////////////////////////////////////
// Begin simulation setup //
///////////////////////////////////////////////////////
cout << "===============================================================\n";
cout << "TIMING FOR SIMULATION SETUP\n\n";
preprocessTimer = 0.0;
// Create solver managers here
starttime = clock();
HeatSolverManager *heatMgr = new HeatSolverManager(domainMgr, &meshObj);
heatMgr->initializeSystem();
endtime = clock();
preprocessTimer += (float) (endtime - starttime) / CLOCKS_PER_SEC;
cout << "\nTOTAL TIME FOR SIMULATION SETUP\n";
cout << SecondEleWidth_ << preprocessTimer << endl;
cout << "===============================================================\n";
// Calculate outputs/approximate # of timesteps
numSteps = (int)ceil(meshObj.finalTime_/heatMgr->dt_);
numOut = (int)ceil(meshObj.finalTime_/meshObj.outTime_);
numOutSteps = (int)ceil(meshObj.outTime_/heatMgr->dt_);
cout << "\n\n";
cout << "===============================================================\n";
cout << "Mesh and Timestep Statistics:\n";
cout << "\n";
cout << SecondEleWidth_ << "Number of Elements: " << domainMgr->nel_ << endl;
cout << SecondEleWidth_ << "Number of Nodes: " << domainMgr->nn_ << endl;
// cout << SecondEleWidth_ << "Number of Surfaces: " << surfaceList_.size() + staticSurfList_.size() <<endl;
cout << SecondEleWidth_ << "Calculated minimum time step size: " << heatMgr->dt_ << endl;
cout << SecondEleWidth_ << "Approximate # of timesteps: " << numSteps << endl;
cout << SecondEleWidth_ << "Approximate # of outputs: " << numOut << endl;
cout << SecondEleWidth_ << "Approximate # of steps between outputs: " << numOutSteps << endl;
cout << "\n";
cout << "End Mesh and Timestep Statistics:\n";
cout << "===============================================================\n";
cout << "\n\n";
///////////////////////////////////////////////////////
// Time stepping //
///////////////////////////////////////////////////////
float dumpTime = 0.0;
float simStart = clock();
domainMgr->currTime_ = 0.0;
float outTrack = 0.0;
float simTime = meshObj.finalTime_;
int outCt = 0;
string extName = ".vtu";
string outFile = meshObj.outFileName_ + to_string(outCt) + extName;
//Copy element Data to GPU
elementData elemData;
createDataOnDevice(domainMgr, elemData, heatMgr);
initializeStiffnessOnD(elemData);
// set up output manager
vtuBinWriter * vtuMgr = new vtuBinWriter(domainMgr, heatMgr, elemData, outFile);
vtuMgr->execute();
//------------------START TIMER---------------------------------------
//Start timer
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Start record
hipEventRecord(start, 0);
//-----------------------------------------------------------------------
// time integrator
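// Each step advances the host-side solver and the corresponding device kernels, then compareTemp checks the device temperatures against the host result (heatMgr->thetaN_).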
while (domainMgr->currTime_ <= simTime)
{
// update time counters
outTrack += heatMgr->dt_;
domainMgr->currTime_ += heatMgr->dt_;
heatMgr->pre_work();
heatMgr->updateCap();
heatMgr->integrateForce();
//heatMgr->heatBCManager_->applyFluxes();
heatMgr->advance_time_step();
heatMgr->post_work();
//----------------------------------------------
clearDeviceData(elemData);
updateMassOnD(elemData, domainMgr);
updateIntForceOnD(elemData, domainMgr);
updateFluxKernel(elemData, domainMgr);
advanceTimeKernel(elemData, domainMgr);
dirichletBCKernel(elemData);
CopyToHost(elemData);
//compareMass(elemData, heatMgr->Mvec_);
//compareStiff(elemData, domainMgr->elementList_);
//compareIntForce(elemData, heatMgr->rhs_);
//compareFlux(elemData, heatMgr->rhs_);
compareTemp(elemData, heatMgr->thetaN_);
//------------------------------------------------
// File Manager
if (outTrack >= meshObj.outTime_)
{
outCt++;
outFile = meshObj.outFileName_ + to_string(outCt) + extName;
starttime = clock();
vtuMgr->execute();
endtime = clock();
outTrack = 0.0;
cout << "===============================================================\n";
cout << left <<
" Output at time: " << setw(43) << domainMgr->currTime_
<< "|" << endl;
cout << left <<
" Percentage done: " << setw(42) << (domainMgr->currTime_/simTime) * 100.0
<< "|" << endl;
cout << left <<
" Timer for outputting files (ascii) " << setw(24)
<< (float) (endtime - starttime) / CLOCKS_PER_SEC
<< "|" << endl;
dumpTime += (float) (endtime - starttime) / CLOCKS_PER_SEC;
cout << "===============================================================\n";
}
// no longer beginning of simulation
if (domainMgr->isInit_)
{
domainMgr->isInit_ = false;
}
}//end for(t)
//----------------------------END TIMER -----------------------------------
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop); // that's our time!
// Clean up:
hipEventDestroy(start);
hipEventDestroy(stop);
std::cout << "Process Took " << (float)elapsedTime / 1000 << " seconds" << std::endl;
//-------------------------------------------------------------------------
CopyToHost(elemData);
FreeDevice(elemData);
//compareMass(elemData, heatMgr->Mvec_);
//compareStiff(elemData, domainMgr->elementList_);
//compareIntForce(elemData, heatMgr->rhs_);
//compareFlux(elemData, heatMgr->rhs_);
//compareTemp(elemData, heatMgr->thetaN_);
outCt++;
vtuMgr->execute();
float simEnd = clock();
cout << "\n\n";
cout << "===============================================================\n";
cout << "Simulation Finished: cpu time statistics:\n";
cout << "\n";
cout << SecondEleWidth_ << "Final simulation time: " << domainMgr->currTime_ << endl;
cout << SecondEleWidth_ << "Total compute time: "
<< (float) (simEnd - simStart) / CLOCKS_PER_SEC << endl;
cout << SecondEleWidth_ << "Total time for dumping data: "
<< dumpTime << endl;
cout << SecondEleWidth_ << "Total time for probing data: "
<< heatMgr->probeTime_ << endl;
cout << "\n";
cout << "End cpu time statistics:\n";
cout << "===============================================================\n";
}
| ec7166f8f1bf6e7c0201cf0b0946758f02f3ce91.cu | #include <vector>
#include <iostream>
#include <sstream>
#include <string>
#include <iterator>
#include <math.h>
#include <iomanip>
#include <stdlib.h>
#include "time.h"
#include <algorithm>
#include <iterator>
#include <fstream>
#include <numeric>
// user defined headers
#include <Mesh.h>
#include <Element.h>
#include <dynaInput.h>
#include <Surface.h>
#include <BCManager.h>
#include <HeatSolverManager.h>
#include <vtuWriter.h>
#include <FluxManager.h>
#include <ConvManager.h>
#include <RadManager.h>
#include <DomainManager.h>
#include <MaterialManager.h>
#include <ThermalIsoManager.h>
#include <vtuBinWriter.h>
#include <deviceHeader.h>
using namespace std;
int main(int arg, char *argv[])
{
if (arg != 2)
{
cout << "Missing parameter: Mesh file name" << endl;
exit(1);
return 0;
}
//for clocking
float starttime, endtime, preprocessTimer;
int numSteps, numOut, numOutSteps;
string meshName = argv[1];
Mesh meshObj(meshName);
// Vtk indentation rule
const std::string SecondEleWidth_ = " ";
const std::string ThirdEleWidth_ = " ";
const std::string FourthEleWidth_ = " ";
const std::string FifthEleWidth_ = " ";
const std::string SixthEleWidth_ = " ";
preprocessTimer =0.0;
// get mesh
meshObj.getDomainInfo();
// get user parameters
meshObj.assignParameters();
// Title output
cout << "|==================================================================|\n";
cout << "| Northwestern University Finite Element Software |\n";
cout << "| G.A.M.M.A Software |\n";
cout << "| Generalized Analysis of Multiscale and Multiphysics Applications |\n";
cout << "| |\n";
cout << "| Developers: |\n";
cout << "| Stephen Lin |\n";
cout << "| Jacob Smith |\n";
cout << "| Kevontrez Jones |\n";
cout << "|==================================================================|\n";
cout << "\n\n";
cout << "===============================================================\n";
cout << "\tUser Input Information\n\n";
cout << SecondEleWidth_ << "Reading in Mesh File: " << argv[1] << endl;
cout << SecondEleWidth_ << "Writing out to output database name: " << meshObj.outFileName_ << endl;
cout << SecondEleWidth_ << "Number of probe points selected: " << meshObj.probeNames_.size() << endl;
if (meshObj.isLENS_)
{
cout << SecondEleWidth_ << "Toolpath file name: " << meshObj.toolFileName_ << endl;
}
if (meshObj.calcEnergy_)
{
cout << SecondEleWidth_ << "Energy evaluator file name: " << meshObj.energyFileName_ << endl;
}
cout << SecondEleWidth_ << "Input parameter list: " << endl;
for (map<string,float>::iterator it = meshObj.paramValues_.begin();
it != meshObj.paramValues_.end(); it++)
{
string paramName = it->first;
float value = it->second;
cout << ThirdEleWidth_ << paramName << " \t: " << value << endl;
}//end for(it)
cout << "\n\n";
// Set up domain (connectivity etc.)
DomainManager *domainMgr = new DomainManager(&meshObj);
domainMgr->initializeDomain();
cout << "\n";
cout << "\n";
///////////////////////////////////////////////////////
// Begin simulation setup //
///////////////////////////////////////////////////////
cout << "===============================================================\n";
cout << "TIMING FOR SIMULATION SETUP\n\n";
preprocessTimer = 0.0;
// Create solver managers here
starttime = clock();
HeatSolverManager *heatMgr = new HeatSolverManager(domainMgr, &meshObj);
heatMgr->initializeSystem();
endtime = clock();
preprocessTimer += (float) (endtime - starttime) / CLOCKS_PER_SEC;
cout << "\nTOTAL TIME FOR SIMULATION SETUP\n";
cout << SecondEleWidth_ << preprocessTimer << endl;
cout << "===============================================================\n";
// Calculate outputs/approximate # of timesteps
numSteps = (int)ceil(meshObj.finalTime_/heatMgr->dt_);
numOut = (int)ceil(meshObj.finalTime_/meshObj.outTime_);
numOutSteps = (int)ceil(meshObj.outTime_/heatMgr->dt_);
cout << "\n\n";
cout << "===============================================================\n";
cout << "Mesh and Timestep Statistics:\n";
cout << "\n";
cout << SecondEleWidth_ << "Number of Elements: " << domainMgr->nel_ << endl;
cout << SecondEleWidth_ << "Number of Nodes: " << domainMgr->nn_ << endl;
// cout << SecondEleWidth_ << "Number of Surfaces: " << surfaceList_.size() + staticSurfList_.size() <<endl;
cout << SecondEleWidth_ << "Calculated minimum time step size: " << heatMgr->dt_ << endl;
cout << SecondEleWidth_ << "Approximate # of timesteps: " << numSteps << endl;
cout << SecondEleWidth_ << "Approximate # of outputs: " << numOut << endl;
cout << SecondEleWidth_ << "Approximate # of steps between outputs: " << numOutSteps << endl;
cout << "\n";
cout << "End Mesh and Timestep Statistics:\n";
cout << "===============================================================\n";
cout << "\n\n";
///////////////////////////////////////////////////////
// Time stepping //
///////////////////////////////////////////////////////
float dumpTime = 0.0;
float simStart = clock();
domainMgr->currTime_ = 0.0;
float outTrack = 0.0;
float simTime = meshObj.finalTime_;
int outCt = 0;
string extName = ".vtu";
string outFile = meshObj.outFileName_ + to_string(outCt) + extName;
//Copy element Data to GPU
elementData elemData;
createDataOnDevice(domainMgr, elemData, heatMgr);
initializeStiffnessOnD(elemData);
// set up output manager
vtuBinWriter * vtuMgr = new vtuBinWriter(domainMgr, heatMgr, elemData, outFile);
vtuMgr->execute();
//------------------START TIMER---------------------------------------
//Start timer
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Start record
cudaEventRecord(start, 0);
//-----------------------------------------------------------------------
// time integrator
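// Each step advances the host-side solver and the corresponding device kernels, then compareTemp checks the device temperatures against the host result (heatMgr->thetaN_).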
while (domainMgr->currTime_ <= simTime)
{
// update time counters
outTrack += heatMgr->dt_;
domainMgr->currTime_ += heatMgr->dt_;
heatMgr->pre_work();
heatMgr->updateCap();
heatMgr->integrateForce();
//heatMgr->heatBCManager_->applyFluxes();
heatMgr->advance_time_step();
heatMgr->post_work();
//----------------------------------------------
clearDeviceData(elemData);
updateMassOnD(elemData, domainMgr);
updateIntForceOnD(elemData, domainMgr);
updateFluxKernel(elemData, domainMgr);
advanceTimeKernel(elemData, domainMgr);
dirichletBCKernel(elemData);
CopyToHost(elemData);
//compareMass(elemData, heatMgr->Mvec_);
//compareStiff(elemData, domainMgr->elementList_);
//compareIntForce(elemData, heatMgr->rhs_);
//compareFlux(elemData, heatMgr->rhs_);
compareTemp(elemData, heatMgr->thetaN_);
//------------------------------------------------
// File Manager
if (outTrack >= meshObj.outTime_)
{
outCt++;
outFile = meshObj.outFileName_ + to_string(outCt) + extName;
starttime = clock();
vtuMgr->execute();
endtime = clock();
outTrack = 0.0;
cout << "===============================================================\n";
cout << left <<
" Output at time: " << setw(43) << domainMgr->currTime_
<< "|" << endl;
cout << left <<
" Percentage done: " << setw(42) << (domainMgr->currTime_/simTime) * 100.0
<< "|" << endl;
cout << left <<
" Timer for outputting files (ascii) " << setw(24)
<< (float) (endtime - starttime) / CLOCKS_PER_SEC
<< "|" << endl;
dumpTime += (float) (endtime - starttime) / CLOCKS_PER_SEC;
cout << "===============================================================\n";
}
// no longer beginning of simulation
if (domainMgr->isInit_)
{
domainMgr->isInit_ = false;
}
}//end for(t)
//----------------------------END TIMER -----------------------------------
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop); // that's our time!
// Clean up:
cudaEventDestroy(start);
cudaEventDestroy(stop);
std::cout << "Process Took " << (float)elapsedTime / 1000 << " seconds" << std::endl;
//-------------------------------------------------------------------------
CopyToHost(elemData);
FreeDevice(elemData);
//compareMass(elemData, heatMgr->Mvec_);
//compareStiff(elemData, domainMgr->elementList_);
//compareIntForce(elemData, heatMgr->rhs_);
//compareFlux(elemData, heatMgr->rhs_);
//compareTemp(elemData, heatMgr->thetaN_);
outCt++;
vtuMgr->execute();
float simEnd = clock();
cout << "\n\n";
cout << "===============================================================\n";
cout << "Simulation Finished: cpu time statistics:\n";
cout << "\n";
cout << SecondEleWidth_ << "Final simulation time: " << domainMgr->currTime_ << endl;
cout << SecondEleWidth_ << "Total compute time: "
<< (float) (simEnd - simStart) / CLOCKS_PER_SEC << endl;
cout << SecondEleWidth_ << "Total time for dumping data: "
<< dumpTime << endl;
cout << SecondEleWidth_ << "Total time for probing data: "
<< heatMgr->probeTime_ << endl;
cout << "\n";
cout << "End cpu time statistics:\n";
cout << "===============================================================\n";
}
|
7be83941f33a032c4ab5dbc376513303b20f0ca2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <ctime>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
//NVTX Dir: C:\Program Files\NVIDIA GPU Computing Toolkit\nvToolsExt
#include <roctracer/roctx.h>
//Initialize sizes
const int sizeX = 4096;
const int sizeY = 4096;
using namespace std;
struct DIMS
{
dim3 dimBlock;
dim3 dimGrid;
};
#define CUDA(call) do { \
hipError_t e = (call); \
if (e == hipSuccess) break; \
fprintf(stderr, __FILE__":%d: %s (%d)\n", \
__LINE__, hipGetErrorString(e), e); \
exit(1); \
} while (0)
inline unsigned divup(unsigned n, unsigned div)
{
return (n + div - 1) / div;
}
// Check errors
void postprocess(const float *ref, const float *res, int n)
{
bool passed = true;
for (int i = 0; i < n; i++)
{
if (res[i] != ref[i])
{
printf("ID:%d \t Res:%f \t Ref:%f\n", i, res[i], ref[i]);
printf("%25s\n", "*** FAILED ***");
passed = false;
break;
}
}
if(passed)
printf("Post process check passed!!\n");
}
void preprocess(float *res, float *dev_res, int n)
{
std::fill(res, res + n, -1);
hipMemset(dev_res, -1, n * sizeof(float));
}
__global__ void copyKernel(const float* const a, float* const b)
{
int i = threadIdx.x + blockIdx.x * blockDim.x; // Compute correctly - Global X index
int j = threadIdx.y + blockIdx.y * blockDim.y; // Compute correctly - Global Y index
int index = i + j*sizeX; // Compute 1D index from i, j
b[index] = a[index];
}
__global__ void matrixTransposeNaive(const float* const a, float* const b)
{
int i = threadIdx.x + blockIdx.x * blockDim.x; // Compute correctly - Global X index
int j = threadIdx.y + blockIdx.y * blockDim.y; // Compute correctly - Global Y index
int index_in = i + j*sizeX; // Compute input index (i,j) from matrix A
int index_out = j + i*sizeY; // Compute output index (j,i) in matrix B = transpose(A)
// Copy data from A to B
b[index_out] = a[index_in];
}
template<int BLOCK_SIZE_X, int BLOCK_SIZE_Y>
__global__ void matrixTransposeShared(const float* const a, float* const b)
{
//Allocate appropriate shared memory
__shared__ float mat[BLOCK_SIZE_Y][BLOCK_SIZE_X];
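// Note: the tile has no padding, so the transposed read below (mat[threadIdx.x][threadIdx.y]) can cause shared-memory bank conflicts; padding the inner dimension to BLOCK_SIZE_X + 1 is the usual remedy.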
//Compute input and output index
int bx = blockIdx.x * blockDim.x; // Compute block offset - this is number of global threads in X before this block
int by = blockIdx.y * blockDim.y; // Compute block offset - this is number of global threads in Y before this block
int i = threadIdx.x + bx; // Global input x index - Same as previous kernels
int j = threadIdx.y + by; // Global input y index - Same as previous kernels
int ti = threadIdx.x + by; // Global output x index - remember the transpose
int tj = threadIdx.y + bx; // Global output y index - remember the transpose
//Copy data from input to shared memory
mat[threadIdx.y][threadIdx.x] = a[i+sizeX*j];
__syncthreads();
//Copy data from shared memory to global memory b
b[tj*sizeX+ti] = mat[threadIdx.x][threadIdx.y];
}
__global__ void matrixTransposeDynamicShared(const float* const a, float* const b)
{
//Allocate appropriate shared memory
extern __shared__ float mat[];
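// The flat tile indexing below assumes a square block (blockDim.x == blockDim.y), which holds for the 16x16 launch in main().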
//Compute input and output index
int bx = blockIdx.x * blockDim.x; // Compute block offset - this is number of global threads in X before this block
int by = blockIdx.y * blockDim.y; // Compute block offset - this is number of global threads in Y before this block
int i = threadIdx.x + bx; // Global input x index - Same as previous kernels
int j = threadIdx.y + by; // Global input y index - Same as previous kernels
int ti = threadIdx.x + by; // Global output x index - remember the transpose
int tj = threadIdx.y + bx; // Global output y index - remember the transpose
//Copy data from input to shared memory
mat[threadIdx.y*blockDim.x + threadIdx.x] = a[i + sizeX*j];
__syncthreads();
//Copy data from shared memory to global memory b
b[tj*sizeX + ti] = mat[threadIdx.x*blockDim.x + threadIdx.y];
}
int main(int argc, char *argv[])
{
// Host arrays.
float* a = new float[sizeX * sizeY];
float* b = new float[sizeX * sizeY];
float* a_gold = new float[sizeX * sizeY];
float* b_gold = new float[sizeX * sizeY];
// Device arrays
float *d_a, *d_b;
// Allocate memory on the device
CUDA(hipMalloc((void **) &d_a, sizeX * sizeY * sizeof(float)));
CUDA(hipMalloc((void **) &d_b, sizeX * sizeY * sizeof(float)));
// Fill matrix A
for (int i = 0; i < sizeX * sizeY; i++)
a[i] = (float)i;
cout << endl;
// Copy array contents of A from the host (CPU) to the device (GPU)
hipMemcpy(d_a, a, sizeX * sizeY * sizeof(float), hipMemcpyHostToDevice);
//Compute "gold" reference standard
for(int jj = 0; jj < sizeY; jj++)
{
for(int ii = 0; ii < sizeX; ii++)
{
a_gold[jj * sizeX + ii] = a[jj * sizeX + ii];
b_gold[ii * sizeY + jj] = a[jj * sizeX + ii];
}
}
hipDeviceSynchronize();
#define CPU_TRANSPOSE
#ifdef CPU_TRANSPOSE
////////////////////////////////////////////////////////////
cout << "******************************************" << endl;
cout << "***CPU Transpose***" << endl;
{
for (int jj = 0; jj < sizeY; jj++)
for (int ii = 0; ii < sizeX; ii++)
b[ii * sizeX + jj] = a[jj * sizeX + ii];
}
cout << "******************************************" << endl;
cout << endl;
////////////////////////////////////////////////////////////
#endif
////////////////////////////////////////////////////////////
cout << "******************************************" << endl;
cout << "***Device To Device Copy***" << endl;
{
preprocess(b, d_b, sizeX * sizeY);
        // Assign a 2D distribution of BS_X x BS_Y x 1 CUDA threads within each block
// Calculate number of blocks along X and Y in a 2D CUDA "grid"
DIMS dims;
dims.dimBlock = dim3(16, 16, 1);
dims.dimGrid = dim3(sizeX / dims.dimBlock.x,
sizeY / dims.dimBlock.y,
1);
// Launch the GPU kernel
hipLaunchKernelGGL(( copyKernel), dim3(dims.dimGrid), dim3(dims.dimBlock), 0, 0, d_a, d_b);
// copy the answer back to the host (CPU) from the device (GPU)
hipMemcpy(b, d_b, sizeX * sizeY * sizeof(float), hipMemcpyDeviceToHost);
postprocess(a_gold, b, sizeX * sizeY);
}
cout << "******************************************" << endl;
cout << endl;
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
cout << "******************************************" << endl;
cout << "***Naive Transpose***" << endl;
{
preprocess(b, d_b, sizeX * sizeY);
        // Assign a 2D distribution of BS_X x BS_Y x 1 CUDA threads within each block
// Calculate number of blocks along X and Y in a 2D CUDA "grid"
// HINT: Look above for copy kernel dims computation
DIMS dims;
dims.dimBlock = dim3(16, 16, 1);
dims.dimGrid = dim3(sizeX / dims.dimBlock.x,
sizeY / dims.dimBlock.y,
1);
// Launch the GPU kernel
hipLaunchKernelGGL(( matrixTransposeNaive), dim3(dims.dimGrid), dim3(dims.dimBlock), 0, 0, d_a, d_b);
// copy the answer back to the host (CPU) from the device (GPU)
hipMemcpy(b, d_b, sizeX * sizeY * sizeof(float), hipMemcpyDeviceToHost);
postprocess(b_gold, b, sizeX * sizeY);
}
cout << "******************************************" << endl;
cout << endl;
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
cout << "******************************************" << endl;
cout << "***Shared Memory Transpose***" << endl;
{
preprocess(b, d_b, sizeX * sizeY);
        // Assign a 2D distribution of BS_X x BS_Y x 1 CUDA threads within each block
// Calculate number of blocks along X and Y in a 2D CUDA "grid"
DIMS dims;
dims.dimBlock = dim3(16, 16, 1);
dims.dimGrid = dim3(sizeX / dims.dimBlock.x,
sizeY / dims.dimBlock.y,
1);
// Launch the GPU kernel
hipLaunchKernelGGL(( matrixTransposeShared<16, 16>), dim3(dims.dimGrid), dim3(dims.dimBlock), 0, 0, d_a, d_b);
// copy the answer back to the host (CPU) from the device (GPU)
hipMemcpy(b, d_b, sizeX * sizeY * sizeof(float), hipMemcpyDeviceToHost);
postprocess(b_gold, b, sizeX * sizeY);
}
cout << "******************************************" << endl;
cout << endl;
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
cout << "******************************************" << endl;
cout << "***Shared Memory Transpose with Dynamic Shared Memory***" << endl;
{
preprocess(b, d_b, sizeX * sizeY);
        // Assign a 2D distribution of BS_X x BS_Y x 1 CUDA threads within each block
// Calculate number of blocks along X and Y in a 2D CUDA "grid"
DIMS dims;
dims.dimBlock = dim3(16, 16, 1);
dims.dimGrid = dim3(sizeX / dims.dimBlock.x,
sizeY / dims.dimBlock.y,
1);
// Launch the GPU kernel
        int sharedMemoryPerBlockInBytes = dims.dimBlock.x * dims.dimBlock.y * sizeof(float); // Bytes of dynamic shared memory per block (one float per thread)
        hipLaunchKernelGGL(( matrixTransposeDynamicShared), dim3(dims.dimGrid), dim3(dims.dimBlock), sharedMemoryPerBlockInBytes, 0, d_a, d_b);
// copy the answer back to the host (CPU) from the device (GPU)
hipMemcpy(b, d_b, sizeX * sizeY * sizeof(float), hipMemcpyDeviceToHost);
postprocess(b_gold, b, sizeX * sizeY);
}
cout << "******************************************" << endl;
cout << endl;
////////////////////////////////////////////////////////////
// free device memory
hipFree(d_a);
hipFree(d_b);
// free host memory
    delete[] a;
    delete[] b;
    delete[] a_gold;
    delete[] b_gold;
//CUDA Reset for NVProf
CUDA(hipDeviceReset());
// successful program termination
return 0;
}
| 7be83941f33a032c4ab5dbc376513303b20f0ca2.cu | #include <stdio.h>
#include <iostream>
#include <ctime>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
//NVTX Dir: C:\Program Files\NVIDIA GPU Computing Toolkit\nvToolsExt
#include <nvToolsExt.h>
//Initialize sizes
const int sizeX = 4096;
const int sizeY = 4096;
using namespace std;
struct DIMS
{
dim3 dimBlock;
dim3 dimGrid;
};
#define CUDA(call) do { \
cudaError_t e = (call); \
if (e == cudaSuccess) break; \
fprintf(stderr, __FILE__":%d: %s (%d)\n", \
__LINE__, cudaGetErrorString(e), e); \
exit(1); \
} while (0)
inline unsigned divup(unsigned n, unsigned div)
{
return (n + div - 1) / div;
}
// Check errors
void postprocess(const float *ref, const float *res, int n)
{
bool passed = true;
for (int i = 0; i < n; i++)
{
if (res[i] != ref[i])
{
printf("ID:%d \t Res:%f \t Ref:%f\n", i, res[i], ref[i]);
printf("%25s\n", "*** FAILED ***");
passed = false;
break;
}
}
if(passed)
printf("Post process check passed!!\n");
}
void preprocess(float *res, float *dev_res, int n)
{
std::fill(res, res + n, -1);
cudaMemset(dev_res, -1, n * sizeof(float));
}
__global__ void copyKernel(const float* const a, float* const b)
{
int i = threadIdx.x + blockIdx.x * blockDim.x; // Compute correctly - Global X index
int j = threadIdx.y + blockIdx.y * blockDim.y; // Compute correctly - Global Y index
int index = i + j*sizeX; // Compute 1D index from i, j
b[index] = a[index];
}
__global__ void matrixTransposeNaive(const float* const a, float* const b)
{
int i = threadIdx.x + blockIdx.x * blockDim.x; // Compute correctly - Global X index
int j = threadIdx.y + blockIdx.y * blockDim.y; // Compute correctly - Global Y index
    int index_in = i + j*sizeX; // Compute input index (i,j) from matrix A
int index_out = j + i*sizeY; // Compute output index (j,i) in matrix B = transpose(A)
// Copy data from A to B
b[index_out] = a[index_in];
}
template<int BLOCK_SIZE_X, int BLOCK_SIZE_Y>
__global__ void matrixTransposeShared(const float* const a, float* const b)
{
//Allocate appropriate shared memory
__shared__ float mat[BLOCK_SIZE_Y][BLOCK_SIZE_X];
//Compute input and output index
int bx = blockIdx.x * blockDim.x; // Compute block offset - this is number of global threads in X before this block
int by = blockIdx.y * blockDim.y; // Compute block offset - this is number of global threads in Y before this block
int i = threadIdx.x + bx; // Global input x index - Same as previous kernels
int j = threadIdx.y + by; // Global input y index - Same as previous kernels
int ti = threadIdx.x + by; // Global output x index - remember the transpose
int tj = threadIdx.y + bx; // Global output y index - remember the transpose
//Copy data from input to shared memory
mat[threadIdx.y][threadIdx.x] = a[i+sizeX*j];
__syncthreads();
//Copy data from shared memory to global memory b
b[tj*sizeX+ti] = mat[threadIdx.x][threadIdx.y];
}
__global__ void matrixTransposeDynamicShared(const float* const a, float* const b)
{
//Allocate appropriate shared memory
extern __shared__ float mat[];
//Compute input and output index
int bx = blockIdx.x * blockDim.x; // Compute block offset - this is number of global threads in X before this block
int by = blockIdx.y * blockDim.y; // Compute block offset - this is number of global threads in Y before this block
int i = threadIdx.x + bx; // Global input x index - Same as previous kernels
int j = threadIdx.y + by; // Global input y index - Same as previous kernels
int ti = threadIdx.x + by; // Global output x index - remember the transpose
int tj = threadIdx.y + bx; // Global output y index - remember the transpose
//Copy data from input to shared memory
mat[threadIdx.y*blockDim.x + threadIdx.x] = a[i + sizeX*j];
__syncthreads();
//Copy data from shared memory to global memory b
b[tj*sizeX + ti] = mat[threadIdx.x*blockDim.x + threadIdx.y];
}
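
// --- Illustrative addition (not part of the original file) -----------------
// With `extern __shared__ float mat[]`, the dynamic shared-memory size must be
// given as the third launch-configuration parameter of the <<<...>>> syntax.
// A minimal launch sketch assuming the 16x16 blocks used in main(); the helper
// name and its parameters are hypothetical.
static void launchTransposeDynamicShared(const float* d_in, float* d_out)
{
    dim3 block(16, 16, 1);
    dim3 grid(sizeX / block.x, sizeY / block.y, 1);
    size_t sharedBytes = block.x * block.y * sizeof(float); // one float per thread
    matrixTransposeDynamicShared<<<grid, block, sharedBytes>>>(d_in, d_out);
}
// ----------------------------------------------------------------------------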
int main(int argc, char *argv[])
{
// Host arrays.
float* a = new float[sizeX * sizeY];
float* b = new float[sizeX * sizeY];
float* a_gold = new float[sizeX * sizeY];
float* b_gold = new float[sizeX * sizeY];
// Device arrays
float *d_a, *d_b;
// Allocate memory on the device
CUDA(cudaMalloc((void **) &d_a, sizeX * sizeY * sizeof(float)));
CUDA(cudaMalloc((void **) &d_b, sizeX * sizeY * sizeof(float)));
// Fill matrix A
for (int i = 0; i < sizeX * sizeY; i++)
a[i] = (float)i;
cout << endl;
// Copy array contents of A from the host (CPU) to the device (GPU)
cudaMemcpy(d_a, a, sizeX * sizeY * sizeof(float), cudaMemcpyHostToDevice);
//Compute "gold" reference standard
for(int jj = 0; jj < sizeY; jj++)
{
for(int ii = 0; ii < sizeX; ii++)
{
a_gold[jj * sizeX + ii] = a[jj * sizeX + ii];
b_gold[ii * sizeY + jj] = a[jj * sizeX + ii];
}
}
cudaDeviceSynchronize();
#define CPU_TRANSPOSE
#ifdef CPU_TRANSPOSE
////////////////////////////////////////////////////////////
cout << "******************************************" << endl;
cout << "***CPU Transpose***" << endl;
{
for (int jj = 0; jj < sizeY; jj++)
for (int ii = 0; ii < sizeX; ii++)
b[ii * sizeX + jj] = a[jj * sizeX + ii];
}
cout << "******************************************" << endl;
cout << endl;
////////////////////////////////////////////////////////////
#endif
////////////////////////////////////////////////////////////
cout << "******************************************" << endl;
cout << "***Device To Device Copy***" << endl;
{
preprocess(b, d_b, sizeX * sizeY);
        // Assign a 2D distribution of BS_X x BS_Y x 1 CUDA threads within each block
// Calculate number of blocks along X and Y in a 2D CUDA "grid"
DIMS dims;
dims.dimBlock = dim3(16, 16, 1);
dims.dimGrid = dim3(sizeX / dims.dimBlock.x,
sizeY / dims.dimBlock.y,
1);
// Launch the GPU kernel
copyKernel<<<dims.dimGrid, dims.dimBlock>>>(d_a, d_b);
// copy the answer back to the host (CPU) from the device (GPU)
cudaMemcpy(b, d_b, sizeX * sizeY * sizeof(float), cudaMemcpyDeviceToHost);
postprocess(a_gold, b, sizeX * sizeY);
}
cout << "******************************************" << endl;
cout << endl;
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
cout << "******************************************" << endl;
cout << "***Naive Transpose***" << endl;
{
preprocess(b, d_b, sizeX * sizeY);
        // Assign a 2D distribution of BS_X x BS_Y x 1 CUDA threads within each block
// Calculate number of blocks along X and Y in a 2D CUDA "grid"
// HINT: Look above for copy kernel dims computation
DIMS dims;
dims.dimBlock = dim3(16, 16, 1);
dims.dimGrid = dim3(sizeX / dims.dimBlock.x,
sizeY / dims.dimBlock.y,
1);
// Launch the GPU kernel
matrixTransposeNaive<<<dims.dimGrid, dims.dimBlock>>>(d_a, d_b);
// copy the answer back to the host (CPU) from the device (GPU)
cudaMemcpy(b, d_b, sizeX * sizeY * sizeof(float), cudaMemcpyDeviceToHost);
postprocess(b_gold, b, sizeX * sizeY);
}
cout << "******************************************" << endl;
cout << endl;
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
cout << "******************************************" << endl;
cout << "***Shared Memory Transpose***" << endl;
{
preprocess(b, d_b, sizeX * sizeY);
        // Assign a 2D distribution of BS_X x BS_Y x 1 CUDA threads within each block
// Calculate number of blocks along X and Y in a 2D CUDA "grid"
DIMS dims;
dims.dimBlock = dim3(16, 16, 1);
dims.dimGrid = dim3(sizeX / dims.dimBlock.x,
sizeY / dims.dimBlock.y,
1);
// Launch the GPU kernel
matrixTransposeShared<16, 16><<<dims.dimGrid, dims.dimBlock>>>(d_a, d_b);
// copy the answer back to the host (CPU) from the device (GPU)
cudaMemcpy(b, d_b, sizeX * sizeY * sizeof(float), cudaMemcpyDeviceToHost);
postprocess(b_gold, b, sizeX * sizeY);
}
cout << "******************************************" << endl;
cout << endl;
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
cout << "******************************************" << endl;
cout << "***Shared Memory Transpose with Dynamic Shared Memory***" << endl;
{
preprocess(b, d_b, sizeX * sizeY);
        // Assign a 2D distribution of BS_X x BS_Y x 1 CUDA threads within each block
// Calculate number of blocks along X and Y in a 2D CUDA "grid"
DIMS dims;
dims.dimBlock = dim3(16, 16, 1);
dims.dimGrid = dim3(sizeX / dims.dimBlock.x,
sizeY / dims.dimBlock.y,
1);
// Launch the GPU kernel
        int sharedMemoryPerBlockInBytes = dims.dimBlock.x * dims.dimBlock.y * sizeof(float); // Bytes of dynamic shared memory per block (one float per thread)
        matrixTransposeDynamicShared<<<dims.dimGrid, dims.dimBlock, sharedMemoryPerBlockInBytes>>>(d_a, d_b);
// copy the answer back to the host (CPU) from the device (GPU)
cudaMemcpy(b, d_b, sizeX * sizeY * sizeof(float), cudaMemcpyDeviceToHost);
postprocess(b_gold, b, sizeX * sizeY);
}
cout << "******************************************" << endl;
cout << endl;
////////////////////////////////////////////////////////////
// free device memory
cudaFree(d_a);
cudaFree(d_b);
// free host memory
    delete[] a;
    delete[] b;
    delete[] a_gold;
    delete[] b_gold;
//CUDA Reset for NVProf
CUDA(cudaDeviceReset());
// successful program termination
return 0;
}
|
05efe069fea3b674ba09c6d68b9e9758efca8467.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated d Tue Dec 17 13:18:45 2013
*/
#include "common_magma.h"
#include "commonblas_d.h"
#define PRECISION_d
/* The version for tesla can be found in dsymv_tesla.cu */
#define symv_bs 64
#define thread_x 64
#define thread_y 4
#define bank_shift 33
#define quarter_thread_x 16
#define half_thread_x 32
/*******************************************************************************
* Lower case, where n is multiple of block size (symv_bs)
*/
__global__ void
dsymv_kernel_fermi_L_special(
int n, double alpha,
const double * __restrict__ A, int lda,
const double * __restrict__ x, int incx,
double beta,
double * __restrict__ y, int incy,
double * __restrict__ WC)
{
#if (__CUDA_ARCH__ >= 200)
int tx = threadIdx.x;
int ty = threadIdx.y;
int blkc = blockIdx.x;
double res = MAGMA_D_ZERO;
double res_ = MAGMA_D_ZERO;
double res1 = MAGMA_D_ZERO;
__shared__ double la [quarter_thread_x][thread_x+2];
__shared__ double buff [thread_x];
__shared__ double buff2 [thread_x];
double tr[4];
double b[4];
int break_d = thread_x * blkc;
const int td = (thread_x * ty) + tx;
int tx_ = td % half_thread_x;
int ty_ = td / half_thread_x;
WC += break_d + tx;
x += (break_d + tx) * incx;
A += break_d * (lda+1);
A += ty_ * lda + tx_;
if ( ty == 0 ) {
buff[tx] = x[0];
    } // obtain the vector x and store it in buff
tx = tx_; ty = ty_;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
la[0][ bank_shift * (ty_+j) + tx_] = A[ j * lda];
__syncthreads();
#pragma unroll
for(int i=ty_*4; i < (ty_ * 4 + 4); i++) {
if ( i < tx_ )
la[0][bank_shift * tx_ + i] = la[0][ bank_shift * i + tx_];
else
la[0][bank_shift * tx_ + i] = la[0][ bank_shift * tx_ + i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += la[0][bank_shift * tx_ + j + ty_ * 4] * buff[j + ty_ * 4];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_ == 0 ) {
res1 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
}
else {
res1 = MAGMA_D_ZERO;
}
__syncthreads();
res = MAGMA_D_ZERO;
A += half_thread_x + half_thread_x*lda;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
__syncthreads();
#pragma unroll
for(int i=ty_*4; i < (4+ty_*4); i++) {
if ( i < tx_ ) {
la[0][bank_shift*tx_+i] = la[0][bank_shift*i+tx_];
}
else
la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x + j + 4 * ty_];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
double res2;
res2 = MAGMA_D_ZERO;
if ( ty_ == 1 ) {
res2 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
}
else {
res2 = MAGMA_D_ZERO;
}
__syncthreads();
res = MAGMA_D_ZERO;
A -= half_thread_x*lda;
res_ = MAGMA_D_ZERO;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
tr[j/8] = A[ j * lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff[ j*8 + ty_];
la[0][bank_shift*(ty_+j*8)+tx_] = tr[j];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x +j+ty_*4];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_ == 1 ) {
res2 = res2
+ la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
}
else {
res2 = MAGMA_D_ZERO;
}
__syncthreads();
la[0][bank_shift*tx_+ty_] = res_;
__syncthreads();
if ( ty_ == 0 ) {
res1 = res1
+ la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
}
else {
res1 = MAGMA_D_ZERO;
}
A -= half_thread_x;
__syncthreads();
tx = threadIdx.x;
ty = threadIdx.y;
if ( ty_ == 0 && ty == 0 )
res = res1;
else if ( ty_ == 1 && ty == 0 )
res = res2;
else {
res = MAGMA_D_ZERO;
}
A -= ty_ * lda;
A -= tx_;
A = A - lda * blkc * thread_x;
x = x - blkc * thread_x * incx;
A += 4 * ty * lda;
A += tx;
int wc_c = 0;
int count = 0;
tx_ = td % quarter_thread_x;
ty_ = td / quarter_thread_x;
WC -= tx;
WC += tx_;
if ( blkc * thread_x >= thread_x ) {
#pragma unroll
for( int i=0; i < thread_x; i += thread_x ) {
res_ = MAGMA_D_ZERO;
count++;
if ( ty == 0 ) {
buff2[tx] = x[i*incx];
}
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++ ) {
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff2[ quarter_thread_x * k + ty*4 + j];
la[j + ty*4][tx] = tr[j] * buff[tx];
}
__syncthreads();
res_ = MAGMA_D_ZERO;
#pragma unroll
for(int j=0; j < 4; j++) {
res_ += la[tx_][ty_*4+j];
}
b[k] = res_;
__syncthreads();
A += lda * quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k] = b[k];
}
__syncthreads();
if ( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c++;
__syncthreads();
}
}
for(int i=thread_x; i < (blkc * thread_x); i += thread_x ) {
res_ = MAGMA_D_ZERO;
count++;
if ( ty == 0 ) {
buff2[tx] = x[i*incx];
}
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++ ) {
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff2[ quarter_thread_x*k + ty*4+(j)];
la[j + ty*4][tx] = tr[j] * buff[tx];
}
__syncthreads();
res_ = MAGMA_D_ZERO;
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[tx_][ty_*4+j];
b[k] = res_;
__syncthreads();
A += lda * quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k] = b[k];
}
__syncthreads();
if ( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c++;
__syncthreads();
}
WC += tx;
WC -= tx_;
la[ty][tx] = res;
__syncthreads();
if ( ty == 0 ) {
res = la[0][tx]+ la[1][tx]
+ la[2][tx]+ la[3][tx];
WC[0+lda*(blkc)] = res;
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
/**************************************************************
* Lower case for generic sizes
*/
__global__ void
dsymv_kernel_fermi_L_generic(
int n, double alpha,
const double * __restrict__ A, int lda,
const double * __restrict__ x, int incx,
double beta,
double * __restrict__ y, int incy,
double * __restrict__ WC,
int m_mod_thread_x)
{
#if (__CUDA_ARCH__ >= 200)
int tx = threadIdx.x;
int ty = threadIdx.y;
int blkc = blockIdx.x;
double res = MAGMA_D_ZERO;
double res_ = MAGMA_D_ZERO;
double res1 = MAGMA_D_ZERO;
__shared__ double la [quarter_thread_x][thread_x+2];
__shared__ double buff [thread_x];
__shared__ double buff2[thread_x];
double tr[4];
double b[8];
int break_d = thread_x * blkc;
const int td = (thread_x * ty) + tx;
int tx_ = td % half_thread_x;
int ty_ = td / half_thread_x;
WC += break_d + tx;
x += (break_d + tx) * incx;
A += break_d * (lda+1);
A += lda * ty_;
int trackA;
if ( blkc == ( gridDim.x - 1 ) ) {
if ( ty == 0 ) {
if ( tx > m_mod_thread_x ) {
buff[tx] = MAGMA_D_ZERO;
}
else
buff[tx] = x[0];
}
if ( tx_ > m_mod_thread_x )
trackA = m_mod_thread_x;
else
trackA = tx_;
A += trackA;
}
else {
if ( ty == 0 ) {
buff[tx] = x[0];
}
trackA = tx_;
A += trackA;
}
    // Somehow merging these two if-else blocks creates a problem.
    // It could be a potential bug -- from synchronization, from CUDA, or from the compiler.
if ( blkc == ( gridDim.x - 1 ) ) {
#pragma unroll
for(int j=0; j < half_thread_x; j += 8) {
if ( ( ty_ + j ) > m_mod_thread_x ) {
la[0][bank_shift*(ty_+j)+tx_] = MAGMA_D_MAKE( 9999, 0 );
}
else
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
A -= trackA;
}
else {
#pragma unroll
for(int j=0; j < half_thread_x; j += 8) {
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
}
tx = tx_;
ty = ty_;
__syncthreads();
#pragma unroll
for(int i=ty_*4; i < (ty_*4+4); i++) {
if ( i < tx_ )
la[0][bank_shift*tx_+i] = la[0][bank_shift*i+tx_];
else
la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += la[0][bank_shift*tx_+j+ty_*4] * buff[j+ty_*4];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_ == 0 ) {
res1 = la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
}
else {
res1 = MAGMA_D_ZERO;
}
__syncthreads();
res = MAGMA_D_ZERO;
if ( blkc == ( gridDim.x - 1 ) ) {
if ( (tx_+half_thread_x) > m_mod_thread_x )
trackA = m_mod_thread_x;
else
trackA = tx_ + half_thread_x;
A += trackA+half_thread_x*lda;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8) {
if ( ( ty_ + j+half_thread_x ) > m_mod_thread_x ) {
la[0][bank_shift*(ty_+j)+tx_] = MAGMA_D_MAKE( 99999, 0 );
}
else
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
A -= trackA+half_thread_x*lda;
A += tx_;
A += half_thread_x + half_thread_x*lda;
}
else {
A += half_thread_x + half_thread_x*lda;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8) {
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
}
__syncthreads();
#pragma unroll
for(int i=ty_*4; i < (4+ty_*4); i++) {
if ( i < tx_ ) {
la[0][bank_shift*tx_+i] = la[0][bank_shift*i+tx_];
}
else
la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x + j + 4 * ty_];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
double res2;
res2 = MAGMA_D_ZERO;
if ( ty_ == 1 ) {
res2 = la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
}
else {
res2 = MAGMA_D_ZERO;
}
__syncthreads();
res = MAGMA_D_ZERO;
res_ = MAGMA_D_ZERO;
A -= half_thread_x*lda;
if ( blkc == ( gridDim.x - 1 ) ) {
A -= tx_;
if ( tx_ > m_mod_thread_x )
trackA = m_mod_thread_x;
else
trackA = tx_;
A += trackA;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
if ( ( ty_ + j ) > m_mod_thread_x ) {
tr[j/8] = MAGMA_D_MAKE( 99999, 0 );
}
else
tr[j/8] = A[ j * lda];
A -= trackA;
A += tx_;
}
else {
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
tr[j/8] = A[ j * lda];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff[ j*8 + ty_];
la[0][bank_shift*(ty_+j*8)+tx_] = tr[j];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x +j+ty_*4];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_ == 1 ) {
res2 = res2
+ la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
}
else {
res2 = MAGMA_D_ZERO;
}
__syncthreads();
la[0][bank_shift*tx_+ty_] = res_;
__syncthreads();
if ( ty_ == 0 ) {
res1 = res1
+ la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
}
else {
res1 = MAGMA_D_ZERO;
}
A -= half_thread_x;
__syncthreads();
tx = threadIdx.x;
ty = threadIdx.y;
if ( ty_ == 0 && ty == 0 )
res = res1;
else if ( ty_ == 1 && ty == 0 )
res = res2;
else {
res = MAGMA_D_ZERO;
}
A -= ty_ * lda;
A -= tx_;
A = A - lda*break_d;
x = x - break_d*incx;
A += 4 * ty * lda;
if ( blkc == ( gridDim.x - 1 ) ) {
if ( tx <= m_mod_thread_x )
A += tx;
else
A += m_mod_thread_x;
}
else{
A += tx;
}
int wc_c = 0;
int count = 0;
tx_ = td % quarter_thread_x;
ty_ = td / quarter_thread_x;
WC -= tx;
WC += tx_;
#pragma unroll
for(int j=0; j < 4; j++)
b[j] = buff[ty_*4+j];
if ( break_d > 0 )
#pragma unroll
for( int i=0; i < thread_x; i += thread_x ) {
res_ = MAGMA_D_ZERO;
count++;
if ( ty == 0 ) {
buff2[tx] = x[i*incx];
}
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++ ) {
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j]*buff2[quarter_thread_x*k + ty*4+(j)];
la[( (j)+ty*4)][tx] = tr[j];
}
__syncthreads();
res_ = MAGMA_D_ZERO;
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[tx_][ty_*4+j] * b[j];
b[4+k] = res_;
__syncthreads();
A += lda * quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k] = b[4+k];
}
__syncthreads();
if ( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c++;
__syncthreads();
}
for( int i=thread_x; i < break_d; i += thread_x ) {
res_ = MAGMA_D_ZERO;
count++;
if ( ty == 0 )
buff2[tx] = x[i*incx];
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++ ) {
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j]*buff2[quarter_thread_x*k + ty*4+(j)];
la[( (j)+ty*4)][tx] = tr[j];
}
__syncthreads();
res_ = MAGMA_D_ZERO;
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[tx_][ty_*4+j] * b[j];
b[4+k] = res_;
__syncthreads();
A += lda * quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k] = b[4+k];
}
__syncthreads();
if ( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c++;
__syncthreads();
}
WC += tx;
WC -= tx_;
la[ty][tx] = res;
__syncthreads();
if ( ty == 0 ) {
res=la[0][tx]+ la[1][tx]+ la[2][tx]+ la[3][tx];
WC[0+lda*(blkc)] = res;
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
__global__ void
dsymv_kernel_fermi_L_update(
int n, double alpha,
const double * __restrict__ A, int lda,
const double * __restrict__ x, int incx,
double beta,
double * __restrict__ y, int incy,
double * __restrict__ WC )
{
#if (__CUDA_ARCH__ >= 200)
int i;
int tx = threadIdx.x;
int ind = blockIdx.x * thread_x + tx;
double Ca;
Ca = MAGMA_D_ZERO;
WC += ind + lda * blockIdx.x;
for(i = blockIdx.x*thread_x; i < n; i += thread_x) {
Ca += WC[0];
WC += thread_x;
}
if ( ind < n )
y[ind * incy] = beta * y[ind * incy] + alpha * Ca;
#endif /* (__CUDA_ARCH__ >= 200) */
}
extern "C"
void magmablas_dsymv_fermi_L(
magma_int_t n, double alpha,
const double *A, magma_int_t lda,
const double *x, magma_int_t incx,
double beta,
double *y, magma_int_t incy,
double *dwork)
{
magma_int_t blocks = (n - 1)/symv_bs + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(thread_x, thread_y, 1);
dim3 threads_u(symv_bs, 1, 1);
/*
* If matrix size is multiple of symv_bs, we use a specific code.
* otherwise, we call the generic case.
*/
if ( n % symv_bs == 0 ) {
hipLaunchKernelGGL(( dsymv_kernel_fermi_L_special), dim3(grid), dim3(threads), 0, magma_stream ,
n, alpha, A, lda, x, incx, beta, y, incy, dwork);
}
else{
magma_int_t m_mod_thread_x = (n % symv_bs) - 1;
hipLaunchKernelGGL(( dsymv_kernel_fermi_L_generic), dim3(grid), dim3(threads), 0, magma_stream ,
n, alpha, A, lda, x, incx, beta, y, incy, dwork, m_mod_thread_x);
}
hipLaunchKernelGGL(( dsymv_kernel_fermi_L_update), dim3(grid), dim3(threads_u), 0, magma_stream ,
n, alpha, A, lda, x, incx, beta, y, incy, dwork);
}
/*************************************************************************
Purpose
=======
magmablas_dsymv performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n symmetric matrix.
Arguments
==========
UPLO CHARACTER*1.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
UPLO = 'U' or 'u' Only the upper triangular part of A
is to be referenced.
UPLO = 'L' or 'l' Only the lower triangular part of A
is to be referenced.
Unchanged on exit.
N INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
Unchanged on exit.
ALPHA DOUBLE PRECISION.
On entry, ALPHA specifies the scalar alpha.
Unchanged on exit.
A DOUBLE PRECISION array of DIMENSION ( LDA, n ).
Before entry with UPLO = 'U' or 'u', the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the symmetric matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = 'L' or 'l', the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the symmetric matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
Unchanged on exit.
LDA INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. LDA must be at least
max( 1, n ).
Unchanged on exit.
             It is recommended that lda be a multiple of 16. Otherwise
             performance will deteriorate, as the memory accesses
             will not be fully coalesced.
X DOUBLE PRECISION array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
Unchanged on exit.
INCX INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
Unchanged on exit.
BETA DOUBLE PRECISION.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
Unchanged on exit.
Y DOUBLE PRECISION array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
INCY INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
Unchanged on exit.
*/
extern "C"
magma_int_t
magmablas_dsymv(
char uplo, magma_int_t n,
double alpha,
const double *A, magma_int_t lda,
const double *x, magma_int_t incx,
double beta,
double *y, magma_int_t incy)
{
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x version
// magmablas for [sdc] precisions, cublas for [z] precisions.
// NOTE: [cz]symv are not implemented in cublas v1, but are in cublas v2.
#if defined(PRECISION_z) || defined(PRECISION_c)
fprintf(stderr, "%s: %s\n", __func__, "real case not implemented");
//hipblasDsymv( uplo, n, alpha, A, lda, x, incx, beta, y, incy );
#else
magmablas_dsymv_tesla( uplo, n, alpha, A, lda, x, incx, beta, y, incy );
#endif
return MAGMA_SUCCESS;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
char uplo_[2] = {uplo, 0};
int upper = lapackf77_lsame(uplo_, "U");
/*
* Test the input parameters.
*/
if ( (! upper) && (! lapackf77_lsame(uplo_, "L")) ) {
return -1;
} else if ( n < 0 ) {
return -2;
} else if ( lda < max(1,n) ) {
return -5;
} else if ( incx == 0 ) {
return -7;
} else if ( incy == 0 ) {
return -10;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_D_EQUAL(alpha, MAGMA_D_ZERO) && MAGMA_D_EQUAL(beta, MAGMA_D_ONE) ) )
return MAGMA_SUCCESS;
/* TODO: Upper case is not implemented in MAGMA */
/* NOTE: [cz]symv are not implemented in cublas v1, but are in cublas v2. */
if ( upper ) {
#if defined(PRECISION_z) || defined(PRECISION_c)
fprintf(stderr, "%s: %s\n", __func__, "Upper case not implemented");
//hipblasDsymv( uplo, n, alpha, A, lda, x, incx, beta, y, incy );
#else
hipblasDsymv(uplo, n, alpha, A, lda, x, incx, beta, y, incy);
#endif
}
else {
double *dwork;
magma_int_t blocks = (n - 1)/thread_x + 1;
magma_int_t lwork = lda * (blocks + 1);
// TODO deal with error
magma_dmalloc( &dwork, lwork );
magmablas_dsymv_work( uplo, n, alpha, A, lda, x, incx, beta, y, incy, dwork, lwork );
magma_free( dwork );
}
return MAGMA_SUCCESS;
}
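
// --- Illustrative addition (not part of the original file) -----------------
// Minimal usage sketch for magmablas_dsymv: computes y := alpha*A*x + beta*y,
// referencing only the lower triangle of A. dA, dx, dy are assumed to be
// device pointers already set up by the caller and ldda >= n; the helper name
// and its arguments are hypothetical, and error handling is omitted.
static magma_int_t example_dsymv_lower(
    magma_int_t n,
    const double *dA, magma_int_t ldda,
    const double *dx,
    double *dy)
{
    double alpha = MAGMA_D_ONE;
    double beta  = MAGMA_D_ZERO;
    // uplo = 'L': only the lower triangular part of dA is read.
    return magmablas_dsymv( 'L', n, alpha, dA, ldda, dx, 1, beta, dy, 1 );
}
// ----------------------------------------------------------------------------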
/*************************************************************************
Purpose
=======
magmablas_dsymv_work performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n symmetric matrix.
the interface of magmablas_dsymv_work is different from magmablas_dsymv in
the last argument dwork
MAGMA implements dsymv through two steps:
    1) perform the multiplication in each thread block and put the intermediate values
    in a region of device memory called the working space; dwork is that working space
2) sum the intermediate values and store the final result in y.
The size of dwork is
lda * ceil(n/thread_x)
where thread_x = 64
    magmablas_dsymv_work requires users to provide a working space, while magmablas_dsymv is
    a wrapper routine that allocates the working space inside the routine
    and provides the same interface as cublas.
    If users need to call dsymv frequently, we suggest using magmablas_dsymv_work instead of magmablas_dsymv,
    as the overhead of allocating and freeing device memory in magmablas_dsymv would hurt performance.
Our tests show that this penalty is about 10 Gflop/s when matrix size is around 10000.
*/
extern "C"
magma_int_t
magmablas_dsymv_work(
char uplo, magma_int_t n,
double alpha,
const double *A, magma_int_t lda,
const double *x, magma_int_t incx,
double beta,
double *y, magma_int_t incy,
double *dwork, magma_int_t lwork)
{
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x version
// magmablas for [sdc] precisions, cublas for [z] precisions.
// NOTE: [z]symv is not implemented in cublas v1, but is in cublas v2.
#if defined(PRECISION_z)
fprintf(stderr, "%s: %s\n", __func__, "real case not implemented");
//hipblasDsymv( uplo, n, alpha, A, lda, x, incx, beta, y, incy );
#else
magmablas_dsymv_tesla_work( uplo, n, alpha, A, lda, x, incx, beta, y, incy, dwork, lwork );
#endif
return MAGMA_SUCCESS;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
char uplo_[2] = {uplo, 0};
int upper = lapackf77_lsame(uplo_, "U");
/*
* Test the input parameters.
*/
if ( (! upper) && (! lapackf77_lsame(uplo_, "L")) ) {
return -1;
} else if ( n < 0 ) {
return -2;
} else if ( lda < max(1,n) ) {
return -5;
} else if ( incx == 0 ) {
return -7;
} else if ( incy == 0 ) {
return -10;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_D_EQUAL(alpha, MAGMA_D_ZERO) && MAGMA_D_EQUAL(beta, MAGMA_D_ONE) ) )
return MAGMA_SUCCESS;
/* TODO: Upper case is not implemented in MAGMA */
/* NOTE: [cz]symv are not implemented in cublas v1, but are in cublas v2. */
if ( upper ) {
#if defined(PRECISION_z) || defined(PRECISION_c)
fprintf(stderr, "%s: %s\n", __func__, "Upper case not implemented");
return MAGMA_ERR_NOT_SUPPORTED;
#else
hipblasDsymv(uplo, n, alpha, A, lda, x, incx, beta, y, incy);
#endif
}
else {
/* TODO check lwork size! */
magmablas_dsymv_fermi_L(n, alpha, A, lda, x, incx, beta, y, incy, dwork);
}
return MAGMA_SUCCESS;
}
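
// --- Illustrative addition (not part of the original file) -----------------
// Minimal sketch of sizing and allocating the workspace that
// magmablas_dsymv_work expects, mirroring what the magmablas_dsymv wrapper
// above does internally (blocks = ceil(n/thread_x), lwork = lda*(blocks+1),
// thread_x = 64). The helper name and its arguments are hypothetical, and
// MAGMA_ERR_DEVICE_ALLOC is assumed to be provided by the MAGMA headers.
static magma_int_t example_dsymv_work_lower(
    magma_int_t n, double alpha,
    const double *dA, magma_int_t ldda,
    const double *dx, double beta,
    double *dy)
{
    double *dwork;
    magma_int_t blocks = (n - 1)/thread_x + 1;
    magma_int_t lwork  = ldda * (blocks + 1);
    if ( MAGMA_SUCCESS != magma_dmalloc( &dwork, lwork ) )
        return MAGMA_ERR_DEVICE_ALLOC;
    magma_int_t info = magmablas_dsymv_work( 'L', n, alpha, dA, ldda, dx, 1,
                                             beta, dy, 1, dwork, lwork );
    magma_free( dwork );
    return info;
}
// ----------------------------------------------------------------------------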
| 05efe069fea3b674ba09c6d68b9e9758efca8467.cu | /*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated d Tue Dec 17 13:18:45 2013
*/
#include "common_magma.h"
#include "commonblas_d.h"
#define PRECISION_d
/* The version for tesla can be found in dsymv_tesla.cu */
#define symv_bs 64
#define thread_x 64
#define thread_y 4
#define bank_shift 33
#define quarter_thread_x 16
#define half_thread_x 32
/*******************************************************************************
* Lower case, where n is multiple of block size (symv_bs)
*/
__global__ void
dsymv_kernel_fermi_L_special(
int n, double alpha,
const double * __restrict__ A, int lda,
const double * __restrict__ x, int incx,
double beta,
double * __restrict__ y, int incy,
double * __restrict__ WC)
{
#if (__CUDA_ARCH__ >= 200)
int tx = threadIdx.x;
int ty = threadIdx.y;
int blkc = blockIdx.x;
double res = MAGMA_D_ZERO;
double res_ = MAGMA_D_ZERO;
double res1 = MAGMA_D_ZERO;
__shared__ double la [quarter_thread_x][thread_x+2];
__shared__ double buff [thread_x];
__shared__ double buff2 [thread_x];
double tr[4];
double b[4];
int break_d = thread_x * blkc;
const int td = (thread_x * ty) + tx;
int tx_ = td % half_thread_x;
int ty_ = td / half_thread_x;
WC += break_d + tx;
x += (break_d + tx) * incx;
A += break_d * (lda+1);
A += ty_ * lda + tx_;
if ( ty == 0 ) {
buff[tx] = x[0];
    } // obtain the vector x and store it in buff
tx = tx_; ty = ty_;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
la[0][ bank_shift * (ty_+j) + tx_] = A[ j * lda];
__syncthreads();
#pragma unroll
for(int i=ty_*4; i < (ty_ * 4 + 4); i++) {
if ( i < tx_ )
la[0][bank_shift * tx_ + i] = la[0][ bank_shift * i + tx_];
else
la[0][bank_shift * tx_ + i] = la[0][ bank_shift * tx_ + i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += la[0][bank_shift * tx_ + j + ty_ * 4] * buff[j + ty_ * 4];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_ == 0 ) {
res1 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
}
else {
res1 = MAGMA_D_ZERO;
}
__syncthreads();
res = MAGMA_D_ZERO;
A += half_thread_x + half_thread_x*lda;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
__syncthreads();
#pragma unroll
for(int i=ty_*4; i < (4+ty_*4); i++) {
if ( i < tx_ ) {
la[0][bank_shift*tx_+i] = la[0][bank_shift*i+tx_];
}
else
la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x + j + 4 * ty_];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
double res2;
res2 = MAGMA_D_ZERO;
if ( ty_ == 1 ) {
res2 = la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
}
else {
res2 = MAGMA_D_ZERO;
}
__syncthreads();
res = MAGMA_D_ZERO;
A -= half_thread_x*lda;
res_ = MAGMA_D_ZERO;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
tr[j/8] = A[ j * lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff[ j*8 + ty_];
la[0][bank_shift*(ty_+j*8)+tx_] = tr[j];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x +j+ty_*4];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_ == 1 ) {
res2 = res2
+ la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
}
else {
res2 = MAGMA_D_ZERO;
}
__syncthreads();
la[0][bank_shift*tx_+ty_] = res_;
__syncthreads();
if ( ty_ == 0 ) {
res1 = res1
+ la[0][tx_*bank_shift+0]+la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]+la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]+la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]+la[0][tx_*bank_shift+7];
}
else {
res1 = MAGMA_D_ZERO;
}
A -= half_thread_x;
__syncthreads();
tx = threadIdx.x;
ty = threadIdx.y;
if ( ty_ == 0 && ty == 0 )
res = res1;
else if ( ty_ == 1 && ty == 0 )
res = res2;
else {
res = MAGMA_D_ZERO;
}
A -= ty_ * lda;
A -= tx_;
A = A - lda * blkc * thread_x;
x = x - blkc * thread_x * incx;
A += 4 * ty * lda;
A += tx;
int wc_c = 0;
int count = 0;
tx_ = td % quarter_thread_x;
ty_ = td / quarter_thread_x;
WC -= tx;
WC += tx_;
if ( blkc * thread_x >= thread_x ) {
#pragma unroll
for( int i=0; i < thread_x; i += thread_x ) {
res_ = MAGMA_D_ZERO;
count++;
if ( ty == 0 ) {
buff2[tx] = x[i*incx];
}
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++ ) {
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff2[ quarter_thread_x * k + ty*4 + j];
la[j + ty*4][tx] = tr[j] * buff[tx];
}
__syncthreads();
res_ = MAGMA_D_ZERO;
#pragma unroll
for(int j=0; j < 4; j++) {
res_ += la[tx_][ty_*4+j];
}
b[k] = res_;
__syncthreads();
A += lda * quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k] = b[k];
}
__syncthreads();
if ( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c++;
__syncthreads();
}
}
for(int i=thread_x; i < (blkc * thread_x); i += thread_x ) {
res_ = MAGMA_D_ZERO;
count++;
if ( ty == 0 ) {
buff2[tx] = x[i*incx];
}
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++ ) {
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff2[ quarter_thread_x*k + ty*4+(j)];
la[j + ty*4][tx] = tr[j] * buff[tx];
}
__syncthreads();
res_ = MAGMA_D_ZERO;
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[tx_][ty_*4+j];
b[k] = res_;
__syncthreads();
A += lda * quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k] = b[k];
}
__syncthreads();
if ( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c++;
__syncthreads();
}
WC += tx;
WC -= tx_;
la[ty][tx] = res;
__syncthreads();
if ( ty == 0 ) {
res = la[0][tx]+ la[1][tx]
+ la[2][tx]+ la[3][tx];
WC[0+lda*(blkc)] = res;
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
/**************************************************************
* Lower case for generic sizes
*/
__global__ void
dsymv_kernel_fermi_L_generic(
int n, double alpha,
const double * __restrict__ A, int lda,
const double * __restrict__ x, int incx,
double beta,
double * __restrict__ y, int incy,
double * __restrict__ WC,
int m_mod_thread_x)
{
#if (__CUDA_ARCH__ >= 200)
int tx = threadIdx.x;
int ty = threadIdx.y;
int blkc = blockIdx.x;
double res = MAGMA_D_ZERO;
double res_ = MAGMA_D_ZERO;
double res1 = MAGMA_D_ZERO;
__shared__ double la [quarter_thread_x][thread_x+2];
__shared__ double buff [thread_x];
__shared__ double buff2[thread_x];
double tr[4];
double b[8];
int break_d = thread_x * blkc;
const int td = (thread_x * ty) + tx;
int tx_ = td % half_thread_x;
int ty_ = td / half_thread_x;
WC += break_d + tx;
x += (break_d + tx) * incx;
A += break_d * (lda+1);
A += lda * ty_;
int trackA;
if ( blkc == ( gridDim.x - 1 ) ) {
if ( ty == 0 ) {
if ( tx > m_mod_thread_x ) {
buff[tx] = MAGMA_D_ZERO;
}
else
buff[tx] = x[0];
}
if ( tx_ > m_mod_thread_x )
trackA = m_mod_thread_x;
else
trackA = tx_;
A += trackA;
}
else {
if ( ty == 0 ) {
buff[tx] = x[0];
}
trackA = tx_;
A += trackA;
}
    // Somehow merging these two if-else blocks creates a problem.
    // It could be a potential bug -- from synchronization, from CUDA, or from the compiler.
if ( blkc == ( gridDim.x - 1 ) ) {
#pragma unroll
for(int j=0; j < half_thread_x; j += 8) {
if ( ( ty_ + j ) > m_mod_thread_x ) {
la[0][bank_shift*(ty_+j)+tx_] = MAGMA_D_MAKE( 9999, 0 );
}
else
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
A -= trackA;
}
else {
#pragma unroll
for(int j=0; j < half_thread_x; j += 8) {
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
}
tx = tx_;
ty = ty_;
__syncthreads();
#pragma unroll
for(int i=ty_*4; i < (ty_*4+4); i++) {
if ( i < tx_ )
la[0][bank_shift*tx_+i] = la[0][bank_shift*i+tx_];
else
la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += la[0][bank_shift*tx_+j+ty_*4] * buff[j+ty_*4];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_ == 0 ) {
res1 = la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
}
else {
res1 = MAGMA_D_ZERO;
}
__syncthreads();
res = MAGMA_D_ZERO;
if ( blkc == ( gridDim.x - 1 ) ) {
if ( (tx_+half_thread_x) > m_mod_thread_x )
trackA = m_mod_thread_x;
else
trackA = tx_ + half_thread_x;
A += trackA+half_thread_x*lda;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8) {
if ( ( ty_ + j+half_thread_x ) > m_mod_thread_x ) {
la[0][bank_shift*(ty_+j)+tx_] = MAGMA_D_MAKE( 99999, 0 );
}
else
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
A -= trackA+half_thread_x*lda;
A += tx_;
A += half_thread_x + half_thread_x*lda;
}
else {
A += half_thread_x + half_thread_x*lda;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8) {
la[0][bank_shift*(ty_+j)+tx_] = A[ j * lda];
}
}
__syncthreads();
#pragma unroll
for(int i=ty_*4; i < (4+ty_*4); i++) {
if ( i < tx_ ) {
la[0][bank_shift*tx_+i] = la[0][bank_shift*i+tx_];
}
else
la[0][bank_shift*tx_+i] = la[0][bank_shift*tx_+i];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res += la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x + j + 4 * ty_];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
double res2;
res2 = MAGMA_D_ZERO;
if ( ty_ == 1 ) {
res2 = la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
}
else {
res2 = MAGMA_D_ZERO;
}
__syncthreads();
res = MAGMA_D_ZERO;
res_ = MAGMA_D_ZERO;
A -= half_thread_x*lda;
if ( blkc == ( gridDim.x - 1 ) ) {
A -= tx_;
if ( tx_ > m_mod_thread_x )
trackA = m_mod_thread_x;
else
trackA = tx_;
A += trackA;
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
if ( ( ty_ + j ) > m_mod_thread_x ) {
tr[j/8] = MAGMA_D_MAKE( 99999, 0 );
}
else
tr[j/8] = A[ j * lda];
A -= trackA;
A += tx_;
}
else {
#pragma unroll
for(int j=0; j < half_thread_x; j += 8)
tr[j/8] = A[ j * lda];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j] * buff[ j*8 + ty_];
la[0][bank_shift*(ty_+j*8)+tx_] = tr[j];
}
__syncthreads();
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[0][bank_shift*tx_+j+ty_*4] * buff[half_thread_x +j+ty_*4];
__syncthreads();
la[0][bank_shift*tx_+ty_] = res;
__syncthreads();
if ( ty_ == 1 ) {
res2 = res2
+ la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
}
else {
res2 = MAGMA_D_ZERO;
}
__syncthreads();
la[0][bank_shift*tx_+ty_] = res_;
__syncthreads();
if ( ty_ == 0 ) {
res1 = res1
+ la[0][tx_*bank_shift+0]
+ la[0][tx_*bank_shift+1]
+ la[0][tx_*bank_shift+2]
+ la[0][tx_*bank_shift+3]
+ la[0][tx_*bank_shift+4]
+ la[0][tx_*bank_shift+5]
+ la[0][tx_*bank_shift+6]
+ la[0][tx_*bank_shift+7];
}
else {
res1 = MAGMA_D_ZERO;
}
A -= half_thread_x;
__syncthreads();
tx = threadIdx.x;
ty = threadIdx.y;
if ( ty_ == 0 && ty == 0 )
res = res1;
else if ( ty_ == 1 && ty == 0 )
res = res2;
else {
res = MAGMA_D_ZERO;
}
A -= ty_ * lda;
A -= tx_;
A = A - lda*break_d;
x = x - break_d*incx;
A += 4 * ty * lda;
if ( blkc == ( gridDim.x - 1 ) ) {
if ( tx <= m_mod_thread_x )
A += tx;
else
A += m_mod_thread_x;
}
else{
A += tx;
}
int wc_c = 0;
int count = 0;
tx_ = td % quarter_thread_x;
ty_ = td / quarter_thread_x;
WC -= tx;
WC += tx_;
#pragma unroll
for(int j=0; j < 4; j++)
b[j] = buff[ty_*4+j];
if ( break_d > 0 )
#pragma unroll
for( int i=0; i < thread_x; i += thread_x ) {
res_ = MAGMA_D_ZERO;
count++;
if ( ty == 0 ) {
buff2[tx] = x[i*incx];
}
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++ ) {
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j]*buff2[quarter_thread_x*k + ty*4+(j)];
la[( (j)+ty*4)][tx] = tr[j];
}
__syncthreads();
res_ = MAGMA_D_ZERO;
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[tx_][ty_*4+j] * b[j];
b[4+k] = res_;
__syncthreads();
A += lda * quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k] = b[4+k];
}
__syncthreads();
if ( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c++;
__syncthreads();
}
for( int i=thread_x; i < break_d; i += thread_x ) {
res_ = MAGMA_D_ZERO;
count++;
if ( ty == 0 )
buff2[tx] = x[i*incx];
__syncthreads();
#pragma unroll
for( int k=0; k < 4; k++ ) {
#pragma unroll
for(int j=0; j < 4; j++)
tr[j] = A[j*lda];
#pragma unroll
for(int j=0; j < 4; j++) {
res += tr[j]*buff2[quarter_thread_x*k + ty*4+(j)];
la[( (j)+ty*4)][tx] = tr[j];
}
__syncthreads();
res_ = MAGMA_D_ZERO;
#pragma unroll
for(int j=0; j < 4; j++)
res_ += la[tx_][ty_*4+j] * b[j];
b[4+k] = res_;
__syncthreads();
A += lda * quarter_thread_x;
}
#pragma unroll
for(int k=0; k < 4; k++) {
la[tx_][ty_+quarter_thread_x*k] = b[4+k];
}
__syncthreads();
if ( ty_ < 4 ) {
int k = ty_*quarter_thread_x;
res_ = la[tx_][0+k] + la[tx_][1+k]
+ la[tx_][2+k] + la[tx_][3+k]
+ la[tx_][4+k] + la[tx_][5+k]
+ la[tx_][6+k] + la[tx_][7+k]
+ la[tx_][8+k] + la[tx_][9+k]
+ la[tx_][10+k]+ la[tx_][11+k]
+ la[tx_][12+k]+ la[tx_][13+k]
+ la[tx_][14+k]+ la[tx_][15+k];
WC[k + wc_c*lda ] = res_;
}
wc_c++;
__syncthreads();
}
WC += tx;
WC -= tx_;
la[ty][tx] = res;
__syncthreads();
if ( ty == 0 ) {
res=la[0][tx]+ la[1][tx]+ la[2][tx]+ la[3][tx];
WC[0+lda*(blkc)] = res;
}
#endif /* (__CUDA_ARCH__ >= 200) */
}
__global__ void
dsymv_kernel_fermi_L_update(
int n, double alpha,
const double * __restrict__ A, int lda,
const double * __restrict__ x, int incx,
double beta,
double * __restrict__ y, int incy,
double * __restrict__ WC )
{
#if (__CUDA_ARCH__ >= 200)
int i;
int tx = threadIdx.x;
int ind = blockIdx.x * thread_x + tx;
double Ca;
Ca = MAGMA_D_ZERO;
WC += ind + lda * blockIdx.x;
for(i = blockIdx.x*thread_x; i < n; i += thread_x) {
Ca += WC[0];
WC += thread_x;
}
if ( ind < n )
y[ind * incy] = beta * y[ind * incy] + alpha * Ca;
#endif /* (__CUDA_ARCH__ >= 200) */
}
extern "C"
void magmablas_dsymv_fermi_L(
magma_int_t n, double alpha,
const double *A, magma_int_t lda,
const double *x, magma_int_t incx,
double beta,
double *y, magma_int_t incy,
double *dwork)
{
magma_int_t blocks = (n - 1)/symv_bs + 1;
dim3 grid(blocks, 1, 1);
dim3 threads(thread_x, thread_y, 1);
dim3 threads_u(symv_bs, 1, 1);
/*
* If matrix size is multiple of symv_bs, we use a specific code.
* otherwise, we call the generic case.
*/
if ( n % symv_bs == 0 ) {
dsymv_kernel_fermi_L_special<<< grid, threads, 0, magma_stream >>>
(n, alpha, A, lda, x, incx, beta, y, incy, dwork);
}
else{
magma_int_t m_mod_thread_x = (n % symv_bs) - 1;
dsymv_kernel_fermi_L_generic<<< grid, threads, 0, magma_stream >>>
(n, alpha, A, lda, x, incx, beta, y, incy, dwork, m_mod_thread_x);
}
dsymv_kernel_fermi_L_update<<< grid, threads_u, 0, magma_stream >>>
(n, alpha, A, lda, x, incx, beta, y, incy, dwork);
}
/*************************************************************************
Purpose
=======
magmablas_dsymv performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n symmetric matrix.
Arguments
==========
UPLO CHARACTER*1.
On entry, UPLO specifies whether the upper or lower
triangular part of the array A is to be referenced as
follows:
UPLO = 'U' or 'u' Only the upper triangular part of A
is to be referenced.
UPLO = 'L' or 'l' Only the lower triangular part of A
is to be referenced.
Unchanged on exit.
N INTEGER.
On entry, N specifies the order of the matrix A.
N must be at least zero.
Unchanged on exit.
ALPHA DOUBLE PRECISION.
On entry, ALPHA specifies the scalar alpha.
Unchanged on exit.
A DOUBLE PRECISION array of DIMENSION ( LDA, n ).
Before entry with UPLO = 'U' or 'u', the leading n by n
upper triangular part of the array A must contain the upper
triangular part of the symmetric matrix and the strictly
lower triangular part of A is not referenced.
Before entry with UPLO = 'L' or 'l', the leading n by n
lower triangular part of the array A must contain the lower
triangular part of the symmetric matrix and the strictly
upper triangular part of A is not referenced.
Note that the imaginary parts of the diagonal elements need
not be set and are assumed to be zero.
Unchanged on exit.
LDA INTEGER.
On entry, LDA specifies the first dimension of A as declared
in the calling (sub) program. LDA must be at least
max( 1, n ).
Unchanged on exit.
             It is recommended that lda be a multiple of 16. Otherwise
             performance will deteriorate, as the memory accesses
             will not be fully coalesced.
X DOUBLE PRECISION array of dimension at least
( 1 + ( n - 1 )*abs( INCX ) ).
Before entry, the incremented array X must contain the n
element vector x.
Unchanged on exit.
INCX INTEGER.
On entry, INCX specifies the increment for the elements of
X. INCX must not be zero.
Unchanged on exit.
BETA DOUBLE PRECISION.
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
Unchanged on exit.
Y DOUBLE PRECISION array of dimension at least
( 1 + ( n - 1 )*abs( INCY ) ).
Before entry, the incremented array Y must contain the n
element vector y. On exit, Y is overwritten by the updated
vector y.
INCY INTEGER.
On entry, INCY specifies the increment for the elements of
Y. INCY must not be zero.
Unchanged on exit.
*/
extern "C"
magma_int_t
magmablas_dsymv(
char uplo, magma_int_t n,
double alpha,
const double *A, magma_int_t lda,
const double *x, magma_int_t incx,
double beta,
double *y, magma_int_t incy)
{
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x version
// magmablas for [sdc] precisions, cublas for [z] precisions.
// NOTE: [cz]symv are not implemented in cublas v1, but are in cublas v2.
#if defined(PRECISION_z) || defined(PRECISION_c)
fprintf(stderr, "%s: %s\n", __func__, "real case not implemented");
//cublasDsymv( uplo, n, alpha, A, lda, x, incx, beta, y, incy );
#else
magmablas_dsymv_tesla( uplo, n, alpha, A, lda, x, incx, beta, y, incy );
#endif
return MAGMA_SUCCESS;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
char uplo_[2] = {uplo, 0};
int upper = lapackf77_lsame(uplo_, "U");
/*
* Test the input parameters.
*/
if ( (! upper) && (! lapackf77_lsame(uplo_, "L")) ) {
return -1;
} else if ( n < 0 ) {
return -2;
} else if ( lda < max(1,n) ) {
return -5;
} else if ( incx == 0 ) {
return -7;
} else if ( incy == 0 ) {
return -10;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_D_EQUAL(alpha, MAGMA_D_ZERO) && MAGMA_D_EQUAL(beta, MAGMA_D_ONE) ) )
return MAGMA_SUCCESS;
/* TODO: Upper case is not implemented in MAGMA */
/* NOTE: [cz]symv are not implemented in cublas v1, but are in cublas v2. */
if ( upper ) {
#if defined(PRECISION_z) || defined(PRECISION_c)
fprintf(stderr, "%s: %s\n", __func__, "Upper case not implemented");
//cublasDsymv( uplo, n, alpha, A, lda, x, incx, beta, y, incy );
#else
cublasDsymv(uplo, n, alpha, A, lda, x, incx, beta, y, incy);
#endif
}
else {
double *dwork;
magma_int_t blocks = (n - 1)/thread_x + 1;
magma_int_t lwork = lda * (blocks + 1);
// TODO deal with error
magma_dmalloc( &dwork, lwork );
magmablas_dsymv_work( uplo, n, alpha, A, lda, x, incx, beta, y, incy, dwork, lwork );
magma_free( dwork );
}
return MAGMA_SUCCESS;
}
/*************************************************************************
Purpose
=======
magmablas_dsymv_work performs the matrix-vector operation:
y := alpha*A*x + beta*y,
where alpha and beta are scalars, x and y are n element vectors and
A is an n by n symmetric matrix.
The interface of magmablas_dsymv_work differs from that of magmablas_dsymv only in
the trailing workspace arguments dwork and lwork.
MAGMA implements dsymv in two steps:
1) each thread block performs its part of the multiplication and writes the intermediate
values to a region of device memory called the working space; dwork is that working space.
2) the intermediate values are summed and the final result is stored in y.
The size of dwork is
lda * ceil(n/thread_x)
where thread_x = 64
magmablas_dsymv_work requires the user to provide the working space, while magmablas_dsymv is
a wrapper routine that allocates the working space internally
and provides the same interface as cublas.
If dsymv is called frequently, we suggest using magmablas_dsymv_work instead of magmablas_dsymv,
as the overhead of allocating and freeing device memory in magmablas_dsymv hurts performance.
Our tests show that this penalty is about 10 Gflop/s for matrices of size around 10000.
*/
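/*
    Workspace sizing sketch (illustrative only; it mirrors the wrapper above rather
    than prescribing the official MAGMA API — d_A, d_x, d_y and ldda are assumed
    caller-provided names, and thread_x = 64 is the block size used internally):

        magma_int_t blocks = (n - 1)/64 + 1;
        magma_int_t lwork  = ldda * (blocks + 1);
        double *dwork;
        magma_dmalloc( &dwork, lwork );
        magmablas_dsymv_work( 'L', n, alpha, d_A, ldda, d_x, 1, beta, d_y, 1,
                              dwork, lwork );
        magma_free( dwork );
*/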
extern "C"
magma_int_t
magmablas_dsymv_work(
char uplo, magma_int_t n,
double alpha,
const double *A, magma_int_t lda,
const double *x, magma_int_t incx,
double beta,
double *y, magma_int_t incy,
double *dwork, magma_int_t lwork)
{
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x version
// magmablas for [sdc] precisions, cublas for [z] precisions.
// NOTE: [z]symv is not implemented in cublas v1, but is in cublas v2.
#if defined(PRECISION_z)
fprintf(stderr, "%s: %s\n", __func__, "real case not implemented");
//cublasDsymv( uplo, n, alpha, A, lda, x, incx, beta, y, incy );
#else
magmablas_dsymv_tesla_work( uplo, n, alpha, A, lda, x, incx, beta, y, incy, dwork, lwork );
#endif
return MAGMA_SUCCESS;
}
// --------------------
// CUDA ARCH 2.x (Fermi) version
char uplo_[2] = {uplo, 0};
int upper = lapackf77_lsame(uplo_, "U");
/*
* Test the input parameters.
*/
if ( (! upper) && (! lapackf77_lsame(uplo_, "L")) ) {
return -1;
} else if ( n < 0 ) {
return -2;
} else if ( lda < max(1,n) ) {
return -5;
} else if ( incx == 0 ) {
return -7;
} else if ( incy == 0 ) {
return -10;
}
/*
* Quick return if possible.
*/
if ( (n == 0) || ( MAGMA_D_EQUAL(alpha, MAGMA_D_ZERO) && MAGMA_D_EQUAL(beta, MAGMA_D_ONE) ) )
return MAGMA_SUCCESS;
/* TODO: Upper case is not implemented in MAGMA */
/* NOTE: [cz]symv are not implemented in cublas v1, but are in cublas v2. */
if ( upper ) {
#if defined(PRECISION_z) || defined(PRECISION_c)
fprintf(stderr, "%s: %s\n", __func__, "Upper case not implemented");
return MAGMA_ERR_NOT_SUPPORTED;
#else
cublasDsymv(uplo, n, alpha, A, lda, x, incx, beta, y, incy);
#endif
}
else {
/* TODO check lwork size! */
magmablas_dsymv_fermi_L(n, alpha, A, lda, x, incx, beta, y, incy, dwork);
}
return MAGMA_SUCCESS;
}
|
f15919daeea1c08ecfdc2c773183eff907649a90.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
This work is part of the Core Imaging Library developed by
Visual Analytics and Imaging System Group of the Science Technology
Facilities Council, STFC
Copyright 2017 Daniil Kazantsev
Copyright 2017 Srikanth Nagella, Edoardo Pasca
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "LLT_ROF_GPU_core.h"
#include "shared.h"
#include <thrust/functional.h>
#include <thrust/device_vector.h>
#include <thrust/transform_reduce.h>
/* CUDA implementation of Lysaker, Lundervold and Tai (LLT) model [1] combined with Rudin-Osher-Fatemi [2] TV regularisation penalty.
*
* This penalty can deliver visually pleasant piecewise-smooth recovery if regularisation parameters are selected well.
* The rule of thumb for selection is to start with lambdaLLT = 0 (just the ROF-TV model) and then proceed to increase
* lambdaLLT starting with smaller values.
*
* Input Parameters:
* 1. U0 - original noisy image/volume
* 2. lambdaROF - ROF-related regularisation parameter
* 3. lambdaLLT - LLT-related regularisation parameter
* 4. iter - iterations number (for both models)
* 5. tau - time-marching step
* 6. epsilon - tolerance constant
* Output:
* [1] Filtered/regularized image/volume
* [2] Information vector which contains [iteration no., reached tolerance]
*
* References:
* [1] Lysaker, M., Lundervold, A. and Tai, X.C., 2003. Noise removal using fourth-order partial differential equation with applications to medical magnetic resonance images in space and time. IEEE Transactions on image processing, 12(12), pp.1579-1590.
* [2] Rudin, Osher, Fatemi, "Nonlinear Total Variation based noise removal algorithms"
*/
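/*
 * Example host-side call (illustrative sketch only; the parameter values and the
 * array names h_in/h_out/info are assumptions, not taken from the original source).
 * Denoising a 2D image of size N x M stored in the host array h_in:
 *
 *     float info[2];
 *     LLT_ROF_GPU_main(h_in, h_out, info,
 *                      1.0f,     // lambdaROF
 *                      0.01f,    // lambdaLLT (start small, as suggested above)
 *                      1000,     // iterations
 *                      0.0025f,  // tau, time-marching step
 *                      1.0e-06f, // epsilon, tolerance (0 disables the stopping rule)
 *                      N, M, 0); // Z = 0 selects the 2D path
 *     // info[0] = iterations performed, info[1] = reached tolerance
 */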
#define BLKXSIZE 8
#define BLKYSIZE 8
#define BLKZSIZE 8
#define BLKXSIZE2D 16
#define BLKYSIZE2D 16
#define EPS_LLT 1.0e-12
#define EPS_ROF 1.0e-12
#define idivup(a, b) ( ((a)%(b) != 0) ? (a)/(b)+1 : (a)/(b) )
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
__host__ __device__ int signLLT (float x)
{
return (x > 0) - (x < 0);
}
/*************************************************************************/
/**********************LLT-related functions *****************************/
/*************************************************************************/
__global__ void der2D_LLT_kernel(float *U, float *D1, float *D2, int dimX, int dimY)
{
int i_p, i_m, j_m, j_p;
float dxx, dyy, denom_xx, denom_yy;
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int index = i + dimX*j;
if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY)) {
/* symmetric boundary conditions (Neumann) */
i_p = i + 1; if (i_p == dimX) i_p = i - 1;
i_m = i - 1; if (i_m < 0) i_m = i + 1;
j_p = j + 1; if (j_p == dimY) j_p = j - 1;
j_m = j - 1; if (j_m < 0) j_m = j + 1;
dxx = U[j*dimX+i_p] - 2.0f*U[index] + U[j*dimX+i_m];
dyy = U[j_p*dimX+i] - 2.0f*U[index] + U[j_m*dimX+i];
denom_xx = abs(dxx) + EPS_LLT;
denom_yy = abs(dyy) + EPS_LLT;
D1[index] = dxx / denom_xx;
D2[index] = dyy / denom_yy;
}
}
__global__ void der3D_LLT_kernel(float* U, float *D1, float *D2, float *D3, int dimX, int dimY, int dimZ)
{
int i_p, i_m, j_m, j_p, k_p, k_m;
float dxx, dyy, dzz, denom_xx, denom_yy, denom_zz;
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int k = blockDim.z * blockIdx.z + threadIdx.z;
if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY) && (k >= 0) && (k < dimZ)) {
/* symmetric boundary conditions (Neumann) */
i_p = i + 1; if (i_p == dimX) i_p = i - 1;
i_m = i - 1; if (i_m < 0) i_m = i + 1;
j_p = j + 1; if (j_p == dimY) j_p = j - 1;
j_m = j - 1; if (j_m < 0) j_m = j + 1;
k_p = k + 1; if (k_p == dimZ) k_p = k - 1;
k_m = k - 1; if (k_m < 0) k_m = k + 1;
int index = (dimX*dimY)*k + j*dimX+i;
dxx = U[(dimX*dimY)*k + j*dimX+i_p] - 2.0f*U[index] + U[(dimX*dimY)*k + j*dimX+i_m];
dyy = U[(dimX*dimY)*k + j_p*dimX+i] - 2.0f*U[index] + U[(dimX*dimY)*k + j_m*dimX+i];
dzz = U[(dimX*dimY)*k_p + j*dimX+i] - 2.0f*U[index] + U[(dimX*dimY)*k_m + j*dimX+i];
denom_xx = abs(dxx) + EPS_LLT;
denom_yy = abs(dyy) + EPS_LLT;
denom_zz = abs(dzz) + EPS_LLT;
D1[index] = dxx / denom_xx;
D2[index] = dyy / denom_yy;
D3[index] = dzz / denom_zz;
}
}
/*************************************************************************/
/**********************ROF-related functions *****************************/
/*************************************************************************/
/* first-order differences 1 */
__global__ void D1_func2D_ROF_kernel(float* Input, float* D1, int N, int M)
{
int i1, j1, i2;
float NOMx_1,NOMy_1,NOMy_0,denom1,denom2,T1;
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int index = i + N*j;
if ((i >= 0) && (i < N) && (j >= 0) && (j < M)) {
/* boundary conditions (Neumann reflections) */
i1 = i + 1; if (i1 >= N) i1 = i-1;
i2 = i - 1; if (i2 < 0) i2 = i+1;
j1 = j + 1; if (j1 >= M) j1 = j-1;
/* Forward-backward differences */
NOMx_1 = Input[j1*N + i] - Input[index]; /* x+ */
NOMy_1 = Input[j*N + i1] - Input[index]; /* y+ */
NOMy_0 = Input[index] - Input[j*N + i2]; /* y- */
denom1 = NOMx_1*NOMx_1;
denom2 = 0.5f*(signLLT((float)NOMy_1) + signLLT((float)NOMy_0))*(MIN(abs((float)NOMy_1),abs((float)NOMy_0)));
denom2 = denom2*denom2;
T1 = sqrt(denom1 + denom2 + EPS_ROF);
D1[index] = NOMx_1/T1;
}
}
/* differences 2 */
__global__ void D2_func2D_ROF_kernel(float* Input, float* D2, int N, int M)
{
int i1, j1, j2;
float NOMx_1,NOMy_1,NOMx_0,denom1,denom2,T2;
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int index = i + N*j;
if ((i >= 0) && (i < (N)) && (j >= 0) && (j < (M))) {
/* boundary conditions (Neumann reflections) */
i1 = i + 1; if (i1 >= N) i1 = i-1;
j1 = j + 1; if (j1 >= M) j1 = j-1;
j2 = j - 1; if (j2 < 0) j2 = j+1;
/* Forward-backward differences */
NOMx_1 = Input[j1*N + i] - Input[index]; /* x+ */
NOMy_1 = Input[j*N + i1] - Input[index]; /* y+ */
NOMx_0 = Input[index] - Input[j2*N + i]; /* x- */
denom1 = NOMy_1*NOMy_1;
denom2 = 0.5f*(signLLT((float)NOMx_1) + signLLT((float)NOMx_0))*(MIN(abs((float)NOMx_1),abs((float)NOMx_0)));
denom2 = denom2*denom2;
T2 = sqrt(denom1 + denom2 + EPS_ROF);
D2[index] = NOMy_1/T2;
}
}
/* differences 1 */
__global__ void D1_func3D_ROF_kernel(float* Input, float* D1, int dimX, int dimY, int dimZ)
{
float NOMx_1, NOMy_1, NOMy_0, NOMz_1, NOMz_0, denom1, denom2,denom3, T1;
int i1,i2,k1,j1,j2,k2;
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int k = blockDim.z * blockIdx.z + threadIdx.z;
int index = (dimX*dimY)*k + j*dimX+i;
if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY) && (k >= 0) && (k < dimZ)) {
/* symmetric boundary conditions (Neumann) */
i1 = i + 1; if (i1 >= dimX) i1 = i-1;
i2 = i - 1; if (i2 < 0) i2 = i+1;
j1 = j + 1; if (j1 >= dimY) j1 = j-1;
j2 = j - 1; if (j2 < 0) j2 = j+1;
k1 = k + 1; if (k1 >= dimZ) k1 = k-1;
k2 = k - 1; if (k2 < 0) k2 = k+1;
/* Forward-backward differences */
NOMx_1 = Input[(dimX*dimY)*k + j1*dimX + i] - Input[index]; /* x+ */
NOMy_1 = Input[(dimX*dimY)*k + j*dimX + i1] - Input[index]; /* y+ */
NOMy_0 = Input[index] - Input[(dimX*dimY)*k + j*dimX + i2]; /* y- */
NOMz_1 = Input[(dimX*dimY)*k1 + j*dimX + i] - Input[index]; /* z+ */
NOMz_0 = Input[index] - Input[(dimX*dimY)*k2 + j*dimX + i]; /* z- */
denom1 = NOMx_1*NOMx_1;
denom2 = 0.5*(signLLT(NOMy_1) + signLLT(NOMy_0))*(MIN(abs(NOMy_1),abs(NOMy_0)));
denom2 = denom2*denom2;
denom3 = 0.5*(signLLT(NOMz_1) + signLLT(NOMz_0))*(MIN(abs(NOMz_1),abs(NOMz_0)));
denom3 = denom3*denom3;
T1 = sqrt(denom1 + denom2 + denom3 + EPS_ROF);
D1[index] = NOMx_1/T1;
}
}
/* differences 2 */
__global__ void D2_func3D_ROF_kernel(float* Input, float* D2, int dimX, int dimY, int dimZ)
{
float NOMx_1, NOMy_1, NOMx_0, NOMz_1, NOMz_0, denom1, denom2, denom3, T2;
int i1,i2,k1,j1,j2,k2;
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int k = blockDim.z * blockIdx.z + threadIdx.z;
int index = (dimX*dimY)*k + j*dimX+i;
if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY) && (k >= 0) && (k < dimZ)) {
/* symmetric boundary conditions (Neumann) */
i1 = i + 1; if (i1 >= dimX) i1 = i-1;
i2 = i - 1; if (i2 < 0) i2 = i+1;
j1 = j + 1; if (j1 >= dimY) j1 = j-1;
j2 = j - 1; if (j2 < 0) j2 = j+1;
k1 = k + 1; if (k1 >= dimZ) k1 = k-1;
k2 = k - 1; if (k2 < 0) k2 = k+1;
/* Forward-backward differences */
NOMx_1 = Input[(dimX*dimY)*k + (j1)*dimX + i] - Input[index]; /* x+ */
NOMy_1 = Input[(dimX*dimY)*k + (j)*dimX + i1] - Input[index]; /* y+ */
NOMx_0 = Input[index] - Input[(dimX*dimY)*k + (j2)*dimX + i]; /* x- */
NOMz_1 = Input[(dimX*dimY)*k1 + j*dimX + i] - Input[index]; /* z+ */
NOMz_0 = Input[index] - Input[(dimX*dimY)*k2 + (j)*dimX + i]; /* z- */
denom1 = NOMy_1*NOMy_1;
denom2 = 0.5*(signLLT(NOMx_1) + signLLT(NOMx_0))*(MIN(abs(NOMx_1),abs(NOMx_0)));
denom2 = denom2*denom2;
denom3 = 0.5*(signLLT(NOMz_1) + signLLT(NOMz_0))*(MIN(abs(NOMz_1),abs(NOMz_0)));
denom3 = denom3*denom3;
T2 = sqrt(denom1 + denom2 + denom3 + EPS_ROF);
D2[index] = NOMy_1/T2;
}
}
/* differences 3 */
__global__ void D3_func3D_ROF_kernel(float* Input, float* D3, int dimX, int dimY, int dimZ)
{
float NOMx_1, NOMy_1, NOMx_0, NOMy_0, NOMz_1, denom1, denom2, denom3, T3;
int i1,i2,k1,j1,j2,k2;
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int k = blockDim.z * blockIdx.z + threadIdx.z;
int index = (dimX*dimY)*k + j*dimX+i;
if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY) && (k >= 0) && (k < dimZ)) {
i1 = i + 1; if (i1 >= dimX) i1 = i-1;
i2 = i - 1; if (i2 < 0) i2 = i+1;
j1 = j + 1; if (j1 >= dimY) j1 = j-1;
j2 = j - 1; if (j2 < 0) j2 = j+1;
k1 = k + 1; if (k1 >= dimZ) k1 = k-1;
k2 = k - 1; if (k2 < 0) k2 = k+1;
/* Forward-backward differences */
NOMx_1 = Input[(dimX*dimY)*k + (j1)*dimX + i] - Input[index]; /* x+ */
NOMy_1 = Input[(dimX*dimY)*k + (j)*dimX + i1] - Input[index]; /* y+ */
NOMy_0 = Input[index] - Input[(dimX*dimY)*k + (j)*dimX + i2]; /* y- */
NOMx_0 = Input[index] - Input[(dimX*dimY)*k + (j2)*dimX + i]; /* x- */
NOMz_1 = Input[(dimX*dimY)*k1 + j*dimX + i] - Input[index]; /* z+ */
denom1 = NOMz_1*NOMz_1;
denom2 = 0.5*(signLLT(NOMx_1) + signLLT(NOMx_0))*(MIN(abs(NOMx_1),abs(NOMx_0)));
denom2 = denom2*denom2;
denom3 = 0.5*(signLLT(NOMy_1) + signLLT(NOMy_0))*(MIN(abs(NOMy_1),abs(NOMy_0)));
denom3 = denom3*denom3;
T3 = sqrt(denom1 + denom2 + denom3 + EPS_ROF);
D3[index] = NOMz_1/T3;
}
}
/*************************************************************************/
/**********************ROF-LLT-related functions *************************/
/*************************************************************************/
__global__ void Update2D_LLT_ROF_kernel(float *U0, float *U, float *D1_LLT, float *D2_LLT, float *D1_ROF, float *D2_ROF, float lambdaROF, float lambdaLLT, float tau, int dimX, int dimY)
{
int i_p, i_m, j_m, j_p;
float div, laplc, dxx, dyy, dv1, dv2;
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int index = i + dimX*j;
if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY)) {
/* symmetric boundary conditions (Neumann) */
i_p = i + 1; if (i_p == dimX) i_p = i - 1;
i_m = i - 1; if (i_m < 0) i_m = i + 1;
j_p = j + 1; if (j_p == dimY) j_p = j - 1;
j_m = j - 1; if (j_m < 0) j_m = j + 1;
index = j*dimX+i;
/*LLT-related part*/
dxx = D1_LLT[j*dimX+i_p] - 2.0f*D1_LLT[index] + D1_LLT[j*dimX+i_m];
dyy = D2_LLT[j_p*dimX+i] - 2.0f*D2_LLT[index] + D2_LLT[j_m*dimX+i];
laplc = dxx + dyy; /*build Laplacian*/
/*ROF-related part*/
dv1 = D1_ROF[index] - D1_ROF[j_m*dimX + i];
dv2 = D2_ROF[index] - D2_ROF[j*dimX + i_m];
div = dv1 + dv2; /* build divergence */
/*combine all into one cost function to minimise */
U[index] += tau*(lambdaROF*(div) - lambdaLLT*(laplc) - (U[index] - U0[index]));
}
}
__global__ void Update3D_LLT_ROF_kernel(float *U0, float *U, float *D1_LLT, float *D2_LLT, float *D3_LLT, float *D1_ROF, float *D2_ROF, float *D3_ROF, float lambdaROF, float lambdaLLT, float tau, int dimX, int dimY, int dimZ)
{
int i_p, i_m, j_m, j_p, k_p, k_m;
float div, laplc, dxx, dyy, dzz, dv1, dv2, dv3;
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int k = blockDim.z * blockIdx.z + threadIdx.z;
if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY) && (k >= 0) && (k < dimZ)) {
/* symmetric boundary conditions (Neumann) */
i_p = i + 1; if (i_p == dimX) i_p = i - 1;
i_m = i - 1; if (i_m < 0) i_m = i + 1;
j_p = j + 1; if (j_p == dimY) j_p = j - 1;
j_m = j - 1; if (j_m < 0) j_m = j + 1;
k_p = k + 1; if (k_p == dimZ) k_p = k - 1;
k_m = k - 1; if (k_m < 0) k_m = k + 1;
int index = (dimX*dimY)*k + j*dimX+i;
/*LLT-related part*/
dxx = D1_LLT[(dimX*dimY)*k + j*dimX+i_p] - 2.0f*D1_LLT[index] + D1_LLT[(dimX*dimY)*k + j*dimX+i_m];
dyy = D2_LLT[(dimX*dimY)*k + j_p*dimX+i] - 2.0f*D2_LLT[index] + D2_LLT[(dimX*dimY)*k + j_m*dimX+i];
dzz = D3_LLT[(dimX*dimY)*k_p + j*dimX+i] - 2.0f*D3_LLT[index] + D3_LLT[(dimX*dimY)*k_m + j*dimX+i];
laplc = dxx + dyy + dzz; /*build Laplacian*/
/*ROF-related part*/
dv1 = D1_ROF[index] - D1_ROF[(dimX*dimY)*k + j_m*dimX+i];
dv2 = D2_ROF[index] - D2_ROF[(dimX*dimY)*k + j*dimX+i_m];
dv3 = D3_ROF[index] - D3_ROF[(dimX*dimY)*k_m + j*dimX+i];
div = dv1 + dv2 + dv3; /* build divergence */
/*combine all into one cost function to minimise */
U[index] += tau*(lambdaROF*(div) - lambdaLLT*(laplc) - (U[index] - U0[index]));
}
}
__global__ void ROFLLTcopy_kernel2D(float *Input, float* Output, int N, int M, int num_total)
{
int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
int yIndex = blockDim.y * blockIdx.y + threadIdx.y;
int index = xIndex + N*yIndex;
if (index < num_total) {
Output[index] = Input[index];
}
}
__global__ void ROFLLTResidCalc2D_kernel(float *Input1, float *Input2, float* Output, int N, int M, int num_total)
{
int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
int yIndex = blockDim.y * blockIdx.y + threadIdx.y;
int index = xIndex + N*yIndex;
if (index < num_total) {
Output[index] = Input1[index] - Input2[index];
}
}
__global__ void ROFLLTcopy_kernel3D(float *Input, float* Output, int N, int M, int Z, int num_total)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int k = blockDim.z * blockIdx.z + threadIdx.z;
int index = (N*M)*k + i + N*j;
if (index < num_total) {
Output[index] = Input[index];
}
}
__global__ void ROFLLTResidCalc3D_kernel(float *Input1, float *Input2, float* Output, int N, int M, int Z, int num_total)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int k = blockDim.z * blockIdx.z + threadIdx.z;
int index = (N*M)*k + i + N*j;
if (index < num_total) {
Output[index] = Input1[index] - Input2[index];
}
}
/*******************************************************************/
/************************ HOST FUNCTION ****************************/
/*******************************************************************/
extern "C" int LLT_ROF_GPU_main(float *Input, float *Output, float *infovector, float lambdaROF, float lambdaLLT, int iterationsNumb, float tau, float epsil, int N, int M, int Z)
{
int deviceCount = -1; // number of devices
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "No CUDA devices found\n");
return -1;
}
float re;
re = 0.0f;
int DimTotal,count,n;
count = 0; n = 0;
float *d_input, *d_update;
float *D1_LLT=NULL, *D2_LLT=NULL, *D1_ROF=NULL, *D2_ROF=NULL, *d_update_prev=NULL;
if (Z == 0) {Z = 1;}
DimTotal = N*M*Z;
CHECK(hipMalloc((void**)&d_input,DimTotal*sizeof(float)));
CHECK(hipMalloc((void**)&d_update,DimTotal*sizeof(float)));
CHECK(hipMalloc((void**)&D1_LLT,DimTotal*sizeof(float)));
CHECK(hipMalloc((void**)&D2_LLT,DimTotal*sizeof(float)));
CHECK(hipMalloc((void**)&D1_ROF,DimTotal*sizeof(float)));
CHECK(hipMalloc((void**)&D2_ROF,DimTotal*sizeof(float)));
CHECK(hipMemcpy(d_input,Input,DimTotal*sizeof(float),hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_update,Input,DimTotal*sizeof(float),hipMemcpyHostToDevice));
if (epsil != 0.0f) checkCudaErrors( hipMalloc((void**)&d_update_prev,DimTotal*sizeof(float)) );
if (Z == 1) {
// TV - 2D case
dim3 dimBlock(BLKXSIZE2D,BLKYSIZE2D);
dim3 dimGrid(idivup(N,BLKXSIZE2D), idivup(M,BLKYSIZE2D));
for(n=0; n < iterationsNumb; n++) {
if ((epsil != 0.0f) && (n % 5 == 0)) {
hipLaunchKernelGGL(( ROFLLTcopy_kernel2D), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, d_update_prev, N, M, DimTotal);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
}
/****************ROF******************/
/* calculate first-order differences */
hipLaunchKernelGGL(( D1_func2D_ROF_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, D1_ROF, N, M);
CHECK(hipDeviceSynchronize());
hipLaunchKernelGGL(( D2_func2D_ROF_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, D2_ROF, N, M);
CHECK(hipDeviceSynchronize());
/****************LLT******************/
/* estimate second-order derivatives */
hipLaunchKernelGGL(( der2D_LLT_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, D1_LLT, D2_LLT, N, M);
/* Joint update for ROF and LLT models */
hipLaunchKernelGGL(( Update2D_LLT_ROF_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_input, d_update, D1_LLT, D2_LLT, D1_ROF, D2_ROF, lambdaROF, lambdaLLT, tau, N, M);
CHECK(hipDeviceSynchronize());
if ((epsil != 0.0f) && (n % 5 == 0)) {
/* calculate norm - stopping rules using the Thrust library */
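/* the residual d_update - d_update_prev is written into D1_ROF (which is free at
   this point); re = ||residual||_2 / ||d_update||_2 is the relative change, and
   the iteration stops once re has dropped below epsil more than 3 times */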
hipLaunchKernelGGL(( ROFLLTResidCalc2D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, d_update_prev, D1_ROF, N, M, DimTotal);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors( hipPeekAtLastError() );
// setup arguments
square<float> unary_op;
thrust::plus<float> binary_op;
thrust::device_vector<float> d_vec(D1_ROF, D1_ROF + DimTotal);
float reduction = std::sqrt(thrust::transform_reduce(d_vec.begin(), d_vec.end(), unary_op, 0.0f, binary_op));
thrust::device_vector<float> d_vec2(d_update, d_update + DimTotal);
float reduction2 = std::sqrt(thrust::transform_reduce(d_vec2.begin(), d_vec2.end(), unary_op, 0.0f, binary_op));
// compute norm
re = (reduction/reduction2);
if (re < epsil) count++;
if (count > 3) break;
}
}
}
else {
// 3D case
dim3 dimBlock(BLKXSIZE,BLKYSIZE,BLKZSIZE);
dim3 dimGrid(idivup(N,BLKXSIZE), idivup(M,BLKYSIZE), idivup(Z,BLKZSIZE));
float *D3_LLT=NULL, *D3_ROF=NULL;
CHECK(hipMalloc((void**)&D3_LLT,DimTotal*sizeof(float)));
CHECK(hipMalloc((void**)&D3_ROF,DimTotal*sizeof(float)));
for(n=0; n < iterationsNumb; n++) {
if ((epsil != 0.0f) && (n % 5 == 0)) {
hipLaunchKernelGGL(( ROFLLTcopy_kernel3D), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, d_update_prev, N, M, Z, DimTotal);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors(hipPeekAtLastError() );
}
/****************ROF******************/
/* calculate first-order differences */
hipLaunchKernelGGL(( D1_func3D_ROF_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, D1_ROF, N, M, Z);
CHECK(hipDeviceSynchronize());
hipLaunchKernelGGL(( D2_func3D_ROF_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, D2_ROF, N, M, Z);
CHECK(hipDeviceSynchronize());
hipLaunchKernelGGL(( D3_func3D_ROF_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, D3_ROF, N, M, Z);
CHECK(hipDeviceSynchronize());
/****************LLT******************/
/* estimate second-order derivatives */
hipLaunchKernelGGL(( der3D_LLT_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, D1_LLT, D2_LLT, D3_LLT, N, M, Z);
/* Joint update for ROF and LLT models */
hipLaunchKernelGGL(( Update3D_LLT_ROF_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_input, d_update, D1_LLT, D2_LLT, D3_LLT, D1_ROF, D2_ROF, D3_ROF, lambdaROF, lambdaLLT, tau, N, M, Z);
CHECK(hipDeviceSynchronize());
if ((epsil != 0.0f) && (n % 5 == 0)) {
/* calculate norm - stopping rules using the Thrust library */
hipLaunchKernelGGL(( ROFLLTResidCalc3D_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, d_update, d_update_prev, D1_ROF, N, M, Z, DimTotal);
checkCudaErrors( hipDeviceSynchronize() );
checkCudaErrors( hipPeekAtLastError() );
// setup arguments
square<float> unary_op;
thrust::plus<float> binary_op;
thrust::device_vector<float> d_vec(D1_ROF, D1_ROF + DimTotal);
float reduction = std::sqrt(thrust::transform_reduce(d_vec.begin(), d_vec.end(), unary_op, 0.0f, binary_op));
thrust::device_vector<float> d_vec2(d_update, d_update + DimTotal);
float reduction2 = std::sqrt(thrust::transform_reduce(d_vec2.begin(), d_vec2.end(), unary_op, 0.0f, binary_op));
// compute norm
re = (reduction/reduction2);
if (re < epsil) count++;
if (count > 3) break;
}
}
CHECK(hipFree(D3_LLT));
CHECK(hipFree(D3_ROF));
} /*end of else */
CHECK(hipMemcpy(Output,d_update,DimTotal*sizeof(float),hipMemcpyDeviceToHost));
CHECK(hipFree(d_input));
CHECK(hipFree(d_update));
if (epsil != 0.0f) hipFree(d_update_prev);
CHECK(hipFree(D1_LLT));
CHECK(hipFree(D2_LLT));
CHECK(hipFree(D1_ROF));
CHECK(hipFree(D2_ROF));
infovector[0] = (float)(n); /*iterations number (if stopped earlier based on tolerance)*/
infovector[1] = re; /* reached tolerance */
return 0;
}
| f15919daeea1c08ecfdc2c773183eff907649a90.cu | /*
This work is part of the Core Imaging Library developed by
Visual Analytics and Imaging System Group of the Science Technology
Facilities Council, STFC
Copyright 2017 Daniil Kazantsev
Copyright 2017 Srikanth Nagella, Edoardo Pasca
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "LLT_ROF_GPU_core.h"
#include "shared.h"
#include <thrust/functional.h>
#include <thrust/device_vector.h>
#include <thrust/transform_reduce.h>
/* CUDA implementation of Lysaker, Lundervold and Tai (LLT) model [1] combined with Rudin-Osher-Fatemi [2] TV regularisation penalty.
*
* This penalty can deliver visually pleasant piecewise-smooth recovery if regularisation parameters are selected well.
* The rule of thumb for selection is to start with lambdaLLT = 0 (just the ROF-TV model) and then proceed to increase
* lambdaLLT starting with smaller values.
*
* Input Parameters:
* 1. U0 - original noisy image/volume
* 2. lambdaROF - ROF-related regularisation parameter
* 3. lambdaLLT - LLT-related regularisation parameter
* 4. iter - iterations number (for both models)
* 5. tau - time-marching step
* 6. epsilon - tolerance constant
* Output:
* [1] Filtered/regularized image/volume
* [2] Information vector which contains [iteration no., reached tolerance]
*
* References:
* [1] Lysaker, M., Lundervold, A. and Tai, X.C., 2003. Noise removal using fourth-order partial differential equation with applications to medical magnetic resonance images in space and time. IEEE Transactions on image processing, 12(12), pp.1579-1590.
* [2] Rudin, Osher, Fatemi, "Nonlinear Total Variation based noise removal algorithms"
*/
#define BLKXSIZE 8
#define BLKYSIZE 8
#define BLKZSIZE 8
#define BLKXSIZE2D 16
#define BLKYSIZE2D 16
#define EPS_LLT 1.0e-12
#define EPS_ROF 1.0e-12
#define idivup(a, b) ( ((a)%(b) != 0) ? (a)/(b)+1 : (a)/(b) )
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
__host__ __device__ int signLLT (float x)
{
return (x > 0) - (x < 0);
}
/*************************************************************************/
/**********************LLT-related functions *****************************/
/*************************************************************************/
__global__ void der2D_LLT_kernel(float *U, float *D1, float *D2, int dimX, int dimY)
{
int i_p, i_m, j_m, j_p;
float dxx, dyy, denom_xx, denom_yy;
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int index = i + dimX*j;
if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY)) {
/* symmetric boundary conditions (Neumann) */
i_p = i + 1; if (i_p == dimX) i_p = i - 1;
i_m = i - 1; if (i_m < 0) i_m = i + 1;
j_p = j + 1; if (j_p == dimY) j_p = j - 1;
j_m = j - 1; if (j_m < 0) j_m = j + 1;
dxx = U[j*dimX+i_p] - 2.0f*U[index] + U[j*dimX+i_m];
dyy = U[j_p*dimX+i] - 2.0f*U[index] + U[j_m*dimX+i];
denom_xx = abs(dxx) + EPS_LLT;
denom_yy = abs(dyy) + EPS_LLT;
D1[index] = dxx / denom_xx;
D2[index] = dyy / denom_yy;
}
}
__global__ void der3D_LLT_kernel(float* U, float *D1, float *D2, float *D3, int dimX, int dimY, int dimZ)
{
int i_p, i_m, j_m, j_p, k_p, k_m;
float dxx, dyy, dzz, denom_xx, denom_yy, denom_zz;
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int k = blockDim.z * blockIdx.z + threadIdx.z;
if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY) && (k >= 0) && (k < dimZ)) {
/* symmetric boundary conditions (Neumann) */
i_p = i + 1; if (i_p == dimX) i_p = i - 1;
i_m = i - 1; if (i_m < 0) i_m = i + 1;
j_p = j + 1; if (j_p == dimY) j_p = j - 1;
j_m = j - 1; if (j_m < 0) j_m = j + 1;
k_p = k + 1; if (k_p == dimZ) k_p = k - 1;
k_m = k - 1; if (k_m < 0) k_m = k + 1;
int index = (dimX*dimY)*k + j*dimX+i;
dxx = U[(dimX*dimY)*k + j*dimX+i_p] - 2.0f*U[index] + U[(dimX*dimY)*k + j*dimX+i_m];
dyy = U[(dimX*dimY)*k + j_p*dimX+i] - 2.0f*U[index] + U[(dimX*dimY)*k + j_m*dimX+i];
dzz = U[(dimX*dimY)*k_p + j*dimX+i] - 2.0f*U[index] + U[(dimX*dimY)*k_m + j*dimX+i];
denom_xx = abs(dxx) + EPS_LLT;
denom_yy = abs(dyy) + EPS_LLT;
denom_zz = abs(dzz) + EPS_LLT;
D1[index] = dxx / denom_xx;
D2[index] = dyy / denom_yy;
D3[index] = dzz / denom_zz;
}
}
/*************************************************************************/
/**********************ROF-related functions *****************************/
/*************************************************************************/
/* first-order differences 1 */
__global__ void D1_func2D_ROF_kernel(float* Input, float* D1, int N, int M)
{
int i1, j1, i2;
float NOMx_1,NOMy_1,NOMy_0,denom1,denom2,T1;
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int index = i + N*j;
if ((i >= 0) && (i < N) && (j >= 0) && (j < M)) {
/* boundary conditions (Neumann reflections) */
i1 = i + 1; if (i1 >= N) i1 = i-1;
i2 = i - 1; if (i2 < 0) i2 = i+1;
j1 = j + 1; if (j1 >= M) j1 = j-1;
/* Forward-backward differences */
NOMx_1 = Input[j1*N + i] - Input[index]; /* x+ */
NOMy_1 = Input[j*N + i1] - Input[index]; /* y+ */
NOMy_0 = Input[index] - Input[j*N + i2]; /* y- */
denom1 = NOMx_1*NOMx_1;
denom2 = 0.5f*(signLLT((float)NOMy_1) + signLLT((float)NOMy_0))*(MIN(abs((float)NOMy_1),abs((float)NOMy_0)));
denom2 = denom2*denom2;
T1 = sqrt(denom1 + denom2 + EPS_ROF);
D1[index] = NOMx_1/T1;
}
}
/* differences 2 */
__global__ void D2_func2D_ROF_kernel(float* Input, float* D2, int N, int M)
{
int i1, j1, j2;
float NOMx_1,NOMy_1,NOMx_0,denom1,denom2,T2;
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int index = i + N*j;
if ((i >= 0) && (i < (N)) && (j >= 0) && (j < (M))) {
/* boundary conditions (Neumann reflections) */
i1 = i + 1; if (i1 >= N) i1 = i-1;
j1 = j + 1; if (j1 >= M) j1 = j-1;
j2 = j - 1; if (j2 < 0) j2 = j+1;
/* Forward-backward differences */
NOMx_1 = Input[j1*N + i] - Input[index]; /* x+ */
NOMy_1 = Input[j*N + i1] - Input[index]; /* y+ */
NOMx_0 = Input[index] - Input[j2*N + i]; /* x- */
denom1 = NOMy_1*NOMy_1;
denom2 = 0.5f*(signLLT((float)NOMx_1) + signLLT((float)NOMx_0))*(MIN(abs((float)NOMx_1),abs((float)NOMx_0)));
denom2 = denom2*denom2;
T2 = sqrt(denom1 + denom2 + EPS_ROF);
D2[index] = NOMy_1/T2;
}
}
/* differences 1 */
__global__ void D1_func3D_ROF_kernel(float* Input, float* D1, int dimX, int dimY, int dimZ)
{
float NOMx_1, NOMy_1, NOMy_0, NOMz_1, NOMz_0, denom1, denom2,denom3, T1;
int i1,i2,k1,j1,j2,k2;
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int k = blockDim.z * blockIdx.z + threadIdx.z;
int index = (dimX*dimY)*k + j*dimX+i;
if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY) && (k >= 0) && (k < dimZ)) {
/* symmetric boundary conditions (Neumann) */
i1 = i + 1; if (i1 >= dimX) i1 = i-1;
i2 = i - 1; if (i2 < 0) i2 = i+1;
j1 = j + 1; if (j1 >= dimY) j1 = j-1;
j2 = j - 1; if (j2 < 0) j2 = j+1;
k1 = k + 1; if (k1 >= dimZ) k1 = k-1;
k2 = k - 1; if (k2 < 0) k2 = k+1;
/* Forward-backward differences */
NOMx_1 = Input[(dimX*dimY)*k + j1*dimX + i] - Input[index]; /* x+ */
NOMy_1 = Input[(dimX*dimY)*k + j*dimX + i1] - Input[index]; /* y+ */
NOMy_0 = Input[index] - Input[(dimX*dimY)*k + j*dimX + i2]; /* y- */
NOMz_1 = Input[(dimX*dimY)*k1 + j*dimX + i] - Input[index]; /* z+ */
NOMz_0 = Input[index] - Input[(dimX*dimY)*k2 + j*dimX + i]; /* z- */
denom1 = NOMx_1*NOMx_1;
denom2 = 0.5*(signLLT(NOMy_1) + signLLT(NOMy_0))*(MIN(abs(NOMy_1),abs(NOMy_0)));
denom2 = denom2*denom2;
denom3 = 0.5*(signLLT(NOMz_1) + signLLT(NOMz_0))*(MIN(abs(NOMz_1),abs(NOMz_0)));
denom3 = denom3*denom3;
T1 = sqrt(denom1 + denom2 + denom3 + EPS_ROF);
D1[index] = NOMx_1/T1;
}
}
/* differences 2 */
__global__ void D2_func3D_ROF_kernel(float* Input, float* D2, int dimX, int dimY, int dimZ)
{
float NOMx_1, NOMy_1, NOMx_0, NOMz_1, NOMz_0, denom1, denom2, denom3, T2;
int i1,i2,k1,j1,j2,k2;
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int k = blockDim.z * blockIdx.z + threadIdx.z;
int index = (dimX*dimY)*k + j*dimX+i;
if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY) && (k >= 0) && (k < dimZ)) {
/* symmetric boundary conditions (Neumann) */
i1 = i + 1; if (i1 >= dimX) i1 = i-1;
i2 = i - 1; if (i2 < 0) i2 = i+1;
j1 = j + 1; if (j1 >= dimY) j1 = j-1;
j2 = j - 1; if (j2 < 0) j2 = j+1;
k1 = k + 1; if (k1 >= dimZ) k1 = k-1;
k2 = k - 1; if (k2 < 0) k2 = k+1;
/* Forward-backward differences */
NOMx_1 = Input[(dimX*dimY)*k + (j1)*dimX + i] - Input[index]; /* x+ */
NOMy_1 = Input[(dimX*dimY)*k + (j)*dimX + i1] - Input[index]; /* y+ */
NOMx_0 = Input[index] - Input[(dimX*dimY)*k + (j2)*dimX + i]; /* x- */
NOMz_1 = Input[(dimX*dimY)*k1 + j*dimX + i] - Input[index]; /* z+ */
NOMz_0 = Input[index] - Input[(dimX*dimY)*k2 + (j)*dimX + i]; /* z- */
denom1 = NOMy_1*NOMy_1;
denom2 = 0.5*(signLLT(NOMx_1) + signLLT(NOMx_0))*(MIN(abs(NOMx_1),abs(NOMx_0)));
denom2 = denom2*denom2;
denom3 = 0.5*(signLLT(NOMz_1) + signLLT(NOMz_0))*(MIN(abs(NOMz_1),abs(NOMz_0)));
denom3 = denom3*denom3;
T2 = sqrt(denom1 + denom2 + denom3 + EPS_ROF);
D2[index] = NOMy_1/T2;
}
}
/* differences 3 */
__global__ void D3_func3D_ROF_kernel(float* Input, float* D3, int dimX, int dimY, int dimZ)
{
float NOMx_1, NOMy_1, NOMx_0, NOMy_0, NOMz_1, denom1, denom2, denom3, T3;
int i1,i2,k1,j1,j2,k2;
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int k = blockDim.z * blockIdx.z + threadIdx.z;
int index = (dimX*dimY)*k + j*dimX+i;
if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY) && (k >= 0) && (k < dimZ)) {
i1 = i + 1; if (i1 >= dimX) i1 = i-1;
i2 = i - 1; if (i2 < 0) i2 = i+1;
j1 = j + 1; if (j1 >= dimY) j1 = j-1;
j2 = j - 1; if (j2 < 0) j2 = j+1;
k1 = k + 1; if (k1 >= dimZ) k1 = k-1;
k2 = k - 1; if (k2 < 0) k2 = k+1;
/* Forward-backward differences */
NOMx_1 = Input[(dimX*dimY)*k + (j1)*dimX + i] - Input[index]; /* x+ */
NOMy_1 = Input[(dimX*dimY)*k + (j)*dimX + i1] - Input[index]; /* y+ */
NOMy_0 = Input[index] - Input[(dimX*dimY)*k + (j)*dimX + i2]; /* y- */
NOMx_0 = Input[index] - Input[(dimX*dimY)*k + (j2)*dimX + i]; /* x- */
NOMz_1 = Input[(dimX*dimY)*k1 + j*dimX + i] - Input[index]; /* z+ */
denom1 = NOMz_1*NOMz_1;
denom2 = 0.5*(signLLT(NOMx_1) + signLLT(NOMx_0))*(MIN(abs(NOMx_1),abs(NOMx_0)));
denom2 = denom2*denom2;
denom3 = 0.5*(signLLT(NOMy_1) + signLLT(NOMy_0))*(MIN(abs(NOMy_1),abs(NOMy_0)));
denom3 = denom3*denom3;
T3 = sqrt(denom1 + denom2 + denom3 + EPS_ROF);
D3[index] = NOMz_1/T3;
}
}
/*************************************************************************/
/**********************ROF-LLT-related functions *************************/
/*************************************************************************/
__global__ void Update2D_LLT_ROF_kernel(float *U0, float *U, float *D1_LLT, float *D2_LLT, float *D1_ROF, float *D2_ROF, float lambdaROF, float lambdaLLT, float tau, int dimX, int dimY)
{
int i_p, i_m, j_m, j_p;
float div, laplc, dxx, dyy, dv1, dv2;
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int index = i + dimX*j;
if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY)) {
/* symmetric boundary conditions (Neumann) */
i_p = i + 1; if (i_p == dimX) i_p = i - 1;
i_m = i - 1; if (i_m < 0) i_m = i + 1;
j_p = j + 1; if (j_p == dimY) j_p = j - 1;
j_m = j - 1; if (j_m < 0) j_m = j + 1;
index = j*dimX+i;
/*LLT-related part*/
dxx = D1_LLT[j*dimX+i_p] - 2.0f*D1_LLT[index] + D1_LLT[j*dimX+i_m];
dyy = D2_LLT[j_p*dimX+i] - 2.0f*D2_LLT[index] + D2_LLT[j_m*dimX+i];
laplc = dxx + dyy; /*build Laplacian*/
/*ROF-related part*/
dv1 = D1_ROF[index] - D1_ROF[j_m*dimX + i];
dv2 = D2_ROF[index] - D2_ROF[j*dimX + i_m];
div = dv1 + dv2; /* build divergence */
/*combine all into one cost function to minimise */
U[index] += tau*(lambdaROF*(div) - lambdaLLT*(laplc) - (U[index] - U0[index]));
}
}
__global__ void Update3D_LLT_ROF_kernel(float *U0, float *U, float *D1_LLT, float *D2_LLT, float *D3_LLT, float *D1_ROF, float *D2_ROF, float *D3_ROF, float lambdaROF, float lambdaLLT, float tau, int dimX, int dimY, int dimZ)
{
int i_p, i_m, j_m, j_p, k_p, k_m;
float div, laplc, dxx, dyy, dzz, dv1, dv2, dv3;
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int k = blockDim.z * blockIdx.z + threadIdx.z;
if ((i >= 0) && (i < dimX) && (j >= 0) && (j < dimY) && (k >= 0) && (k < dimZ)) {
/* symmetric boundary conditions (Neumann) */
i_p = i + 1; if (i_p == dimX) i_p = i - 1;
i_m = i - 1; if (i_m < 0) i_m = i + 1;
j_p = j + 1; if (j_p == dimY) j_p = j - 1;
j_m = j - 1; if (j_m < 0) j_m = j + 1;
k_p = k + 1; if (k_p == dimZ) k_p = k - 1;
k_m = k - 1; if (k_m < 0) k_m = k + 1;
int index = (dimX*dimY)*k + j*dimX+i;
/*LLT-related part*/
dxx = D1_LLT[(dimX*dimY)*k + j*dimX+i_p] - 2.0f*D1_LLT[index] + D1_LLT[(dimX*dimY)*k + j*dimX+i_m];
dyy = D2_LLT[(dimX*dimY)*k + j_p*dimX+i] - 2.0f*D2_LLT[index] + D2_LLT[(dimX*dimY)*k + j_m*dimX+i];
dzz = D3_LLT[(dimX*dimY)*k_p + j*dimX+i] - 2.0f*D3_LLT[index] + D3_LLT[(dimX*dimY)*k_m + j*dimX+i];
laplc = dxx + dyy + dzz; /*build Laplacian*/
/*ROF-related part*/
dv1 = D1_ROF[index] - D1_ROF[(dimX*dimY)*k + j_m*dimX+i];
dv2 = D2_ROF[index] - D2_ROF[(dimX*dimY)*k + j*dimX+i_m];
dv3 = D3_ROF[index] - D3_ROF[(dimX*dimY)*k_m + j*dimX+i];
div = dv1 + dv2 + dv3; /* build divergence */
/*combine all into one cost function to minimise */
U[index] += tau*(lambdaROF*(div) - lambdaLLT*(laplc) - (U[index] - U0[index]));
}
}
__global__ void ROFLLTcopy_kernel2D(float *Input, float* Output, int N, int M, int num_total)
{
int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
int yIndex = blockDim.y * blockIdx.y + threadIdx.y;
int index = xIndex + N*yIndex;
if (index < num_total) {
Output[index] = Input[index];
}
}
__global__ void ROFLLTResidCalc2D_kernel(float *Input1, float *Input2, float* Output, int N, int M, int num_total)
{
int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
int yIndex = blockDim.y * blockIdx.y + threadIdx.y;
int index = xIndex + N*yIndex;
if (index < num_total) {
Output[index] = Input1[index] - Input2[index];
}
}
__global__ void ROFLLTcopy_kernel3D(float *Input, float* Output, int N, int M, int Z, int num_total)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int k = blockDim.z * blockIdx.z + threadIdx.z;
int index = (N*M)*k + i + N*j;
if (index < num_total) {
Output[index] = Input[index];
}
}
__global__ void ROFLLTResidCalc3D_kernel(float *Input1, float *Input2, float* Output, int N, int M, int Z, int num_total)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int k = blockDim.z * blockIdx.z + threadIdx.z;
int index = (N*M)*k + i + N*j;
if (index < num_total) {
Output[index] = Input1[index] - Input2[index];
}
}
/*******************************************************************/
/************************ HOST FUNCTION ****************************/
/*******************************************************************/
extern "C" int LLT_ROF_GPU_main(float *Input, float *Output, float *infovector, float lambdaROF, float lambdaLLT, int iterationsNumb, float tau, float epsil, int N, int M, int Z)
{
int deviceCount = -1; // number of devices
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "No CUDA devices found\n");
return -1;
}
float re;
re = 0.0f;
int DimTotal,count,n;
count = 0; n = 0;
float *d_input, *d_update;
float *D1_LLT=NULL, *D2_LLT=NULL, *D1_ROF=NULL, *D2_ROF=NULL, *d_update_prev=NULL;
if (Z == 0) {Z = 1;}
DimTotal = N*M*Z;
CHECK(cudaMalloc((void**)&d_input,DimTotal*sizeof(float)));
CHECK(cudaMalloc((void**)&d_update,DimTotal*sizeof(float)));
CHECK(cudaMalloc((void**)&D1_LLT,DimTotal*sizeof(float)));
CHECK(cudaMalloc((void**)&D2_LLT,DimTotal*sizeof(float)));
CHECK(cudaMalloc((void**)&D1_ROF,DimTotal*sizeof(float)));
CHECK(cudaMalloc((void**)&D2_ROF,DimTotal*sizeof(float)));
CHECK(cudaMemcpy(d_input,Input,DimTotal*sizeof(float),cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_update,Input,DimTotal*sizeof(float),cudaMemcpyHostToDevice));
if (epsil != 0.0f) checkCudaErrors( cudaMalloc((void**)&d_update_prev,DimTotal*sizeof(float)) );
if (Z == 1) {
// TV - 2D case
dim3 dimBlock(BLKXSIZE2D,BLKYSIZE2D);
dim3 dimGrid(idivup(N,BLKXSIZE2D), idivup(M,BLKYSIZE2D));
for(n=0; n < iterationsNumb; n++) {
if ((epsil != 0.0f) && (n % 5 == 0)) {
ROFLLTcopy_kernel2D<<<dimGrid,dimBlock>>>(d_update, d_update_prev, N, M, DimTotal);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
}
/****************ROF******************/
/* calculate first-order differences */
D1_func2D_ROF_kernel<<<dimGrid,dimBlock>>>(d_update, D1_ROF, N, M);
CHECK(cudaDeviceSynchronize());
D2_func2D_ROF_kernel<<<dimGrid,dimBlock>>>(d_update, D2_ROF, N, M);
CHECK(cudaDeviceSynchronize());
/****************LLT******************/
/* estimate second-order derivatives */
der2D_LLT_kernel<<<dimGrid,dimBlock>>>(d_update, D1_LLT, D2_LLT, N, M);
/* Joint update for ROF and LLT models */
Update2D_LLT_ROF_kernel<<<dimGrid,dimBlock>>>(d_input, d_update, D1_LLT, D2_LLT, D1_ROF, D2_ROF, lambdaROF, lambdaLLT, tau, N, M);
CHECK(cudaDeviceSynchronize());
if ((epsil != 0.0f) && (n % 5 == 0)) {
/* calculate norm - stopping rules using the Thrust library */
ROFLLTResidCalc2D_kernel<<<dimGrid,dimBlock>>>(d_update, d_update_prev, D1_ROF, N, M, DimTotal);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors( cudaPeekAtLastError() );
// setup arguments
square<float> unary_op;
thrust::plus<float> binary_op;
thrust::device_vector<float> d_vec(D1_ROF, D1_ROF + DimTotal);
float reduction = std::sqrt(thrust::transform_reduce(d_vec.begin(), d_vec.end(), unary_op, 0.0f, binary_op));
thrust::device_vector<float> d_vec2(d_update, d_update + DimTotal);
float reduction2 = std::sqrt(thrust::transform_reduce(d_vec2.begin(), d_vec2.end(), unary_op, 0.0f, binary_op));
// compute norm
re = (reduction/reduction2);
if (re < epsil) count++;
if (count > 3) break;
}
}
}
else {
// 3D case
dim3 dimBlock(BLKXSIZE,BLKYSIZE,BLKZSIZE);
dim3 dimGrid(idivup(N,BLKXSIZE), idivup(M,BLKYSIZE), idivup(Z,BLKZSIZE));
float *D3_LLT=NULL, *D3_ROF=NULL;
CHECK(cudaMalloc((void**)&D3_LLT,DimTotal*sizeof(float)));
CHECK(cudaMalloc((void**)&D3_ROF,DimTotal*sizeof(float)));
for(n=0; n < iterationsNumb; n++) {
if ((epsil != 0.0f) && (n % 5 == 0)) {
ROFLLTcopy_kernel3D<<<dimGrid,dimBlock>>>(d_update, d_update_prev, N, M, Z, DimTotal);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors(cudaPeekAtLastError() );
}
/****************ROF******************/
/* calculate first-order differences */
D1_func3D_ROF_kernel<<<dimGrid,dimBlock>>>(d_update, D1_ROF, N, M, Z);
CHECK(cudaDeviceSynchronize());
D2_func3D_ROF_kernel<<<dimGrid,dimBlock>>>(d_update, D2_ROF, N, M, Z);
CHECK(cudaDeviceSynchronize());
D3_func3D_ROF_kernel<<<dimGrid,dimBlock>>>(d_update, D3_ROF, N, M, Z);
CHECK(cudaDeviceSynchronize());
/****************LLT******************/
/* estimate second-order derivatives */
der3D_LLT_kernel<<<dimGrid,dimBlock>>>(d_update, D1_LLT, D2_LLT, D3_LLT, N, M, Z);
/* Joint update for ROF and LLT models */
Update3D_LLT_ROF_kernel<<<dimGrid,dimBlock>>>(d_input, d_update, D1_LLT, D2_LLT, D3_LLT, D1_ROF, D2_ROF, D3_ROF, lambdaROF, lambdaLLT, tau, N, M, Z);
CHECK(cudaDeviceSynchronize());
if ((epsil != 0.0f) && (n % 5 == 0)) {
/* calculate norm - stopping rules using the Thrust library */
ROFLLTResidCalc3D_kernel<<<dimGrid,dimBlock>>>(d_update, d_update_prev, D1_ROF, N, M, Z, DimTotal);
checkCudaErrors( cudaDeviceSynchronize() );
checkCudaErrors( cudaPeekAtLastError() );
// setup arguments
square<float> unary_op;
thrust::plus<float> binary_op;
thrust::device_vector<float> d_vec(D1_ROF, D1_ROF + DimTotal);
float reduction = std::sqrt(thrust::transform_reduce(d_vec.begin(), d_vec.end(), unary_op, 0.0f, binary_op));
thrust::device_vector<float> d_vec2(d_update, d_update + DimTotal);
float reduction2 = std::sqrt(thrust::transform_reduce(d_vec2.begin(), d_vec2.end(), unary_op, 0.0f, binary_op));
// compute norm
re = (reduction/reduction2);
if (re < epsil) count++;
if (count > 3) break;
}
}
CHECK(cudaFree(D3_LLT));
CHECK(cudaFree(D3_ROF));
} /*end of else */
CHECK(cudaMemcpy(Output,d_update,DimTotal*sizeof(float),cudaMemcpyDeviceToHost));
CHECK(cudaFree(d_input));
CHECK(cudaFree(d_update));
if (epsil != 0.0f) cudaFree(d_update_prev);
CHECK(cudaFree(D1_LLT));
CHECK(cudaFree(D2_LLT));
CHECK(cudaFree(D1_ROF));
CHECK(cudaFree(D2_ROF));
infovector[0] = (float)(n); /*iterations number (if stopped earlier based on tolerance)*/
infovector[1] = re; /* reached tolerance */
return 0;
}
|
2ff83d27c30322222ad5cb158ad8a50e468f2d79.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void scale_channels_kernel(float *in_w_h_c, int size, int channel_size, float *scales_c, float *out)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size) {
out[index] = in_w_h_c[index] * scales_c[index / channel_size];
}
} | 2ff83d27c30322222ad5cb158ad8a50e468f2d79.cu | #include "includes.h"
__global__ void scale_channels_kernel(float *in_w_h_c, int size, int channel_size, float *scales_c, float *out)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < size) {
out[index] = in_w_h_c[index] * scales_c[index / channel_size];
}
} |
e2ee1278e0e041622e2c426957f0a39ed12d1f5a.hip | // !!! This is a file automatically generated by hipify!!!
/**
renderbox2 - a physically based gpu renderer for research purposes
Copyright (C) - 2014 - Srinath Ravichandran
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
// Application specific headers.
#include <core/camera.h>
#include <core/film.h>
#include <core/filter.h>
#include <core/integrator.h>
#include <core/scene.h>
#include <core/util.h>
#include <renderers/samplerrenderer.h>
#include <util/cudatimer.h>
// Cuda specific headers.
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <cuda_occupancy.h>
#define NDEBUG 1
#include <thrust/random.h>
// Standard c++ headers.
namespace renderbox2
{
//
// Global kernels used by the sampler renderer.
//
__global__ void kernel_generate_perspective_camera_samples(
CameraSampleBuffer sample_buffer,
RayBuffer ray_buffer,
PerspectiveCamera camera,
uint32_t spp,
thrust::default_random_engine* generators,
uint32_t iteration,
uint4 window)
{
for (GIndexType tidx = threadIdx.x + blockIdx.x * blockDim.x; tidx < camera.get_total_pixels(); tidx += gridDim.x * blockDim.x)
{
thrust::default_random_engine rng;
thrust::uniform_real_distribution<float> u01(0, 1);
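			// On the first iteration, seed a fresh engine per pixel from a hash of the
			// pixel index; on later iterations, resume the engine persisted in `generators`
			// so the random sequence continues across iterations.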
if (iteration == 0)
{
uint32_t seed = simplehash(tidx + iteration);
rng = thrust::default_random_engine(seed);
}
else
{
rng = generators[tidx];
}
uint32_t pixel_x = tidx % camera.get_film_width();
			uint32_t pixel_y = tidx / camera.get_film_width();
for (uint32_t sample = 0; sample < spp; sample++)
{
int32_t idx = tidx * spp + sample;
float randx = u01(rng);
float randy = u01(rng);
float2 pixel = make_float2(static_cast<float>(pixel_x)+randx, static_cast<float>(pixel_y)+randy);
sample_buffer.m_alives[idx] = 1;
sample_buffer.m_continue_probability[idx] = 1.0f;
sample_buffer.m_contribution[idx] = make_float4(0.0f);
sample_buffer.m_ids[idx] = idx;
sample_buffer.m_pixel_coords[idx] = pixel;
sample_buffer.m_throughput[idx] = make_float4(1.0f);
ray_buffer.m_data[idx] = camera.generate_ray(pixel.x, pixel.y);
}
generators[tidx] = rng;
}
}
//
// Update film kernels.
//
__global__ void kernel_update_film(
CameraSampleBuffer sample_buffer,
Pixel* pixels,
uint2 film_size,
float* filter_table_coeffs,
float2 filter_width,
float2 filter_invwidth,
uint2 pixel_start
)
{
		for (GIndexType tidx = threadIdx.x + blockIdx.x * blockDim.x; tidx < sample_buffer.m_size; tidx += gridDim.x * blockDim.x)
{
// NOTE: for now we assume only one sample per pixel.
float4 contribution = sample_buffer.m_contribution[tidx];
float2 pixel = sample_buffer.m_pixel_coords[tidx];
float dimageX = pixel.x - 0.5f;
float dimageY = pixel.y - 0.5f;
// compute window of pixels that this sample affects
int x0 = static_cast<int>(ceilf(dimageX - filter_width.x));
int x1 = static_cast<int>(floorf(dimageX + filter_width.x));
int y0 = static_cast<int>(ceilf(dimageY - filter_width.y));
int y1 = static_cast<int>(floorf(dimageY + filter_width.y));
x0 = fmaxf(x0, pixel_start.x);
x1 = fminf(x1, pixel_start.x + film_size.x - 1);
y0 = fmaxf(y0, pixel_start.y);
y1 = fminf(y1, pixel_start.y + film_size.y - 1);
// Precompute indices.
// NOTE: This is a DANGEROUS ASSUMPTION.!
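			// (ifx/ify hold at most 2*filter_width+1 entries per axis, so the fixed size
			// of 16 assumes filter_width < 8 in each dimension; a wider filter would
			// overflow these buffers)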
int ifx[16];
int ify[16];
for (int x = x0; x <= x1; x++)
{
float fx = fabsf((x - dimageX) * filter_invwidth.x * FILTER_TABLE_SIZE);
ifx[x - x0] = min(static_cast<int>(floorf(fx)), FILTER_TABLE_SIZE - 1);
}
for (int y = y0; y <= y1; y++)
{
float fy = fabsf((y - dimageY) * filter_invwidth.y * FILTER_TABLE_SIZE);
ify[y - y0] = min(static_cast<int>(floorf(fy)), FILTER_TABLE_SIZE - 1);
}
for (int y = y0; y <= y1; y++)
{
for (int x = x0; x <= x1; x++)
{
int offset = ify[y - y0] * FILTER_TABLE_SIZE + ifx[x - x0];
float filter_weight = filter_table_coeffs[offset];
// Update in atomic fashion only.
Pixel& pixel = pixels[(y - pixel_start.y) * film_size.x + (x - pixel_start.x)];
atomicAdd(&(pixel.Lrgb[0]), filter_weight * contribution.x);
atomicAdd(&(pixel.Lrgb[1]), filter_weight * contribution.y);
atomicAdd(&(pixel.Lrgb[2]), filter_weight * contribution.z);
atomicAdd(&pixel.weightsum, filter_weight);
}
}
}
}
//
// Integrator private methods.
//
void SamplerRenderer::compute(uint32_t iteration)
{
		// NOTE: For now we assume that all samples fit in memory at once, without bucketing.
// Create the samples.
const uint2 film_dimensions = make_uint2(m_scene->get_camera()->get_film_width(), m_scene->get_camera()->get_film_height());
const uint32_t num_samples = film_dimensions.x * film_dimensions.y * m_params.m_spp;
if (iteration == 0)
{
alloc_rng_states(film_dimensions.x * film_dimensions.y);
}
CameraSampleBufferClass csamples(m_allocator);
csamples.allocate(num_samples);
CameraSampleBuffer csb = csamples.get_buffer();
// Create camera rays.
RayBufferClass primary_rays(m_allocator);
primary_rays.allocate(num_samples);
// call a primary ray generation kernel.
RayBuffer rb = primary_rays.get_buffer();
dim3 grid_size(256, 1, 1);
dim3 block_size(256, 1, 1);
CudaTimer t1("primary rays timer");
thrust::default_random_engine* rng = static_cast<thrust::default_random_engine*>(m_rng_generators.m_ptr);
t1.start();
hipLaunchKernelGGL(( kernel_generate_perspective_camera_samples), dim3(grid_size), dim3(block_size), 0, 0, csb, rb, *(m_scene->get_camera()), m_params.m_spp, rng, iteration, make_uint4(0));
t1.stop();
// Call trace functionality.
void* data[] = { &csb, &rb, &iteration };
m_integrator->render(m_scene, m_tracer, data, 2);
// Collect results and update.
Film* film = m_scene->get_output_film();
Filter* filter = film->get_filter();
CudaTimer t2("update timer");
t2.start();
		hipLaunchKernelGGL(( kernel_update_film), dim3(grid_size), dim3(block_size), 0, 0, csb, static_cast<Pixel*>(film->get_pixels().m_ptr), make_uint2(film->get_width(), film->get_height()),
			static_cast<float*>(film->get_filter_table().m_ptr), make_float2(filter->m_xwidth, filter->m_ywidth), make_float2(filter->m_inv_xwidth, filter->m_inv_ywidth), make_uint2(0, 0));
t2.stop();
float ms = t2.get_ms();
std::cout << "Filtering : " << ms << std::endl;
}
void SamplerRenderer::alloc_rng_states(uint32_t samples)
{
m_rng_generators = m_allocator.allocate(sizeof(thrust::default_random_engine) * samples);
}
}
| e2ee1278e0e041622e2c426957f0a39ed12d1f5a.cu |
/**
renderbox2 - a physically based gpu renderer for research purposes
Copyright (C) - 2014 - Srinath Ravichandran
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
// Application specific headers.
#include <core/camera.h>
#include <core/film.h>
#include <core/filter.h>
#include <core/integrator.h>
#include <core/scene.h>
#include <core/util.h>
#include <renderers/samplerrenderer.h>
#include <util/cudatimer.h>
// Cuda specific headers.
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_occupancy.h>
#define NDEBUG 1
#include <thrust/random.h>
// Standard c++ headers.
namespace renderbox2
{
//
// Global kernels used by the sampler renderer.
//
__global__ void kernel_generate_perspective_camera_samples(
CameraSampleBuffer sample_buffer,
RayBuffer ray_buffer,
PerspectiveCamera camera,
uint32_t spp,
thrust::default_random_engine* generators,
uint32_t iteration,
uint4 window)
{
for (GIndexType tidx = threadIdx.x + blockIdx.x * blockDim.x; tidx < camera.get_total_pixels(); tidx += gridDim.x * blockDim.x)
{
thrust::default_random_engine rng;
thrust::uniform_real_distribution<float> u01(0, 1);
if (iteration == 0)
{
uint32_t seed = simplehash(tidx + iteration);
rng = thrust::default_random_engine(seed);
}
else
{
rng = generators[tidx];
}
uint32_t pixel_x = tidx % camera.get_film_width();
			uint32_t pixel_y = tidx / camera.get_film_width();
for (uint32_t sample = 0; sample < spp; sample++)
{
int32_t idx = tidx * spp + sample;
float randx = u01(rng);
float randy = u01(rng);
float2 pixel = make_float2(static_cast<float>(pixel_x)+randx, static_cast<float>(pixel_y)+randy);
sample_buffer.m_alives[idx] = 1;
sample_buffer.m_continue_probability[idx] = 1.0f;
sample_buffer.m_contribution[idx] = make_float4(0.0f);
sample_buffer.m_ids[idx] = idx;
sample_buffer.m_pixel_coords[idx] = pixel;
sample_buffer.m_throughput[idx] = make_float4(1.0f);
ray_buffer.m_data[idx] = camera.generate_ray(pixel.x, pixel.y);
}
generators[tidx] = rng;
}
}
//
// Update film kernels.
//
__global__ void kernel_update_film(
CameraSampleBuffer sample_buffer,
Pixel* pixels,
uint2 film_size,
float* filter_table_coeffs,
float2 filter_width,
float2 filter_invwidth,
uint2 pixel_start
)
{
		for (GIndexType tidx = threadIdx.x + blockIdx.x * blockDim.x; tidx < sample_buffer.m_size; tidx += gridDim.x * blockDim.x)
{
// NOTE: for now we assume only one sample per pixel.
float4 contribution = sample_buffer.m_contribution[tidx];
float2 pixel = sample_buffer.m_pixel_coords[tidx];
float dimageX = pixel.x - 0.5f;
float dimageY = pixel.y - 0.5f;
// compute window of pixels that this sample affects
int x0 = static_cast<int>(ceilf(dimageX - filter_width.x));
int x1 = static_cast<int>(floorf(dimageX + filter_width.x));
int y0 = static_cast<int>(ceilf(dimageY - filter_width.y));
int y1 = static_cast<int>(floorf(dimageY + filter_width.y));
x0 = fmaxf(x0, pixel_start.x);
x1 = fminf(x1, pixel_start.x + film_size.x - 1);
y0 = fmaxf(y0, pixel_start.y);
y1 = fminf(y1, pixel_start.y + film_size.y - 1);
// Precompute indices.
// NOTE: This is a DANGEROUS ASSUMPTION!
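// The fixed-size ifx/ify arrays assume the filter support never spans more than
// 16 pixels along either axis (x1 - x0 < 16 and y1 - y0 < 16).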
int ifx[16];
int ify[16];
for (int x = x0; x <= x1; x++)
{
float fx = fabsf((x - dimageX) * filter_invwidth.x * FILTER_TABLE_SIZE);
ifx[x - x0] = min(static_cast<int>(floorf(fx)), FILTER_TABLE_SIZE - 1);
}
for (int y = y0; y <= y1; y++)
{
float fy = fabsf((y - dimageY) * filter_invwidth.y * FILTER_TABLE_SIZE);
ify[y - y0] = min(static_cast<int>(floorf(fy)), FILTER_TABLE_SIZE - 1);
}
for (int y = y0; y <= y1; y++)
{
for (int x = x0; x <= x1; x++)
{
int offset = ify[y - y0] * FILTER_TABLE_SIZE + ifx[x - x0];
float filter_weight = filter_table_coeffs[offset];
// Update in atomic fashion only.
Pixel& pixel = pixels[(y - pixel_start.y) * film_size.x + (x - pixel_start.x)];
atomicAdd(&(pixel.Lrgb[0]), filter_weight * contribution.x);
atomicAdd(&(pixel.Lrgb[1]), filter_weight * contribution.y);
atomicAdd(&(pixel.Lrgb[2]), filter_weight * contribution.z);
atomicAdd(&pixel.weightsum, filter_weight);
}
}
}
}
//
// Integrator private methods.
//
void SamplerRenderer::compute(uint32_t iteration)
{
// NOTE: For now we are assuming samples can be allocated in memory as such without needing bucketing.
// Create the samples.
const uint2 film_dimensions = make_uint2(m_scene->get_camera()->get_film_width(), m_scene->get_camera()->get_film_height());
const uint32_t num_samples = film_dimensions.x * film_dimensions.y * m_params.m_spp;
if (iteration == 0)
{
alloc_rng_states(film_dimensions.x * film_dimensions.y);
}
CameraSampleBufferClass csamples(m_allocator);
csamples.allocate(num_samples);
CameraSampleBuffer csb = csamples.get_buffer();
// Create camera rays.
RayBufferClass primary_rays(m_allocator);
primary_rays.allocate(num_samples);
// call a primary ray generation kernel.
RayBuffer rb = primary_rays.get_buffer();
dim3 grid_size(256, 1, 1);
dim3 block_size(256, 1, 1);
CudaTimer t1("primary rays timer");
thrust::default_random_engine* rng = static_cast<thrust::default_random_engine*>(m_rng_generators.m_ptr);
t1.start();
kernel_generate_perspective_camera_samples<<<grid_size, block_size>>>(csb, rb, *(m_scene->get_camera()), m_params.m_spp, rng, iteration, make_uint4(0));
t1.stop();
// Call trace functionality.
void* data[] = { &csb, &rb, &iteration };
m_integrator->render(m_scene, m_tracer, data, 2);
// Collect results and update.
Film* film = m_scene->get_output_film();
Filter* filter = film->get_filter();
CudaTimer t2("update timer");
t2.start();
kernel_update_film <<<grid_size, block_size >> >(csb, static_cast<Pixel*>(film->get_pixels().m_ptr), make_uint2(film->get_width(), film->get_height()),
static_cast<float*>(film->get_filter_table().m_ptr), make_float2(filter->m_xwidth, filter->m_ywidth), make_float2(filter->m_inv_xwidth, filter->m_inv_ywidth), make_uint2(0, 0));
t2.stop();
float ms = t2.get_ms();
std::cout << "Filtering : " << ms << std::endl;
}
void SamplerRenderer::alloc_rng_states(uint32_t samples)
{
m_rng_generators = m_allocator.allocate(sizeof(thrust::default_random_engine) * samples);
}
}
|
f5a1853aa93f35dad4d6f834a437a029b2bef470.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* neighborlist.cu
*
* Created on: Sep 4, 2016
* Author: uwe
*/
#include "neighborlist_modes.h"
#include "nativeTypesWrapper.h"
#include "DeviceNLGrid.h"
#include "DeviceProtein.h"
#include "DeviceParamTable.h"
#include "SimParam.h"
#include "forcefield.h"
#include "macros.h"
#include "Types_6D_Modes.h"
namespace as {
/*
* In comparison to the NL force calculation without modes, this method takes the deformed coordinates of the receptor instead of its original position,
* which does not contain the mode deformation.
* Note that if the forces acting on the receptor are calculated, they have to be rotated back into the coordinate system of the receptor.
*/
template<typename REAL>
__global__ void d_NLPotForce(
const d_NLGrid<REAL> grid,
const d_Protein<REAL> rec,
const d_Protein<REAL> lig,
const d_ParamTable<REAL> table,
const SimParam<REAL> simParam,
const unsigned numDOFs,
const REAL* RecPosX,
const REAL* RecPosY,
const REAL* RecPosZ,
const REAL* LigPosX,
const REAL* LigPosY,
const REAL* LigPosZ,
REAL* outLig_fx,
REAL* outLig_fy,
REAL* outLig_fz,
REAL* outLigand_E)
{
using real3_t = typename TypeWrapper<REAL>::real3_t;
const unsigned i = blockDim.x * blockIdx.x + threadIdx.x;
const unsigned LigNumEl = lig.numAtoms;
if (i < LigNumEl*numDOFs) {
const unsigned LigAttrIdx = i % LigNumEl;
const unsigned atomTypeLig = lig.type[LigAttrIdx];
if (atomTypeLig != 0) {
const REAL posLigX = LigPosX[i];
const REAL posLigY = LigPosY[i];
const REAL posLigZ = LigPosZ[i];
/* test if the particle is out of bounds and perform data fetch and neighbour-list calculations */
if (!( (posLigX < grid.minDim.x || posLigX > grid.maxDim.x)
|| (posLigY < grid.minDim.y || posLigY > grid.maxDim.y)
|| (posLigZ < grid.minDim.z || posLigZ > grid.maxDim.z) ))
{
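/* look up the neighbour-list cell for the voxel containing this atom:
   nDesc.x is the number of receptor neighbours, nDesc.y the start offset
   into grid.neighborList */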
const uint2 nDesc = tex3D<uint2>(grid.tex,
(posLigX - grid.minDim.x) * grid.dVox_inv + 0.5,
(posLigY - grid.minDim.y) * grid.dVox_inv + 0.5,
(posLigZ - grid.minDim.z) * grid.dVox_inv + 0.5);
/* numEl = x; idx = y */
real3_t fAcc = {0,0,0};
REAL eAcc = 0;
for (unsigned j = 0; j < nDesc.x; ++j) {
const unsigned nIdx = grid.neighborList[nDesc.y + j];
REAL dx = posLigX - RecPosX[nIdx];
REAL dy = posLigY - RecPosY[nIdx];
REAL dz = posLigZ - RecPosZ[nIdx];
const REAL dr2 = dx * dx + dy * dy + dz * dz;
const REAL dPlateau2 = grid.dPlateau2;
if ((dr2) > dPlateau2) {
continue;
}
constexpr REAL one = static_cast<REAL>(1.0);
const REAL dr2_inv = one/dr2; // inverse of dr2
// Scale distances
dx *= dr2_inv;
dy *= dr2_inv;
dz *= dr2_inv;
real3_t fVdW;
REAL eVdW;
const size_t atomTypeRec = rec.type[nIdx];
// calculate energy and potential/energy of LJ/VdW potential
auto const params = table.getParams(atomTypeRec-1, atomTypeLig-1);
LJPotForce(dr2, dr2_inv, dx, dy, dz,
params,
one, table.shape,
fVdW.x, fVdW.y, fVdW.z, eVdW);
fAcc.x += fVdW.x;
fAcc.y += fVdW.y;
fAcc.z += fVdW.z;
eAcc += eVdW;
const REAL chargeLig = lig.charge[LigAttrIdx];
const REAL chargeRec = rec.charge[nIdx];
const REAL chargeLigRec = chargeLig * chargeRec * simParam.ffelec;
const bool calc_elec = abs(chargeLigRec) > 0.001; // evaluate electric potential
REAL dPlateau2_inv = 1/grid.dPlateau2;
const REAL ratio = sqrt(dr2*dPlateau2_inv);
REAL rdx = ratio*dx;
REAL rdy = ratio*dy;
REAL rdz = ratio*dz;
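// Evaluate the LJ term again at the plateau (cutoff) distance and subtract it below,
// so that energy and force go to zero at the cutoff instead of jumping.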
LJPotForce(dPlateau2, dPlateau2_inv, rdx, rdy, rdz,
params,
one, table.shape,
fVdW.x, fVdW.y, fVdW.z, eVdW);
fAcc.x -= fVdW.x;
fAcc.y -= fVdW.y;
fAcc.z -= fVdW.z;
eAcc -= eVdW;
if (calc_elec) {
REAL eEl;
real3_t fEl;
// calculate energy and potential/energy of charge potential
if (false) {
printf("%u %f %f %f %u\n" ,
i, posLigX, posLigY, posLigZ, atomTypeLig);
}
ChargePotForce(dr2_inv, dx, dy, dz,
chargeLigRec,
one, simParam.dielec,
fEl.x, fEl.y, fEl.z, eEl);
fAcc.x += fEl.x;
fAcc.y += fEl.y;
fAcc.z += fEl.z;
eAcc += eEl;
ChargePotForce(dPlateau2_inv, rdx, rdy, rdz,
chargeLigRec,
one, simParam.dielec,
fEl.x, fEl.y, fEl.z, eEl);
fAcc.x -= fEl.x;
fAcc.y -= fEl.y;
fAcc.z -= fEl.z;
eAcc -= eEl;
}
}
/* store results back to global memory */
if (nDesc.x > 0) {
outLig_fx[i] += fAcc.x;
outLig_fy[i] += fAcc.y;
outLig_fz[i] += fAcc.z;
outLigand_E[i] += eAcc;
}
}
} // if (atomtype != 0)
}
}
template<typename REAL>
void d_NLPotForce(
unsigned blockSize,
unsigned gridSize,
const hipStream_t &stream,
const d_NLGrid<REAL>& grid,
const d_Protein<REAL>& rec,
const d_Protein<REAL>& lig,
const d_ParamTable<REAL>& table,
const SimParam<REAL>& simParam,
const unsigned& numDOFs,
const REAL* RecPosX,
const REAL* RecPosY,
const REAL* RecPosZ,
const REAL* LigPosX,
const REAL* LigPosY,
const REAL* LigPosZ,
REAL* outLig_fx,
REAL* outLig_fy,
REAL* outLig_fz,
REAL* outLigand_E)
{
cudaVerifyKernel((
hipLaunchKernelGGL(( d_NLPotForce), dim3(gridSize), dim3(blockSize), 0, stream,
grid,
rec,
lig,
table,
simParam,
numDOFs,
RecPosX,
RecPosY,
RecPosZ,
LigPosX,
LigPosY,
LigPosZ,
outLig_fx,
outLig_fy,
outLig_fz,
outLigand_E
)
));
}
template
void d_NLPotForce<float>(
unsigned blockSize,
unsigned gridSize,
const hipStream_t &stream,
const d_NLGrid<float>& grid,
const d_Protein<float>& rec,
const d_Protein<float>& lig,
const d_ParamTable<float>& table,
const SimParam<float>& simParam,
const unsigned& numDOFs,
const float* RecPosX,
const float* RecPosY,
const float* RecPosZ,
const float* LigPosX,
const float* LigPosY,
const float* LigPosZ,
float* outLig_fx,
float* outLig_fy,
float* outLig_fz,
float* outLigand_E
);
template
void d_NLPotForce<double>(
unsigned blockSize,
unsigned gridSize,
const hipStream_t &stream,
const d_NLGrid<double>& grid,
const d_Protein<double>& rec,
const d_Protein<double>& lig,
const d_ParamTable<double>& table,
const SimParam<double>& simParam,
const unsigned& numDOFs,
const double* RecPosX,
const double* RecPosY,
const double* RecPosZ,
const double* LigPosX,
const double* LigPosY,
const double* LigPosZ,
double* outLig_fx,
double* outLig_fy,
double* outLig_fz,
double* outLigand_E
);
} // namespace as
| f5a1853aa93f35dad4d6f834a437a029b2bef470.cu | /*
* neighborlist.cu
*
* Created on: Sep 4, 2016
* Author: uwe
*/
#include "neighborlist_modes.h"
#include "nativeTypesWrapper.h"
#include "DeviceNLGrid.h"
#include "DeviceProtein.h"
#include "DeviceParamTable.h"
#include "SimParam.h"
#include "forcefield.h"
#include "macros.h"
#include "Types_6D_Modes.h"
namespace as {
/*
* In comparison to the NL force calculation without modes, this method takes the deformed coordinates of the receptor instead of its original position,
* which does not contain the mode deformation.
* Note that if the forces acting on the receptor are calculated, they have to be rotated back into the coordinate system of the receptor.
*/
template<typename REAL>
__global__ void d_NLPotForce(
const d_NLGrid<REAL> grid,
const d_Protein<REAL> rec,
const d_Protein<REAL> lig,
const d_ParamTable<REAL> table,
const SimParam<REAL> simParam,
const unsigned numDOFs,
const REAL* RecPosX,
const REAL* RecPosY,
const REAL* RecPosZ,
const REAL* LigPosX,
const REAL* LigPosY,
const REAL* LigPosZ,
REAL* outLig_fx,
REAL* outLig_fy,
REAL* outLig_fz,
REAL* outLigand_E)
{
using real3_t = typename TypeWrapper<REAL>::real3_t;
const unsigned i = blockDim.x * blockIdx.x + threadIdx.x;
const unsigned LigNumEl = lig.numAtoms;
if (i < LigNumEl*numDOFs) {
const unsigned LigAttrIdx = i % LigNumEl;
const unsigned atomTypeLig = lig.type[LigAttrIdx];
if (atomTypeLig != 0) {
const REAL posLigX = LigPosX[i];
const REAL posLigY = LigPosY[i];
const REAL posLigZ = LigPosZ[i];
/* test if the particle is out of bounds and perform data fetch and neighbour-list calculations */
if (!( (posLigX < grid.minDim.x || posLigX > grid.maxDim.x)
|| (posLigY < grid.minDim.y || posLigY > grid.maxDim.y)
|| (posLigZ < grid.minDim.z || posLigZ > grid.maxDim.z) ))
{
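/* look up the neighbour-list cell for the voxel containing this atom:
   nDesc.x is the number of receptor neighbours, nDesc.y the start offset
   into grid.neighborList */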
const uint2 nDesc = tex3D<uint2>(grid.tex,
(posLigX - grid.minDim.x) * grid.dVox_inv + 0.5,
(posLigY - grid.minDim.y) * grid.dVox_inv + 0.5,
(posLigZ - grid.minDim.z) * grid.dVox_inv + 0.5);
/* numEl = x; idx = y */
real3_t fAcc = {0,0,0};
REAL eAcc = 0;
for (unsigned j = 0; j < nDesc.x; ++j) {
const unsigned nIdx = grid.neighborList[nDesc.y + j];
REAL dx = posLigX - RecPosX[nIdx];
REAL dy = posLigY - RecPosY[nIdx];
REAL dz = posLigZ - RecPosZ[nIdx];
const REAL dr2 = dx * dx + dy * dy + dz * dz;
const REAL dPlateau2 = grid.dPlateau2;
if ((dr2) > dPlateau2) {
continue;
}
constexpr REAL one = static_cast<REAL>(1.0);
const REAL dr2_inv = one/dr2; // inverse of dr2
// Scale distances
dx *= dr2_inv;
dy *= dr2_inv;
dz *= dr2_inv;
real3_t fVdW;
REAL eVdW;
const size_t atomTypeRec = rec.type[nIdx];
// calculate energy and potential/energy of LJ/VdW potential
auto const params = table.getParams(atomTypeRec-1, atomTypeLig-1);
LJPotForce(dr2, dr2_inv, dx, dy, dz,
params,
one, table.shape,
fVdW.x, fVdW.y, fVdW.z, eVdW);
fAcc.x += fVdW.x;
fAcc.y += fVdW.y;
fAcc.z += fVdW.z;
eAcc += eVdW;
const REAL chargeLig = lig.charge[LigAttrIdx];
const REAL chargeRec = rec.charge[nIdx];
const REAL chargeLigRec = chargeLig * chargeRec * simParam.ffelec;
const bool calc_elec = abs(chargeLigRec) > 0.001; // evaluate electric potential
REAL dPlateau2_inv = 1/grid.dPlateau2;
const REAL ratio = sqrt(dr2*dPlateau2_inv);
REAL rdx = ratio*dx;
REAL rdy = ratio*dy;
REAL rdz = ratio*dz;
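// Evaluate the LJ term again at the plateau (cutoff) distance and subtract it below,
// so that energy and force go to zero at the cutoff instead of jumping.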
LJPotForce(dPlateau2, dPlateau2_inv, rdx, rdy, rdz,
params,
one, table.shape,
fVdW.x, fVdW.y, fVdW.z, eVdW);
fAcc.x -= fVdW.x;
fAcc.y -= fVdW.y;
fAcc.z -= fVdW.z;
eAcc -= eVdW;
if (calc_elec) {
REAL eEl;
real3_t fEl;
// calculate energy and potential/energy of charge potential
if (false) {
printf("%u %f %f %f %u\n" ,
i, posLigX, posLigY, posLigZ, atomTypeLig);
}
ChargePotForce(dr2_inv, dx, dy, dz,
chargeLigRec,
one, simParam.dielec,
fEl.x, fEl.y, fEl.z, eEl);
fAcc.x += fEl.x;
fAcc.y += fEl.y;
fAcc.z += fEl.z;
eAcc += eEl;
ChargePotForce(dPlateau2_inv, rdx, rdy, rdz,
chargeLigRec,
one, simParam.dielec,
fEl.x, fEl.y, fEl.z, eEl);
fAcc.x -= fEl.x;
fAcc.y -= fEl.y;
fAcc.z -= fEl.z;
eAcc -= eEl;
}
}
/* store results back to global memory */
if (nDesc.x > 0) {
outLig_fx[i] += fAcc.x;
outLig_fy[i] += fAcc.y;
outLig_fz[i] += fAcc.z;
outLigand_E[i] += eAcc;
}
}
} // if (atomtype != 0)
}
}
template<typename REAL>
void d_NLPotForce(
unsigned blockSize,
unsigned gridSize,
const cudaStream_t &stream,
const d_NLGrid<REAL>& grid,
const d_Protein<REAL>& rec,
const d_Protein<REAL>& lig,
const d_ParamTable<REAL>& table,
const SimParam<REAL>& simParam,
const unsigned& numDOFs,
const REAL* RecPosX,
const REAL* RecPosY,
const REAL* RecPosZ,
const REAL* LigPosX,
const REAL* LigPosY,
const REAL* LigPosZ,
REAL* outLig_fx,
REAL* outLig_fy,
REAL* outLig_fz,
REAL* outLigand_E)
{
cudaVerifyKernel((
d_NLPotForce<<<gridSize, blockSize, 0, stream>>> (
grid,
rec,
lig,
table,
simParam,
numDOFs,
RecPosX,
RecPosY,
RecPosZ,
LigPosX,
LigPosY,
LigPosZ,
outLig_fx,
outLig_fy,
outLig_fz,
outLigand_E
)
));
}
template
void d_NLPotForce<float>(
unsigned blockSize,
unsigned gridSize,
const cudaStream_t &stream,
const d_NLGrid<float>& grid,
const d_Protein<float>& rec,
const d_Protein<float>& lig,
const d_ParamTable<float>& table,
const SimParam<float>& simParam,
const unsigned& numDOFs,
const float* RecPosX,
const float* RecPosY,
const float* RecPosZ,
const float* LigPosX,
const float* LigPosY,
const float* LigPosZ,
float* outLig_fx,
float* outLig_fy,
float* outLig_fz,
float* outLigand_E
);
template
void d_NLPotForce<double>(
unsigned blockSize,
unsigned gridSize,
const cudaStream_t &stream,
const d_NLGrid<double>& grid,
const d_Protein<double>& rec,
const d_Protein<double>& lig,
const d_ParamTable<double>& table,
const SimParam<double>& simParam,
const unsigned& numDOFs,
const double* RecPosX,
const double* RecPosY,
const double* RecPosZ,
const double* LigPosX,
const double* LigPosY,
const double* LigPosZ,
double* outLig_fx,
double* outLig_fy,
double* outLig_fz,
double* outLigand_E
);
} // namespace as
|
02ae2461aa7146ce6ab8f6380101d70b93cdc039.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <assert.h>
#include <hip/hip_runtime.h>
// error handling
#include <helper_cuda.h>
// SDK package
#include <helper_functions.h>
#define MAX_GPU_COUNT 32
typedef struct{
float *d_A;
float *d_B;
float *d_C;
hipStream_t stream;
bool *hasRead_A;
bool *hasRead_B;
} GPUPlan;
template <int block_size> __global__ void matrixMultiply(float *d_A, float *d_B, float *d_C, int wA, int wB, unsigned int offsetX, unsigned int offsetY){
unsigned int x_index = blockDim.x * blockIdx.x + threadIdx.x + offsetX;
unsigned int y_index = blockDim.y * blockIdx.y + threadIdx.y + offsetY;
__shared__ float blockA[block_size][block_size];
__shared__ float blockB[block_size][block_size];
float Csub = 0;
int tx = threadIdx.x, ty = threadIdx.y;
// __syncthreads() acts as a barrier at which all threads in the block must wait before any is allowed to proceed.
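// Each iteration stages one block_size x block_size tile of A and one of B in shared
// memory, synchronizes the block, then accumulates this thread's partial dot product.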
for(int i = 0; i < wA / block_size; i++){
blockA[ty][tx] = d_A[y_index * wA + (i * block_size + tx)];
blockB[ty][tx] = d_B[(i * block_size + ty) * wB + x_index];
__syncthreads();
for(int j = 0; j < block_size; j++){
Csub += (blockA[ty][j] * blockB[j][tx]);
}
__syncthreads();
}
d_C[y_index * wB + x_index] = Csub; //blockC[y_index % blockDim.y][x_index % blockDim.x];
}
__global__ void print_gpu_content(float* d_A, float* d_B, float* d_C, int wA, int hA, int wB, int hB, int wC, int hC, unsigned int offsetX, unsigned int offsetY){
printf("At offsetX: %d, offsetY: %d\n", offsetX, offsetY);
printf("d_A:\n");
for(int i = 0; i < hA; i++){
for(int j = 0; j < wA; j++)
printf("%5.1f ", d_A[i * wA + j]);
printf("\n");
}
printf("d_B:\n");
for(int i = 0; i < hB; i++){
for(int j = 0; j < wB; j++)
printf("%5.1f ", d_B[i * wB + j]);
printf("\n");
}
printf("d_C:\n");
for(int i = 0; i < hC; i++){
for(int j = 0; j < wC; j++)
printf("%5.1f ", d_C[i * wC + j]);
printf("\n");
}
}
void print_cpu_content(float* h_A, float* h_B, float* h_C, int wA, int hA, int wB, int hB, int wC, int hC){
printf("h_A:\n");
for(int i = 0; i < hA; i++){
for(int j = 0; j < wA; j++)
printf("%5.1f ", h_A[i * wA + j]);
printf("\n");
}
printf("h_B:\n");
for(int i = 0; i < hB; i++){
for(int j = 0; j < wB; j++)
printf("%5.1f ", h_B[i * wB + j]);
printf("\n");
}
printf("h_C:\n");
for(int i = 0; i < hC; i++){
for(int j = 0; j < wC; j++)
printf("%5.1f ", h_C[i * wC + j]);
printf("\n");
}
}
void init_matrix(float* matrix, int size, float val){
for(int i = 0; i < size; i++)
matrix[i] = i;//val;
}
int main(int argc, char* argv[]){
if(argc < 6){
printf("Usage: ./matrixMulMultiGPUTiling <GPU_N> <A_height> <A_width> <B_height> <B_width>\n");
return 0;
}
int GPU_N, Sys_GPU_N;
GPU_N = atoi(argv[1]);
checkCudaErrors(hipGetDeviceCount(&Sys_GPU_N));
if(GPU_N > Sys_GPU_N){
printf("GPU count should be less than %d\n", Sys_GPU_N);
}
printf("GPU count: %d\n", GPU_N);
const int dimA_y = atoi(argv[2]), dimA_x = atoi(argv[3]), dimB_y = atoi(argv[4]), dimB_x = atoi(argv[5]);
const int block_size = 32;
GPUPlan plan[MAX_GPU_COUNT];
dim3 dimsA(dimA_x, dimA_y);
dim3 dimsB(dimB_x, dimB_y);
dim3 dimsC(dimB_x, dimA_y);
float *h_A, *h_B, *h_C;
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
checkCudaErrors(hipHostMalloc((void**)(&h_A), mem_size_A));
init_matrix(h_A, size_A, 1.0);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
checkCudaErrors(hipHostMalloc((void**)(&h_B), mem_size_B));
init_matrix(h_B, size_B, 2.0);
unsigned int size_C = dimsC.x * dimsC.y;
unsigned int mem_size_C = sizeof(float) * size_C;
checkCudaErrors(hipHostMalloc((void**)(&h_C), mem_size_C));
init_matrix(h_C, size_C, 0.0);
// allocate space for device variable
for(int i = 0; i < GPU_N; i++){
checkCudaErrors(hipSetDevice(i));
checkCudaErrors(hipStreamCreate(&plan[i].stream));
checkCudaErrors(hipMalloc((void **)(&plan[i].d_A), mem_size_A));
checkCudaErrors(hipMalloc((void **)(&plan[i].d_B), mem_size_B));
checkCudaErrors(hipMalloc((void **)(&plan[i].d_C), mem_size_C));
plan[i].hasRead_A = (bool*)calloc(dimsC.y / block_size, sizeof(bool));
plan[i].hasRead_B = (bool*)calloc(dimsC.x / block_size, sizeof(bool));
}
int wA = dimsA.x, wB = dimsB.x, wC = dimsC.x;
int hA = dimsA.y, hB = dimsB.y, hC = dimsC.y;
int gpu_idx = 0;
// print_cpu_content(h_A, h_B, h_C, wA, hA, wB, hB, wC, hC);
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
checkCudaErrors(hipEventRecord(start));
int niter = 1;
dim3 threads(block_size, block_size);
dim3 grid(1, 1);
for(int t = 0; t < niter; t++)
for(int i = 0; i < dimsC.y; i += block_size)
for(int j = 0; j < dimsC.x; j += block_size){
checkCudaErrors(hipSetDevice(gpu_idx));
hipStream_t curr_stream = plan[gpu_idx].stream;
float *d_A = plan[gpu_idx].d_A, *d_B = plan[gpu_idx].d_B, *d_C = plan[gpu_idx].d_C;
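// Copy the block-row of A and the block-column of B to this GPU only the first time
// it needs them; later tiles assigned to the same GPU reuse the cached device copies.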
if(!plan[gpu_idx].hasRead_A[i / block_size])
hipMemcpy2DAsync(d_A + i * wA, wA * sizeof(float), h_A + i * wA, wA * sizeof(float), wA * sizeof(float), block_size, hipMemcpyHostToDevice, curr_stream);
if(!plan[gpu_idx].hasRead_B[j / block_size])
hipMemcpy2DAsync(d_B + j, hB * sizeof(float), h_B + j, hB * sizeof(float), block_size * sizeof(float), hB, hipMemcpyHostToDevice, curr_stream);
plan[gpu_idx].hasRead_A[i / block_size] = true;
plan[gpu_idx].hasRead_B[j / block_size] = true;
unsigned int offsetX = j, offsetY = i;
hipLaunchKernelGGL(( matrixMultiply<block_size>) , dim3(grid), dim3(threads), 0, curr_stream , plan[gpu_idx].d_A, plan[gpu_idx].d_B, plan[gpu_idx].d_C, dimsA.x, dimsB.x, offsetX, offsetY);
hipMemcpy2DAsync(h_C + i * wC + j, wC * sizeof(float), d_C + i * wC + j, wC * sizeof(float), block_size * sizeof(float), block_size, hipMemcpyDeviceToHost, curr_stream);
// print_gpu_content <<<1, 1, 0, curr_stream>>>(d_A, d_B, d_C, wA, hA, wB, hB, wC, hC, offsetX, offsetY);
// hipStreamSynchronize(curr_stream);
gpu_idx = (gpu_idx + 1) % GPU_N;
}
checkCudaErrors(hipEventRecord(stop));
// Wait for the stop event to complete
checkCudaErrors(hipEventSynchronize(stop));
float msecTotal = 0.0f;
checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop));
// Compute and print the performance
float msecPerMatrixMul = msecTotal / niter;
double flopsPerMatrixMul = 2.0 * static_cast<double>(dimsA.x) *
static_cast<double>(dimsA.y) *
static_cast<double>(dimsB.x);
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) /
(msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops," \
" WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
} | 02ae2461aa7146ce6ab8f6380101d70b93cdc039.cu | #include <stdio.h>
#include <assert.h>
#include <cuda_runtime.h>
// error handling
#include <helper_cuda.h>
// SDK package
#include <helper_functions.h>
#define MAX_GPU_COUNT 32
typedef struct{
float *d_A;
float *d_B;
float *d_C;
cudaStream_t stream;
bool *hasRead_A;
bool *hasRead_B;
} GPUPlan;
template <int block_size> __global__ void matrixMultiply(float *d_A, float *d_B, float *d_C, int wA, int wB, unsigned int offsetX, unsigned int offsetY){
unsigned int x_index = blockDim.x * blockIdx.x + threadIdx.x + offsetX;
unsigned int y_index = blockDim.y * blockIdx.y + threadIdx.y + offsetY;
__shared__ float blockA[block_size][block_size];
__shared__ float blockB[block_size][block_size];
float Csub = 0;
int tx = threadIdx.x, ty = threadIdx.y;
// __syncthreads() acts as a barrier at which all threads in the block must wait before any is allowed to proceed.
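// Each iteration stages one block_size x block_size tile of A and one of B in shared
// memory, synchronizes the block, then accumulates this thread's partial dot product.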
for(int i = 0; i < wA / block_size; i++){
blockA[ty][tx] = d_A[y_index * wA + (i * block_size + tx)];
blockB[ty][tx] = d_B[(i * block_size + ty) * wB + x_index];
__syncthreads();
for(int j = 0; j < block_size; j++){
Csub += (blockA[ty][j] * blockB[j][tx]);
}
__syncthreads();
}
d_C[y_index * wB + x_index] = Csub; //blockC[y_index % blockDim.y][x_index % blockDim.x];
}
__global__ void print_gpu_content(float* d_A, float* d_B, float* d_C, int wA, int hA, int wB, int hB, int wC, int hC, unsigned int offsetX, unsigned int offsetY){
printf("At offsetX: %d, offsetY: %d\n", offsetX, offsetY);
printf("d_A:\n");
for(int i = 0; i < hA; i++){
for(int j = 0; j < wA; j++)
printf("%5.1f ", d_A[i * wA + j]);
printf("\n");
}
printf("d_B:\n");
for(int i = 0; i < hB; i++){
for(int j = 0; j < wB; j++)
printf("%5.1f ", d_B[i * wB + j]);
printf("\n");
}
printf("d_C:\n");
for(int i = 0; i < hC; i++){
for(int j = 0; j < wC; j++)
printf("%5.1f ", d_C[i * wC + j]);
printf("\n");
}
}
void print_cpu_content(float* h_A, float* h_B, float* h_C, int wA, int hA, int wB, int hB, int wC, int hC){
printf("h_A:\n");
for(int i = 0; i < hA; i++){
for(int j = 0; j < wA; j++)
printf("%5.1f ", h_A[i * wA + j]);
printf("\n");
}
printf("h_B:\n");
for(int i = 0; i < hB; i++){
for(int j = 0; j < wB; j++)
printf("%5.1f ", h_B[i * wB + j]);
printf("\n");
}
printf("h_C:\n");
for(int i = 0; i < hC; i++){
for(int j = 0; j < wC; j++)
printf("%5.1f ", h_C[i * wC + j]);
printf("\n");
}
}
void init_matrix(float* matrix, int size, float val){
for(int i = 0; i < size; i++)
matrix[i] = i;//val;
}
int main(int argc, char* argv[]){
if(argc < 6){
printf("Usage: ./matrixMulMultiGPUTiling <GPU_N> <A_height> <A_width> <B_height> <B_width>\n");
return 0;
}
int GPU_N, Sys_GPU_N;
GPU_N = atoi(argv[1]);
checkCudaErrors(cudaGetDeviceCount(&Sys_GPU_N));
if(GPU_N > Sys_GPU_N){
printf("GPU count should be less than %d\n", Sys_GPU_N);
}
printf("GPU count: %d\n", GPU_N);
const int dimA_y = atoi(argv[2]), dimA_x = atoi(argv[3]), dimB_y = atoi(argv[4]), dimB_x = atoi(argv[5]);
const int block_size = 32;
GPUPlan plan[MAX_GPU_COUNT];
dim3 dimsA(dimA_x, dimA_y);
dim3 dimsB(dimB_x, dimB_y);
dim3 dimsC(dimB_x, dimA_y);
float *h_A, *h_B, *h_C;
unsigned int size_A = dimsA.x * dimsA.y;
unsigned int mem_size_A = sizeof(float) * size_A;
checkCudaErrors(cudaMallocHost((void**)(&h_A), mem_size_A));
init_matrix(h_A, size_A, 1.0);
unsigned int size_B = dimsB.x * dimsB.y;
unsigned int mem_size_B = sizeof(float) * size_B;
checkCudaErrors(cudaMallocHost((void**)(&h_B), mem_size_B));
init_matrix(h_B, size_B, 2.0);
unsigned int size_C = dimsC.x * dimsC.y;
unsigned int mem_size_C = sizeof(float) * size_C;
checkCudaErrors(cudaMallocHost((void**)(&h_C), mem_size_C));
init_matrix(h_C, size_C, 0.0);
// allocate space for device variable
for(int i = 0; i < GPU_N; i++){
checkCudaErrors(cudaSetDevice(i));
checkCudaErrors(cudaStreamCreate(&plan[i].stream));
checkCudaErrors(cudaMalloc((void **)(&plan[i].d_A), mem_size_A));
checkCudaErrors(cudaMalloc((void **)(&plan[i].d_B), mem_size_B));
checkCudaErrors(cudaMalloc((void **)(&plan[i].d_C), mem_size_C));
plan[i].hasRead_A = (bool*)calloc(dimsC.y / block_size, sizeof(bool));
plan[i].hasRead_B = (bool*)calloc(dimsC.x / block_size, sizeof(bool));
}
int wA = dimsA.x, wB = dimsB.x, wC = dimsC.x;
int hA = dimsA.y, hB = dimsB.y, hC = dimsC.y;
int gpu_idx = 0;
// print_cpu_content(h_A, h_B, h_C, wA, hA, wB, hB, wC, hC);
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
checkCudaErrors(cudaEventRecord(start));
int niter = 1;
dim3 threads(block_size, block_size);
dim3 grid(1, 1);
for(int t = 0; t < niter; t++)
for(int i = 0; i < dimsC.y; i += block_size)
for(int j = 0; j < dimsC.x; j += block_size){
checkCudaErrors(cudaSetDevice(gpu_idx));
cudaStream_t curr_stream = plan[gpu_idx].stream;
float *d_A = plan[gpu_idx].d_A, *d_B = plan[gpu_idx].d_B, *d_C = plan[gpu_idx].d_C;
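// Copy the block-row of A and the block-column of B to this GPU only the first time
// it needs them; later tiles assigned to the same GPU reuse the cached device copies.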
if(!plan[gpu_idx].hasRead_A[i / block_size])
cudaMemcpy2DAsync(d_A + i * wA, wA * sizeof(float), h_A + i * wA, wA * sizeof(float), wA * sizeof(float), block_size, cudaMemcpyHostToDevice, curr_stream);
if(!plan[gpu_idx].hasRead_B[j / block_size])
cudaMemcpy2DAsync(d_B + j, hB * sizeof(float), h_B + j, hB * sizeof(float), block_size * sizeof(float), hB, cudaMemcpyHostToDevice, curr_stream);
plan[gpu_idx].hasRead_A[i / block_size] = true;
plan[gpu_idx].hasRead_B[j / block_size] = true;
unsigned int offsetX = j, offsetY = i;
matrixMultiply<block_size> <<< grid, threads, 0, curr_stream >>>(plan[gpu_idx].d_A, plan[gpu_idx].d_B, plan[gpu_idx].d_C, dimsA.x, dimsB.x, offsetX, offsetY);
cudaMemcpy2DAsync(h_C + i * wC + j, wC * sizeof(float), d_C + i * wC + j, wC * sizeof(float), block_size * sizeof(float), block_size, cudaMemcpyDeviceToHost, curr_stream);
// print_gpu_content <<<1, 1, 0, curr_stream>>>(d_A, d_B, d_C, wA, hA, wB, hB, wC, hC, offsetX, offsetY);
// cudaStreamSynchronize(curr_stream);
gpu_idx = (gpu_idx + 1) % GPU_N;
}
checkCudaErrors(cudaEventRecord(stop));
// Wait for the stop event to complete
checkCudaErrors(cudaEventSynchronize(stop));
float msecTotal = 0.0f;
checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));
// Compute and print the performance
float msecPerMatrixMul = msecTotal / niter;
double flopsPerMatrixMul = 2.0 * static_cast<double>(dimsA.x) *
static_cast<double>(dimsA.y) *
static_cast<double>(dimsB.x);
double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) /
(msecPerMatrixMul / 1000.0f);
printf(
"Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops," \
" WorkgroupSize= %u threads/block\n",
gigaFlops,
msecPerMatrixMul,
flopsPerMatrixMul,
threads.x * threads.y);
} |
d0f85387d77d842697b644236ffb4d0d3ad3ec31.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
hipGetErrorString(error)); \
exit(EXIT_FAILURE); \
} \
}
struct GpuTimer
{
hipEvent_t start;
hipEvent_t stop;
GpuTimer()
{
hipEventCreate(&start);
hipEventCreate(&stop);
}
~GpuTimer()
{
hipEventDestroy(start);
hipEventDestroy(stop);
}
void Start()
{
hipEventRecord(start, 0);
}
void Stop()
{
hipEventRecord(stop, 0);
}
float Elapsed()
{
float elapsed;
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
__global__ void reduceBlksKernel1(int * in, int n, int * out)
{
// TODO
}
__global__ void reduceBlksKernel2(int * in, int n, int * out)
{
// TODO
}
__global__ void reduceBlksKernel3(int * in, int n, int * out)
{
// TODO
}
int reduce(int const * in, int n,
bool useDevice=false, dim3 blockSize=dim3(1), int kernelType=1)
{
int result = 0; // Init
if (useDevice == false)
{
result = in[0];
for (int i = 1; i < n; i++)
result += in[i];
}
else // Use device
{
// Allocate device memories
int * d_in, * d_out;
dim3 gridSize(1); // TODO: Compute gridSize from n and blockSize
CHECK(hipMalloc(&d_in, n * sizeof(int)));
CHECK(hipMalloc(&d_out, gridSize.x * sizeof(int)));
// Copy data to device memory
CHECK(hipMemcpy(d_in, in, n*sizeof(int), hipMemcpyHostToDevice));
// Call kernel
GpuTimer timer;
timer.Start();
if (kernelType == 1)
hipLaunchKernelGGL(( reduceBlksKernel1), dim3(gridSize), dim3(blockSize), 0, 0, d_in, n, d_out);
else if (kernelType == 2)
hipLaunchKernelGGL(( reduceBlksKernel2), dim3(gridSize), dim3(blockSize), 0, 0, d_in, n, d_out);
else
hipLaunchKernelGGL(( reduceBlksKernel3), dim3(gridSize), dim3(blockSize), 0, 0, d_in, n, d_out);
timer.Stop();
float kernelTime = timer.Elapsed();
hipDeviceSynchronize();
CHECK(hipGetLastError());
// Copy result from device memory
int * out = (int *)malloc(gridSize.x * sizeof(int));
CHECK(hipMemcpy(out, d_out, gridSize.x*sizeof(int), hipMemcpyDeviceToHost));
// Free device memories
CHECK(hipFree(d_in));
CHECK(hipFree(d_out));
// Host do the rest of the work
timer.Start();
result = out[0];
for (int i = 1; i < gridSize.x; i++)
{
result += out[i];
}
timer.Stop();
float postKernelTime = timer.Elapsed();
// Free memory
free(out);
// Print info
printf("\nKernel %d\n", kernelType);
printf("Grid size: %d, block size: %d\n", gridSize.x, blockSize.x);
printf("Kernel time = %f ms, post-kernel time = %f ms\n", kernelTime, postKernelTime);
}
return result;
}
void checkCorrectness(int r1, int r2)
{
if (r1 == r2)
printf("CORRECT :)\n");
else
printf("INCORRECT :(\n");
}
void printDeviceInfo()
{
hipDeviceProp_t devProv;
CHECK(hipGetDeviceProperties(&devProv, 0));
printf("**********GPU info**********\n");
printf("Name: %s\n", devProv.name);
printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
printf("Num SMs: %d\n", devProv.multiProcessorCount);
printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
printf("GMEM: %lu bytes\n", devProv.totalGlobalMem);
printf("****************************\n\n");
}
int main(int argc, char ** argv)
{
printDeviceInfo();
// Set up input size
int n = (1 << 24) + 1;
printf("Input size: %d\n", n);
// Set up input data
int * in = (int *) malloc(n * sizeof(int));
for (int i = 0; i < n; i++)
{
// Generate a random integer in [0, 255]
in[i] = (int)(rand() & 0xFF);
}
// Reduce NOT using device
int correctResult = reduce(in, n);
// Reduce using device, kernel1
dim3 blockSize(512); // Default
if (argc == 2)
blockSize.x = atoi(argv[1]);
int result1 = reduce(in, n, true, blockSize, 1);
checkCorrectness(result1, correctResult);
// Reduce using device, kernel2
int result2 = reduce(in, n, true, blockSize, 2);
checkCorrectness(result2, correctResult);
// Reduce using device, kernel3
int result3 = reduce(in, n, true, blockSize, 3);
checkCorrectness(result3, correctResult);
// Free memories
free(in);
} | d0f85387d77d842697b644236ffb4d0d3ad3ec31.cu | #include <stdio.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(EXIT_FAILURE); \
} \
}
struct GpuTimer
{
cudaEvent_t start;
cudaEvent_t stop;
GpuTimer()
{
cudaEventCreate(&start);
cudaEventCreate(&stop);
}
~GpuTimer()
{
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
void Start()
{
cudaEventRecord(start, 0);
}
void Stop()
{
cudaEventRecord(stop, 0);
}
float Elapsed()
{
float elapsed;
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
__global__ void reduceBlksKernel1(int * in, int n, int * out)
{
// TODO
}
__global__ void reduceBlksKernel2(int * in, int n, int * out)
{
// TODO
}
__global__ void reduceBlksKernel3(int * in, int n, int * out)
{
// TODO
}
int reduce(int const * in, int n,
bool useDevice=false, dim3 blockSize=dim3(1), int kernelType=1)
{
int result = 0; // Init
if (useDevice == false)
{
result = in[0];
for (int i = 1; i < n; i++)
result += in[i];
}
else // Use device
{
// Allocate device memories
int * d_in, * d_out;
dim3 gridSize(1); // TODO: Compute gridSize from n and blockSize
CHECK(cudaMalloc(&d_in, n * sizeof(int)));
CHECK(cudaMalloc(&d_out, gridSize.x * sizeof(int)));
// Copy data to device memory
CHECK(cudaMemcpy(d_in, in, n*sizeof(int), cudaMemcpyHostToDevice));
// Call kernel
GpuTimer timer;
timer.Start();
if (kernelType == 1)
reduceBlksKernel1<<<gridSize, blockSize>>>(d_in, n, d_out);
else if (kernelType == 2)
reduceBlksKernel2<<<gridSize, blockSize>>>(d_in, n, d_out);
else
reduceBlksKernel3<<<gridSize, blockSize>>>(d_in, n, d_out);
timer.Stop();
float kernelTime = timer.Elapsed();
cudaDeviceSynchronize();
CHECK(cudaGetLastError());
// Copy result from device memory
int * out = (int *)malloc(gridSize.x * sizeof(int));
CHECK(cudaMemcpy(out, d_out, gridSize.x*sizeof(int), cudaMemcpyDeviceToHost));
// Free device memories
CHECK(cudaFree(d_in));
CHECK(cudaFree(d_out));
// Host do the rest of the work
timer.Start();
result = out[0];
for (int i = 1; i < gridSize.x; i++)
{
result += out[i];
}
timer.Stop();
float postKernelTime = timer.Elapsed();
// Free memory
free(out);
// Print info
printf("\nKernel %d\n", kernelType);
printf("Grid size: %d, block size: %d\n", gridSize.x, blockSize.x);
printf("Kernel time = %f ms, post-kernel time = %f ms\n", kernelTime, postKernelTime);
}
return result;
}
void checkCorrectness(int r1, int r2)
{
if (r1 == r2)
printf("CORRECT :)\n");
else
printf("INCORRECT :(\n");
}
void printDeviceInfo()
{
cudaDeviceProp devProv;
CHECK(cudaGetDeviceProperties(&devProv, 0));
printf("**********GPU info**********\n");
printf("Name: %s\n", devProv.name);
printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
printf("Num SMs: %d\n", devProv.multiProcessorCount);
printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
printf("GMEM: %lu bytes\n", devProv.totalGlobalMem);
printf("****************************\n\n");
}
int main(int argc, char ** argv)
{
printDeviceInfo();
// Set up input size
int n = (1 << 24) + 1;
printf("Input size: %d\n", n);
// Set up input data
int * in = (int *) malloc(n * sizeof(int));
for (int i = 0; i < n; i++)
{
// Generate a random integer in [0, 255]
in[i] = (int)(rand() & 0xFF);
}
// Reduce NOT using device
int correctResult = reduce(in, n);
// Reduce using device, kernel1
dim3 blockSize(512); // Default
if (argc == 2)
blockSize.x = atoi(argv[1]);
int result1 = reduce(in, n, true, blockSize, 1);
checkCorrectness(result1, correctResult);
// Reduce using device, kernel2
int result2 = reduce(in, n, true, blockSize, 2);
checkCorrectness(result2, correctResult);
// Reduce using device, kernel3
int result3 = reduce(in, n, true, blockSize, 3);
checkCorrectness(result3, correctResult);
// Free memories
free(in);
} |
abae64a5ae0994201dd2547d43814e6459f07735.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@generated c Wed Aug 14 12:16:42 2013
@author Stan Tomov
*/
#include "common_magma.h"
#define PRECISION_c
#include "commonblas.h"
__global__ void ctranspose3_32( magmaFloatComplex *B, int ldb,
const magmaFloatComplex *A, int lda,
int m, int m32, int n, int n32)
{
__shared__ magmaFloatComplex sA[32][CSIZE_1SHARED+1];
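// The +1 padding of the fast dimension staggers columns across shared-memory banks,
// avoiding bank conflicts when the tile is read back in transposed order.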
int inx = threadIdx.x;
int iny = threadIdx.y;
int ibx = blockIdx.x*32;
int iby = blockIdx.y*32;
A += ibx + inx + __mul24( iby + iny, lda );
B += iby + inx + __mul24( ibx + iny, ldb );
int t2 = iby+iny;
if (ibx+inx < m) {
if (t2 < n) {
sA[iny+0][inx] = A[0*lda];
if (t2+ 8 < n) {
sA[iny+8][inx] = A[8*lda];
if (t2 + 16 < n) {
sA[iny+16][inx] = A[16*lda];
if (t2 + 24 < n) {
sA[iny+24][inx] = A[24*lda];
}
}
}
}
}
__syncthreads();
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c)
if (iby + inx < n) {
if (ibx + iny < m) {
B[0*ldb] = sA[inx][iny+0];
if (ibx + iny + 8 < m) {
B[8*ldb] = sA[inx][iny+8];
if (ibx + iny +16 < m) {
B[16*ldb] = sA[inx][iny+16];
if (ibx + iny + 24 < m) {
B[24*ldb] = sA[inx][iny+24];
}
}
}
}
}
#else /* defined(PRECISION_z) */
if (iby + inx < n) {
if (ibx + iny < m) {
B[0*ldb] = sA[inx][iny+0];
if (ibx + iny + 8 < m) {
B[8*ldb] = sA[inx][iny+8];
}
}
if (iby + inx + 16 < n) {
if (ibx + iny < m) {
B[0*ldb+16] = sA[inx+16][iny+0];
if (ibx + iny + 8 < m) {
B[8*ldb+16] = sA[inx+16][iny+8];
}
}
}
}
__syncthreads();
A += CSIZE_1SHARED;
B += __mul24( 16, ldb );
sA[iny+ 0][inx] = A[ 0*lda];
sA[iny+ 8][inx] = A[ 8*lda];
sA[iny+16][inx] = A[16*lda];
sA[iny+24][inx] = A[24*lda];
__syncthreads();
if (iby + inx < n) {
if (ibx + iny + 16 < m) {
B[0*ldb] = sA[inx][iny+0];
if (ibx + iny + 24 < m) {
B[8*ldb] = sA[inx][iny+8];
}
}
if (iby + inx + 16 < n) {
if (ibx + iny + 16 < m) {
B[0*ldb+16] = sA[inx+16][iny+0];
if (ibx + iny + 24 < m) {
B[8*ldb+16] = sA[inx+16][iny+8];
}
}
}
}
#endif
}
__global__ void ctranspose2_32( magmaFloatComplex *B, int ldb,
const magmaFloatComplex *A, int lda,
int m, int m32, int n, int n32)
{
__shared__ magmaFloatComplex sA[32][CSIZE_1SHARED+1];
int inx = threadIdx.x;
int iny = threadIdx.y;
int ibx = blockIdx.x*32;
int iby = blockIdx.y*32;
int dx, dy;
if (ibx+32 < m)
dx = 0;
else
dx = m32;
if (iby+32 < n)
dy = 0;
else
dy = n32;
A += ibx + inx - dx + __mul24( iby + iny - dy, lda );
B += iby + inx - dy + __mul24( ibx + iny - dx, ldb );
sA[iny+0][inx] = A[0*lda];
sA[iny+8][inx] = A[8*lda];
sA[iny+16][inx] = A[16*lda];
sA[iny+24][inx] = A[24*lda];
__syncthreads();
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c)
B[ 0*ldb] = sA[inx][iny+0];
B[ 8*ldb] = sA[inx][iny+8];
B[16*ldb] = sA[inx][iny+16];
B[24*ldb] = sA[inx][iny+24];
#else /* defined(PRECISION_z) */
B[0*ldb] = sA[inx][iny+0];
B[8*ldb] = sA[inx][iny+8];
B[0*ldb+16] = sA[inx+16][iny+0];
B[8*ldb+16] = sA[inx+16][iny+8];
__syncthreads();
A += CSIZE_1SHARED;
B += __mul24( 16, ldb );
sA[iny+ 0][inx] = A[ 0*lda];
sA[iny+ 8][inx] = A[ 8*lda];
sA[iny+16][inx] = A[16*lda];
sA[iny+24][inx] = A[24*lda];
__syncthreads();
B[0*ldb] = sA[inx ][iny+0];
B[8*ldb] = sA[inx ][iny+8];
B[0*ldb+16] = sA[inx+16][iny+0];
B[8*ldb+16] = sA[inx+16][iny+8];
#endif
}
//
// m, n - dimensions in the source (input) matrix
// This version transposes for general m, n .
// Note that ldi >= m and ldo >= n.
//
extern "C" void
magmablas_ctranspose2(magmaFloatComplex *odata, magma_int_t ldo,
const magmaFloatComplex *idata, magma_int_t ldi,
magma_int_t m, magma_int_t n )
{
/* Quick return */
if ( (m == 0) || (n == 0) )
return;
dim3 threads( CSIZE_1SHARED, 8, 1 );
dim3 grid( (m+31)/32, (n+31)/32, 1 );
hipLaunchKernelGGL(( ctranspose3_32), dim3(grid), dim3(threads), 0, magma_stream ,
odata, ldo, idata, ldi, m, (32-m%32)%32, n, (32-n%32)%32 );
}
extern "C" void
magmablas_ctranspose2s(magmaFloatComplex *odata, magma_int_t ldo,
const magmaFloatComplex *idata, magma_int_t ldi,
magma_int_t m, magma_int_t n, magma_queue_t stream )
{
/* Quick return */
if ( (m == 0) || (n == 0) )
return;
dim3 threads( CSIZE_1SHARED, 8, 1 );
dim3 grid( (m+31)/32, (n+31)/32, 1 );
hipLaunchKernelGGL(( ctranspose3_32), dim3(grid), dim3(threads), 0, stream ,
odata, ldo, idata, ldi, m, (32-m%32)%32, n, (32-n%32)%32 );
}
| abae64a5ae0994201dd2547d43814e6459f07735.cu | /*
-- MAGMA (version 1.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
August 2013
@generated c Wed Aug 14 12:16:42 2013
@author Stan Tomov
*/
#include "common_magma.h"
#define PRECISION_c
#include "commonblas.h"
__global__ void ctranspose3_32( magmaFloatComplex *B, int ldb,
const magmaFloatComplex *A, int lda,
int m, int m32, int n, int n32)
{
__shared__ magmaFloatComplex sA[32][CSIZE_1SHARED+1];
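// The +1 padding of the fast dimension staggers columns across shared-memory banks,
// avoiding bank conflicts when the tile is read back in transposed order.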
int inx = threadIdx.x;
int iny = threadIdx.y;
int ibx = blockIdx.x*32;
int iby = blockIdx.y*32;
A += ibx + inx + __mul24( iby + iny, lda );
B += iby + inx + __mul24( ibx + iny, ldb );
int t2 = iby+iny;
if (ibx+inx < m) {
if (t2 < n) {
sA[iny+0][inx] = A[0*lda];
if (t2+ 8 < n) {
sA[iny+8][inx] = A[8*lda];
if (t2 + 16 < n) {
sA[iny+16][inx] = A[16*lda];
if (t2 + 24 < n) {
sA[iny+24][inx] = A[24*lda];
}
}
}
}
}
__syncthreads();
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c)
if (iby + inx < n) {
if (ibx + iny < m) {
B[0*ldb] = sA[inx][iny+0];
if (ibx + iny + 8 < m) {
B[8*ldb] = sA[inx][iny+8];
if (ibx + iny +16 < m) {
B[16*ldb] = sA[inx][iny+16];
if (ibx + iny + 24 < m) {
B[24*ldb] = sA[inx][iny+24];
}
}
}
}
}
#else /* defined(PRECISION_z) */
if (iby + inx < n) {
if (ibx + iny < m) {
B[0*ldb] = sA[inx][iny+0];
if (ibx + iny + 8 < m) {
B[8*ldb] = sA[inx][iny+8];
}
}
if (iby + inx + 16 < n) {
if (ibx + iny < m) {
B[0*ldb+16] = sA[inx+16][iny+0];
if (ibx + iny + 8 < m) {
B[8*ldb+16] = sA[inx+16][iny+8];
}
}
}
}
__syncthreads();
A += CSIZE_1SHARED;
B += __mul24( 16, ldb );
sA[iny+ 0][inx] = A[ 0*lda];
sA[iny+ 8][inx] = A[ 8*lda];
sA[iny+16][inx] = A[16*lda];
sA[iny+24][inx] = A[24*lda];
__syncthreads();
if (iby + inx < n) {
if (ibx + iny + 16 < m) {
B[0*ldb] = sA[inx][iny+0];
if (ibx + iny + 24 < m) {
B[8*ldb] = sA[inx][iny+8];
}
}
if (iby + inx + 16 < n) {
if (ibx + iny + 16 < m) {
B[0*ldb+16] = sA[inx+16][iny+0];
if (ibx + iny + 24 < m) {
B[8*ldb+16] = sA[inx+16][iny+8];
}
}
}
}
#endif
}
__global__ void ctranspose2_32( magmaFloatComplex *B, int ldb,
const magmaFloatComplex *A, int lda,
int m, int m32, int n, int n32)
{
__shared__ magmaFloatComplex sA[32][CSIZE_1SHARED+1];
int inx = threadIdx.x;
int iny = threadIdx.y;
int ibx = blockIdx.x*32;
int iby = blockIdx.y*32;
int dx, dy;
if (ibx+32 < m)
dx = 0;
else
dx = m32;
if (iby+32 < n)
dy = 0;
else
dy = n32;
A += ibx + inx - dx + __mul24( iby + iny - dy, lda );
B += iby + inx - dy + __mul24( ibx + iny - dx, ldb );
sA[iny+0][inx] = A[0*lda];
sA[iny+8][inx] = A[8*lda];
sA[iny+16][inx] = A[16*lda];
sA[iny+24][inx] = A[24*lda];
__syncthreads();
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c)
B[ 0*ldb] = sA[inx][iny+0];
B[ 8*ldb] = sA[inx][iny+8];
B[16*ldb] = sA[inx][iny+16];
B[24*ldb] = sA[inx][iny+24];
#else /* defined(PRECISION_z) */
B[0*ldb] = sA[inx][iny+0];
B[8*ldb] = sA[inx][iny+8];
B[0*ldb+16] = sA[inx+16][iny+0];
B[8*ldb+16] = sA[inx+16][iny+8];
__syncthreads();
A += CSIZE_1SHARED;
B += __mul24( 16, ldb );
sA[iny+ 0][inx] = A[ 0*lda];
sA[iny+ 8][inx] = A[ 8*lda];
sA[iny+16][inx] = A[16*lda];
sA[iny+24][inx] = A[24*lda];
__syncthreads();
B[0*ldb] = sA[inx ][iny+0];
B[8*ldb] = sA[inx ][iny+8];
B[0*ldb+16] = sA[inx+16][iny+0];
B[8*ldb+16] = sA[inx+16][iny+8];
#endif
}
//
// m, n - dimensions in the source (input) matrix
// This version transposes for general m, n .
// Note that ldi >= m and ldo >= n.
//
extern "C" void
magmablas_ctranspose2(magmaFloatComplex *odata, magma_int_t ldo,
const magmaFloatComplex *idata, magma_int_t ldi,
magma_int_t m, magma_int_t n )
{
/* Quick return */
if ( (m == 0) || (n == 0) )
return;
dim3 threads( CSIZE_1SHARED, 8, 1 );
dim3 grid( (m+31)/32, (n+31)/32, 1 );
ctranspose3_32<<< grid, threads, 0, magma_stream >>>(
odata, ldo, idata, ldi, m, (32-m%32)%32, n, (32-n%32)%32 );
}
extern "C" void
magmablas_ctranspose2s(magmaFloatComplex *odata, magma_int_t ldo,
const magmaFloatComplex *idata, magma_int_t ldi,
magma_int_t m, magma_int_t n, magma_queue_t stream )
{
/* Quick return */
if ( (m == 0) || (n == 0) )
return;
dim3 threads( CSIZE_1SHARED, 8, 1 );
dim3 grid( (m+31)/32, (n+31)/32, 1 );
ctranspose3_32<<< grid, threads, 0, stream >>>(
odata, ldo, idata, ldi, m, (32-m%32)%32, n, (32-n%32)%32 );
}
|
0ee50cd0b0f119887a12a1ff0d51726ac1cec527.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2020 XGBoost contributors
*/
#include <memory>
#include <type_traits>
#include <algorithm>
#include "../common/hist_util.cuh"
#include "simple_batch_iterator.h"
#include "iterative_device_dmatrix.h"
#include "sparse_page_source.h"
#include "ellpack_page.cuh"
#include "proxy_dmatrix.h"
#include "proxy_dmatrix.cuh"
#include "device_adapter_hip.cuh"
namespace xgboost {
namespace data {
void IterativeDeviceDMatrix::Initialize(DataIterHandle iter_handle, float missing, int nthread) {
// A handle passed to external iterator.
DMatrixProxy* proxy = MakeProxy(proxy_);
CHECK(proxy);
// The external iterator
auto iter = DataIterProxy<DataIterResetCallback, XGDMatrixCallbackNext>{
iter_handle, reset_, next_};
dh::XGBCachingDeviceAllocator<char> alloc;
auto num_rows = [&]() {
return Dispatch(proxy, [](auto const &value) { return value.NumRows(); });
};
auto num_cols = [&]() {
return Dispatch(proxy, [](auto const &value) { return value.NumCols(); });
};
size_t row_stride = 0;
size_t nnz = 0;
// Sketch for all batches.
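// First pass over the external iterator: build a per-batch quantile sketch and
// accumulate the total row count, the maximum row stride and nnz. The merged
// sketches are turned into histogram cuts, and a second pass below copies every
// batch into the final EllpackPage.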
iter.Reset();
std::vector<common::SketchContainer> sketch_containers;
size_t batches = 0;
size_t accumulated_rows = 0;
bst_feature_t cols = 0;
int32_t current_device;
dh::safe_cuda(hipGetDevice(¤t_device));
auto get_device = [&]() -> int32_t {
int32_t d = (ctx_.gpu_id == Context::kCpuId) ? current_device : ctx_.gpu_id;
CHECK_NE(d, Context::kCpuId);
return d;
};
while (iter.Next()) {
ctx_.gpu_id = proxy->DeviceIdx();
CHECK_LT(ctx_.gpu_id, common::AllVisibleGPUs());
dh::safe_cuda(hipSetDevice(get_device()));
if (cols == 0) {
cols = num_cols();
rabit::Allreduce<rabit::op::Max>(&cols, 1);
} else {
CHECK_EQ(cols, num_cols()) << "Inconsistent number of columns.";
}
sketch_containers.emplace_back(proxy->Info().feature_types,
batch_param_.max_bin, cols, num_rows(), get_device());
auto* p_sketch = &sketch_containers.back();
proxy->Info().weights_.SetDevice(get_device());
Dispatch(proxy, [&](auto const &value) {
common::AdapterDeviceSketch(value, batch_param_.max_bin,
proxy->Info(), missing, p_sketch);
});
auto batch_rows = num_rows();
accumulated_rows += batch_rows;
dh::caching_device_vector<size_t> row_counts(batch_rows + 1, 0);
common::Span<size_t> row_counts_span(row_counts.data().get(),
row_counts.size());
row_stride = ::max(row_stride, Dispatch(proxy, [=](auto const &value) {
return GetRowCounts(value, row_counts_span,
get_device(), missing);
}));
nnz += thrust::reduce(thrust::hip::par(alloc), row_counts.begin(),
row_counts.end());
batches++;
}
iter.Reset();
dh::safe_cuda(hipSetDevice(get_device()));
HostDeviceVector<FeatureType> ft;
common::SketchContainer final_sketch(
sketch_containers.empty() ? ft : sketch_containers.front().FeatureTypes(),
batch_param_.max_bin, cols, accumulated_rows, get_device());
for (auto const& sketch : sketch_containers) {
final_sketch.Merge(sketch.ColumnsPtr(), sketch.Data());
final_sketch.FixError();
}
sketch_containers.clear();
sketch_containers.shrink_to_fit();
common::HistogramCuts cuts;
final_sketch.MakeCuts(&cuts);
this->info_.num_col_ = cols;
this->info_.num_row_ = accumulated_rows;
this->info_.num_nonzero_ = nnz;
auto init_page = [this, &proxy, &cuts, row_stride, accumulated_rows,
get_device]() {
if (!page_) {
// Should be put inside the while loop to protect against empty batch. In
// that case device id is invalid.
page_.reset(new EllpackPage);
*(page_->Impl()) = EllpackPageImpl(get_device(), cuts, this->IsDense(),
row_stride, accumulated_rows);
}
};
// Construct the final ellpack page.
size_t offset = 0;
iter.Reset();
size_t n_batches_for_verification = 0;
while (iter.Next()) {
init_page();
dh::safe_cuda(hipSetDevice(get_device()));
auto rows = num_rows();
dh::caching_device_vector<size_t> row_counts(rows + 1, 0);
common::Span<size_t> row_counts_span(row_counts.data().get(),
row_counts.size());
Dispatch(proxy, [=](auto const& value) {
return GetRowCounts(value, row_counts_span, get_device(), missing);
});
auto is_dense = this->IsDense();
proxy->Info().feature_types.SetDevice(get_device());
auto d_feature_types = proxy->Info().feature_types.ConstDeviceSpan();
auto new_impl = Dispatch(proxy, [&](auto const &value) {
return EllpackPageImpl(value, missing, get_device(), is_dense, nthread,
row_counts_span, d_feature_types, row_stride, rows,
cols, cuts);
});
size_t num_elements = page_->Impl()->Copy(get_device(), &new_impl, offset);
offset += num_elements;
proxy->Info().num_row_ = num_rows();
proxy->Info().num_col_ = cols;
if (batches != 1) {
this->info_.Extend(std::move(proxy->Info()), false, true);
}
n_batches_for_verification++;
}
CHECK_EQ(batches, n_batches_for_verification)
<< "Different number of batches returned between 2 iterations";
if (batches == 1) {
this->info_ = std::move(proxy->Info());
this->info_.num_nonzero_ = nnz;
CHECK_EQ(proxy->Info().labels.Size(), 0);
}
iter.Reset();
// Synchronise worker columns
rabit::Allreduce<rabit::op::Max>(&info_.num_col_, 1);
}
BatchSet<EllpackPage> IterativeDeviceDMatrix::GetEllpackBatches(const BatchParam& param) {
CHECK(page_);
auto begin_iter =
BatchIterator<EllpackPage>(new SimpleBatchIteratorImpl<EllpackPage>(page_));
return BatchSet<EllpackPage>(begin_iter);
}
} // namespace data
} // namespace xgboost
| 0ee50cd0b0f119887a12a1ff0d51726ac1cec527.cu | /*!
* Copyright 2020 XGBoost contributors
*/
#include <memory>
#include <type_traits>
#include <algorithm>
#include "../common/hist_util.cuh"
#include "simple_batch_iterator.h"
#include "iterative_device_dmatrix.h"
#include "sparse_page_source.h"
#include "ellpack_page.cuh"
#include "proxy_dmatrix.h"
#include "proxy_dmatrix.cuh"
#include "device_adapter.cuh"
namespace xgboost {
namespace data {
void IterativeDeviceDMatrix::Initialize(DataIterHandle iter_handle, float missing, int nthread) {
// A handle passed to external iterator.
DMatrixProxy* proxy = MakeProxy(proxy_);
CHECK(proxy);
// The external iterator
auto iter = DataIterProxy<DataIterResetCallback, XGDMatrixCallbackNext>{
iter_handle, reset_, next_};
dh::XGBCachingDeviceAllocator<char> alloc;
auto num_rows = [&]() {
return Dispatch(proxy, [](auto const &value) { return value.NumRows(); });
};
auto num_cols = [&]() {
return Dispatch(proxy, [](auto const &value) { return value.NumCols(); });
};
size_t row_stride = 0;
size_t nnz = 0;
// Sketch for all batches.
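// First pass over the external iterator: build a per-batch quantile sketch and
// accumulate the total row count, the maximum row stride and nnz. The merged
// sketches are turned into histogram cuts, and a second pass below copies every
// batch into the final EllpackPage.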
iter.Reset();
std::vector<common::SketchContainer> sketch_containers;
size_t batches = 0;
size_t accumulated_rows = 0;
bst_feature_t cols = 0;
int32_t current_device;
dh::safe_cuda(cudaGetDevice(¤t_device));
auto get_device = [&]() -> int32_t {
int32_t d = (ctx_.gpu_id == Context::kCpuId) ? current_device : ctx_.gpu_id;
CHECK_NE(d, Context::kCpuId);
return d;
};
while (iter.Next()) {
ctx_.gpu_id = proxy->DeviceIdx();
CHECK_LT(ctx_.gpu_id, common::AllVisibleGPUs());
dh::safe_cuda(cudaSetDevice(get_device()));
if (cols == 0) {
cols = num_cols();
rabit::Allreduce<rabit::op::Max>(&cols, 1);
} else {
CHECK_EQ(cols, num_cols()) << "Inconsistent number of columns.";
}
sketch_containers.emplace_back(proxy->Info().feature_types,
batch_param_.max_bin, cols, num_rows(), get_device());
auto* p_sketch = &sketch_containers.back();
proxy->Info().weights_.SetDevice(get_device());
Dispatch(proxy, [&](auto const &value) {
common::AdapterDeviceSketch(value, batch_param_.max_bin,
proxy->Info(), missing, p_sketch);
});
auto batch_rows = num_rows();
accumulated_rows += batch_rows;
dh::caching_device_vector<size_t> row_counts(batch_rows + 1, 0);
common::Span<size_t> row_counts_span(row_counts.data().get(),
row_counts.size());
row_stride = std::max(row_stride, Dispatch(proxy, [=](auto const &value) {
return GetRowCounts(value, row_counts_span,
get_device(), missing);
}));
nnz += thrust::reduce(thrust::cuda::par(alloc), row_counts.begin(),
row_counts.end());
batches++;
}
iter.Reset();
dh::safe_cuda(cudaSetDevice(get_device()));
HostDeviceVector<FeatureType> ft;
common::SketchContainer final_sketch(
sketch_containers.empty() ? ft : sketch_containers.front().FeatureTypes(),
batch_param_.max_bin, cols, accumulated_rows, get_device());
for (auto const& sketch : sketch_containers) {
final_sketch.Merge(sketch.ColumnsPtr(), sketch.Data());
final_sketch.FixError();
}
sketch_containers.clear();
sketch_containers.shrink_to_fit();
common::HistogramCuts cuts;
final_sketch.MakeCuts(&cuts);
this->info_.num_col_ = cols;
this->info_.num_row_ = accumulated_rows;
this->info_.num_nonzero_ = nnz;
auto init_page = [this, &proxy, &cuts, row_stride, accumulated_rows,
get_device]() {
if (!page_) {
// Should be put inside the while loop to protect against empty batch. In
// that case device id is invalid.
page_.reset(new EllpackPage);
*(page_->Impl()) = EllpackPageImpl(get_device(), cuts, this->IsDense(),
row_stride, accumulated_rows);
}
};
// Construct the final ellpack page.
size_t offset = 0;
iter.Reset();
size_t n_batches_for_verification = 0;
while (iter.Next()) {
init_page();
dh::safe_cuda(cudaSetDevice(get_device()));
auto rows = num_rows();
dh::caching_device_vector<size_t> row_counts(rows + 1, 0);
common::Span<size_t> row_counts_span(row_counts.data().get(),
row_counts.size());
Dispatch(proxy, [=](auto const& value) {
return GetRowCounts(value, row_counts_span, get_device(), missing);
});
auto is_dense = this->IsDense();
proxy->Info().feature_types.SetDevice(get_device());
auto d_feature_types = proxy->Info().feature_types.ConstDeviceSpan();
auto new_impl = Dispatch(proxy, [&](auto const &value) {
return EllpackPageImpl(value, missing, get_device(), is_dense, nthread,
row_counts_span, d_feature_types, row_stride, rows,
cols, cuts);
});
size_t num_elements = page_->Impl()->Copy(get_device(), &new_impl, offset);
offset += num_elements;
proxy->Info().num_row_ = num_rows();
proxy->Info().num_col_ = cols;
if (batches != 1) {
this->info_.Extend(std::move(proxy->Info()), false, true);
}
n_batches_for_verification++;
}
CHECK_EQ(batches, n_batches_for_verification)
<< "Different number of batches returned between 2 iterations";
if (batches == 1) {
this->info_ = std::move(proxy->Info());
this->info_.num_nonzero_ = nnz;
CHECK_EQ(proxy->Info().labels.Size(), 0);
}
iter.Reset();
// Synchronise worker columns
rabit::Allreduce<rabit::op::Max>(&info_.num_col_, 1);
}
BatchSet<EllpackPage> IterativeDeviceDMatrix::GetEllpackBatches(const BatchParam& param) {
CHECK(page_);
auto begin_iter =
BatchIterator<EllpackPage>(new SimpleBatchIteratorImpl<EllpackPage>(page_));
return BatchSet<EllpackPage>(begin_iter);
}
} // namespace data
} // namespace xgboost
|
14d041a7bce7d03e65b2d3bb8bf4135eaf84b33a.hip | // !!! This is a file automatically generated by hipify!!!
/*
Use CUDA functions to calculate block size
*/
#include <iostream>
#include <stdlib.h>
#include <cmath>
#include <string>
#include <ctime>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
// Position struct contains x and y coordinates
struct Position {
float x, y;
std::string toString() {
return "(" + std::to_string(x) + "," + std::to_string(y) + ")";
}
__device__ __host__ void operator+=(const Position& a) {
x = x + a.x;
y = y + a.y;
}
__device__ __host__ void operator=(const Position& a) {
x = a.x;
y = a.y;
}
};
// Particle struct has current location, best location and velocity
struct Particle {
Position best_position;
Position current_position;
Position velocity;
float best_value;
};
const unsigned int N = 5000;
const unsigned int ITERATIONS = 1000;
const float SEARCH_MIN = -1000.0f;
const float SEARCH_MAX = 1000.0f;
const float w = 0.9f;
const float c_ind = 1.0f;
const float c_team = 2.0f;
// return a random float between low and high
float randomFloat(float low, float high) {
float range = high-low;
float pct = static_cast <float>(rand()) / static_cast <float>(RAND_MAX);
return low + pct * range;
}
// function to optimize
__device__ __host__ float calcValue(Position p) {
return pow(p.x, 2) + pow(p.y, 2);
}
// Initialize state for random numbers
__global__ void init_kernel(hiprandState_t *state, long seed) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
hiprand_init(seed, idx, 0, state);
}
// Returns the index of the particle with the best position
__global__ void updateTeamBestIndex(Particle *d_particles, float *d_team_best_value, int *d_team_best_index, int N) {
*d_team_best_value = d_particles[0].best_value;
*d_team_best_index = 0;
for (int i = 1; i < N; i++) {
if (d_particles[i].best_value < *d_team_best_value) {
*d_team_best_value = d_particles[i].best_value;
*d_team_best_index = i;
}
}
}
// Calculate velocity for a particle
// __device__ void updateParticleVelocity(Particle &p, Position team_best_position, float w, float c_ind, float c_team, hiprandState_t *state) {
// float r_ind = hiprand_uniform(state);
// float r_team = hiprand_uniform(state);
// p.velocity.x = w * p.velocity.x +
// r_ind * c_ind * (p.best_position.x - p.current_position.x) +
// r_team * c_team * (team_best_position.x - p.current_position.x);
// p.velocity.y = w * p.velocity.y +
// r_ind * c_ind * (p.best_position.y - p.current_position.y) +
// r_team * c_team * (team_best_position.y - p.current_position.y);
// }
// Update velocity for all particles
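// Standard PSO update: v = w*v + r_ind*c_ind*(personal_best - x) + r_team*c_team*(team_best - x),
// where r_ind and r_team are fresh uniform random draws.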
__global__ void updateVelocity(Particle* d_particles, int *d_team_best_index, float w, float c_ind, float c_team, int N, hiprandState_t *state) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < N) {
// updateParticleVelocity(d_particles[idx], d_particles[*d_team_best_index].best_position, w, c_ind, c_team, state);
float r_ind = hiprand_uniform(state);
float r_team = hiprand_uniform(state);
d_particles[idx].velocity.x = w * d_particles[idx].velocity.x +
r_ind * c_ind * (d_particles[idx].best_position.x - d_particles[idx].current_position.x) +
r_team * c_team * (d_particles[*d_team_best_index].best_position.x - d_particles[idx].current_position.x);
d_particles[idx].velocity.y = w * d_particles[idx].velocity.y +
r_ind * c_ind * (d_particles[idx].best_position.y - d_particles[idx].current_position.y) +
r_team * c_team * (d_particles[*d_team_best_index].best_position.y - d_particles[idx].current_position.y);
}
}
__global__ void updatePosition(Particle *d_particles, int N) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < N) {
d_particles[idx].current_position += d_particles[idx].velocity;
float newValue = calcValue(d_particles[idx].current_position);
if (newValue < d_particles[idx].best_value) {
d_particles[idx].best_value = newValue;
d_particles[idx].best_position = d_particles[idx].current_position;
}
}
}
int main(void) {
// for timing
long start = std::clock();
// Random seed for cpu
std::srand(std::time(NULL));
// Random seed for gpu
hiprandState_t *state;
hipMalloc(&state, sizeof(hiprandState_t));
hipLaunchKernelGGL(( init_kernel), dim3(1),dim3(1), 0, 0, state, clock());
// Initialize particles
Particle* h_particles = new Particle[N];
Particle* d_particles; // for the gpu
for (int i = 0; i < N; i++) {
// Random starting position
h_particles[i].current_position.x = randomFloat(SEARCH_MIN, SEARCH_MAX);
h_particles[i].current_position.y = randomFloat(SEARCH_MIN, SEARCH_MAX);
h_particles[i].best_position.x = h_particles[i].current_position.x;
h_particles[i].best_position.y = h_particles[i].current_position.y;
h_particles[i].best_value = calcValue(h_particles[i].best_position);
// Random starting velocity
h_particles[i].velocity.x = randomFloat(SEARCH_MIN, SEARCH_MAX);
h_particles[i].velocity.y = randomFloat(SEARCH_MIN, SEARCH_MAX);
}
// Allocate memory + copy data to gpu
size_t particleSize = sizeof(Particle) * N;
hipMalloc((void **)&d_particles, particleSize);
hipMemcpy(d_particles, h_particles, particleSize, hipMemcpyHostToDevice); // dest, source, size, direction
// initialize variables for gpu
int *d_team_best_index;
float *d_team_best_value;
// Allocate gpu memory
hipMalloc((void **)&d_team_best_index, sizeof(int));
hipMalloc((void **)&d_team_best_value, sizeof(float));
// Initialize team best index and value
hipLaunchKernelGGL(( updateTeamBestIndex), dim3(1),dim3(1), 0, 0, d_particles, d_team_best_value, d_team_best_index, N);
// assign thread and block count
int blockSize = 1024;
int gridSize = (N + blockSize - 1) / blockSize;
// For i in iterations
for (int i = 0; i < ITERATIONS; i++) {
hipLaunchKernelGGL(( updateVelocity), dim3(gridSize), dim3(blockSize), 0, 0, d_particles, d_team_best_index, w, c_ind, c_team, N, state);
hipLaunchKernelGGL(( updatePosition), dim3(gridSize), dim3(blockSize), 0, 0, d_particles, N);
hipLaunchKernelGGL(( updateTeamBestIndex), dim3(1),dim3(1), 0, 0, d_particles, d_team_best_value, d_team_best_index, N);
}
// copy best particle back to host
int team_best_index;
hipMemcpy(&team_best_index, d_team_best_index, sizeof(int), hipMemcpyDeviceToHost);
// copy particle data back to host
hipMemcpy(h_particles, d_particles, particleSize, hipMemcpyDeviceToHost);
long stop = std::clock();
long elapsed = (stop - start) * 1000 / CLOCKS_PER_SEC;
// print results
std::cout << "Ending Best: " << std::endl;
std::cout << "Team best value: " << h_particles[team_best_index].best_value << std::endl;
std::cout << "Team best position: " << h_particles[team_best_index].best_position.toString() << std::endl;
std::cout << "Run time: " << elapsed << "ms" << std::endl;
hipFree(d_particles);
hipFree(d_team_best_index);
hipFree(d_team_best_value);
hipFree(state);
return 0;
} | 14d041a7bce7d03e65b2d3bb8bf4135eaf84b33a.cu |
/*
Use CUDA functions to calculate block size
*/
#include <iostream>
#include <stdlib.h>
#include <cmath>
#include <string>
#include <ctime>
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>
// Position struct contains x and y coordinates
struct Position {
float x, y;
std::string toString() {
return "(" + std::to_string(x) + "," + std::to_string(y) + ")";
}
__device__ __host__ void operator+=(const Position& a) {
x = x + a.x;
y = y + a.y;
}
__device__ __host__ void operator=(const Position& a) {
x = a.x;
y = a.y;
}
};
// Particle struct has current location, best location and velocity
struct Particle {
Position best_position;
Position current_position;
Position velocity;
float best_value;
};
const unsigned int N = 5000;
const unsigned int ITERATIONS = 1000;
const float SEARCH_MIN = -1000.0f;
const float SEARCH_MAX = 1000.0f;
const float w = 0.9f;
const float c_ind = 1.0f;
const float c_team = 2.0f;
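// PSO hyperparameters: w is the inertia weight, c_ind scales the pull toward a
// particle's own best position, c_team scales the pull toward the swarm's best position.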
// return a random float between low and high
float randomFloat(float low, float high) {
float range = high-low;
float pct = static_cast <float>(rand()) / static_cast <float>(RAND_MAX);
return low + pct * range;
}
// function to optimize
__device__ __host__ float calcValue(Position p) {
return pow(p.x, 2) + pow(p.y, 2);
}
// Initialize state for random numbers
__global__ void init_kernel(curandState *state, long seed) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
curand_init(seed, idx, 0, state);
}
// Returns the index of the particle with the best position
__global__ void updateTeamBestIndex(Particle *d_particles, float *d_team_best_value, int *d_team_best_index, int N) {
*d_team_best_value = d_particles[0].best_value;
*d_team_best_index = 0;
for (int i = 1; i < N; i++) {
if (d_particles[i].best_value < *d_team_best_value) {
*d_team_best_value = d_particles[i].best_value;
*d_team_best_index = i;
}
}
}
// Calculate velocity for a particle
// __device__ void updateParticleVelocity(Particle &p, Position team_best_position, float w, float c_ind, float c_team, curandState *state) {
// float r_ind = curand_uniform(state);
// float r_team = curand_uniform(state);
// p.velocity.x = w * p.velocity.x +
// r_ind * c_ind * (p.best_position.x - p.current_position.x) +
// r_team * c_team * (team_best_position.x - p.current_position.x);
// p.velocity.y = w * p.velocity.y +
// r_ind * c_ind * (p.best_position.y - p.current_position.y) +
// r_team * c_team * (team_best_position.y - p.current_position.y);
// }
// Update velocity for all particles
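// Standard PSO update: v = w*v + r_ind*c_ind*(personal_best - x) + r_team*c_team*(team_best - x),
// where r_ind and r_team are fresh uniform random draws.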
__global__ void updateVelocity(Particle* d_particles, int *d_team_best_index, float w, float c_ind, float c_team, int N, curandState *state) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < N) {
// updateParticleVelocity(d_particles[idx], d_particles[*d_team_best_index].best_position, w, c_ind, c_team, state);
float r_ind = curand_uniform(state);
float r_team = curand_uniform(state);
d_particles[idx].velocity.x = w * d_particles[idx].velocity.x +
r_ind * c_ind * (d_particles[idx].best_position.x - d_particles[idx].current_position.x) +
r_team * c_team * (d_particles[*d_team_best_index].best_position.x - d_particles[idx].current_position.x);
d_particles[idx].velocity.y = w * d_particles[idx].velocity.y +
r_ind * c_ind * (d_particles[idx].best_position.y - d_particles[idx].current_position.y) +
r_team * c_team * (d_particles[*d_team_best_index].best_position.y - d_particles[idx].current_position.y);
}
}
__global__ void updatePosition(Particle *d_particles, int N) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < N) {
d_particles[idx].current_position += d_particles[idx].velocity;
float newValue = calcValue(d_particles[idx].current_position);
if (newValue < d_particles[idx].best_value) {
d_particles[idx].best_value = newValue;
d_particles[idx].best_position = d_particles[idx].current_position;
}
}
}
int main(void) {
// for timing
long start = std::clock();
// Random seed for cpu
std::srand(std::time(NULL));
// Random seed for gpu
curandState *state;
cudaMalloc(&state, sizeof(curandState));
init_kernel<<<1,1>>>(state, clock());
// Initialize particles
Particle* h_particles = new Particle[N];
Particle* d_particles; // for the gpu
for (int i = 0; i < N; i++) {
// Random starting position
h_particles[i].current_position.x = randomFloat(SEARCH_MIN, SEARCH_MAX);
h_particles[i].current_position.y = randomFloat(SEARCH_MIN, SEARCH_MAX);
h_particles[i].best_position.x = h_particles[i].current_position.x;
h_particles[i].best_position.y = h_particles[i].current_position.y;
h_particles[i].best_value = calcValue(h_particles[i].best_position);
// Random starting velocity
h_particles[i].velocity.x = randomFloat(SEARCH_MIN, SEARCH_MAX);
h_particles[i].velocity.y = randomFloat(SEARCH_MIN, SEARCH_MAX);
}
// Allocate memory + copy data to gpu
size_t particleSize = sizeof(Particle) * N;
cudaMalloc((void **)&d_particles, particleSize);
cudaMemcpy(d_particles, h_particles, particleSize, cudaMemcpyHostToDevice); // dest, source, size, direction
// initialize variables for gpu
int *d_team_best_index;
float *d_team_best_value;
// Allocate gpu memory
cudaMalloc((void **)&d_team_best_index, sizeof(int));
cudaMalloc((void **)&d_team_best_value, sizeof(float));
// Initialize team best index and value
updateTeamBestIndex<<<1,1>>>(d_particles, d_team_best_value, d_team_best_index, N);
// assign thread and block count
int blockSize = 1024;
int gridSize = (N + blockSize - 1) / blockSize;
// For i in iterations
for (int i = 0; i < ITERATIONS; i++) {
updateVelocity<<<gridSize, blockSize>>>(d_particles, d_team_best_index, w, c_ind, c_team, N, state);
updatePosition<<<gridSize, blockSize>>>(d_particles, N);
updateTeamBestIndex<<<1,1>>>(d_particles, d_team_best_value, d_team_best_index, N);
}
// copy best particle back to host
int team_best_index;
cudaMemcpy(&team_best_index, d_team_best_index, sizeof(int), cudaMemcpyDeviceToHost);
// copy particle data back to host
cudaMemcpy(h_particles, d_particles, particleSize, cudaMemcpyDeviceToHost);
long stop = std::clock();
long elapsed = (stop - start) * 1000 / CLOCKS_PER_SEC;
// print results
std::cout << "Ending Best: " << std::endl;
std::cout << "Team best value: " << h_particles[team_best_index].best_value << std::endl;
std::cout << "Team best position: " << h_particles[team_best_index].best_position.toString() << std::endl;
std::cout << "Run time: " << elapsed << "ms" << std::endl;
cudaFree(d_particles);
cudaFree(d_team_best_index);
cudaFree(d_team_best_value);
cudaFree(state);
return 0;
} |
e4ee3ea696ed106dbc74e51d29c49574294bdcc8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author GS <[email protected]>
//
#include <ops/declarable/helpers/segment.h>
#include <ops/declarable/helpers/segment_common.h>
#include <array/NDArrayFactory.h>
#include <helpers/ShapeUtils.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>
namespace sd {
namespace ops {
namespace helpers {
// -------------------------------------------------------------------------------------------------------------- //
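// Vector case: each input element is divided by sqrt(its segment's length) and
// atomically accumulated into that segment's output element; empty segments are skipped.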
template <typename T, typename I>
static __global__ void unsortedSegmentSqrtNLinearKernel(T* input, Nd4jLong* inputShape, I* indices, Nd4jLong* indicesShape, int* starts, int* lengths, Nd4jLong numOfClasses, T* output, Nd4jLong* outputShape) {
__shared__ Nd4jLong xLen, zLen;
if (threadIdx.x == 0) {
xLen = shape::length(inputShape);
zLen = shape::length(outputShape);
}
__syncthreads();
auto start = threadIdx.x + blockIdx.x * blockDim.x;
auto step = blockDim.x * gridDim.x;
for (auto idx = start; idx < xLen; idx += step) {
auto yIndex = shape::getIndexOffset(idx, indicesShape);
auto segment = indices[yIndex];
auto zIndex = shape::getIndexOffset(segment, outputShape);
if (lengths[segment] == 0) continue;
auto xIndex = shape::getIndexOffset(idx, inputShape);
sd::math::atomics::nd4j_atomicAdd(&output[zIndex], input[xIndex] / sd::math::nd4j_sqrt<int, T>(lengths[segment]));
}
}
// -------------------------------------------------------------------------------------------------------------- //
// SegmentSqrtN kernel
template <typename T, typename I>
static __global__ void segmentSqrtNTadKernel(T* inputBuf, Nd4jLong* inputShape, Nd4jLong* inputTads, Nd4jLong* inputTadOffsets, I* indices, int* starts, int* lengths, Nd4jLong numOfClasses, void* outputBuf, Nd4jLong* outputShape, Nd4jLong* outputTads, Nd4jLong* outputTadOffsets) {
__shared__ Nd4jLong len, total;
if (threadIdx.x == 0) {
total = shape::sizeAt(inputShape, 0);
len = shape::length(inputTads);
}
__syncthreads();
for (auto idx = blockIdx.x; idx < total; idx += gridDim.x) {
auto segment = indices[idx];
auto x = inputBuf + inputTadOffsets[idx];
auto z = reinterpret_cast<T *>(outputBuf) + outputTadOffsets[segment];
auto start = starts[segment];
auto finish = start + lengths[segment];
for (auto e = threadIdx.x; e < len; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputTads);
auto zIndex = shape::getIndexOffset(e, outputTads);
sd::math::atomics::nd4j_atomicAdd(&z[zIndex], x[xIndex] / sd::math::nd4j_sqrt<int, T>(lengths[segment]));
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static void unsortedSegmentSqrtNFunctor_(sd::LaunchContext* context, NDArray* input, NDArray* indices, Nd4jLong numOfClasses, NDArray* output) {
auto stream = context->getCudaStream();
// NDArray classes = NDArrayFactory::create<int>('c', {numOfClasses, 2});
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numOfClasses}, context);
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numOfClasses}, context);
// NDArray row = NDArrayFactory::create<int>('c', {1, 2}, {(int)indices->lengthOf(), (int)0});
// classes.applyTrueBroadcast(sd::BroadcastOpsTuple::Assign(), &row, &classes);
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
// dim3 dims(numOfClasses, indices->lengthOf(), numOfClasses * 32 + 32);
dim3 dims(128, 256, 256);
// int* classesBuf = reinterpret_cast<int*>(classes.specialBuffer());
fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
output->nullify();
if (input->isVector()) {
hipLaunchKernelGGL(( unsortedSegmentSqrtNLinearKernel<T,I>), dim3(dims.x), dim3(dims.y), dims.z, *stream,
input->dataBuffer()->specialAsT<T>(), input->specialShapeInfo(),
indices->dataBuffer()->specialAsT<I>(), indices->specialShapeInfo(), begins, lengths, numOfClasses,
output->dataBuffer()->specialAsT<T>(), output->specialShapeInfo());
}
else {
output->nullify();
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), dimensions);
Nd4jLong* inputTads = packX.specialShapeInfo();
Nd4jLong* inputTadOffsets = packX.specialOffsets();
Nd4jLong* outputTads = packZ.specialShapeInfo();
Nd4jLong* outputTadOffsets = packZ.specialOffsets();
dims.x = input->sizeAt(0);
hipLaunchKernelGGL(( segmentSqrtNTadKernel<T,I>), dim3(dims.x), dim3(dims.y), dims.z, *stream,
input->dataBuffer()->specialAsT<T>(), input->specialShapeInfo(), inputTads, inputTadOffsets, indices->dataBuffer()->specialAsT<I>(),
begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets);
}
}
// -------------------------------------------------------------------------------------------------------------- //
void unsortedSegmentSqrtNFunctor(sd::LaunchContext* context , NDArray* input, NDArray* indices, Nd4jLong numOfClasses, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices});
BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), unsortedSegmentSqrtNFunctor_, (context, input, indices, numOfClasses, output),
FLOAT_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices});
}
// -------------------------------------------------------------------------------------------------------------- //
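// Backward pass, vector case: each element of the input gradient is the incoming
// gradient of its segment scaled by 1/sqrt(segment length).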
template <typename T, typename I>
static __global__ void segmentSqrtNBPLinearKernel(void* inputBuf, Nd4jLong* inputShape, void* eps, Nd4jLong* epsShape, void* indicesBuf, Nd4jLong* indicesShape,
int* lengths, void* outputBuf, Nd4jLong* outputShape) {
__shared__ T* x;
__shared__ T* gradIn;
__shared__ T* gradOut;
__shared__ I* y;
__shared__ T* z;
__shared__ Nd4jLong xLen, gradLen;
if (threadIdx.x == 0) {
xLen = shape::length(inputShape);
x = reinterpret_cast<T*>(inputBuf);
y = reinterpret_cast<I*>(indicesBuf);
z = reinterpret_cast<T*>(outputBuf);
gradOut = reinterpret_cast<T*>(eps);
gradLen = shape::length(epsShape);
}
__syncthreads();
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = gridDim.x * blockDim.x;
for (auto e = start; e < xLen; e += step) {
auto zOffset = shape::getIndexOffset(e, outputShape);
auto xOffset = shape::getIndexOffset(e, inputShape);
auto yOffset = shape::getIndexOffset(e, indicesShape);
auto classIndex = y[yOffset];
auto gradOffsetO = shape::getIndexOffset(classIndex, epsShape);
z[zOffset] = T(gradOut[gradOffsetO] / math::nd4j_sqrt<int, float>(lengths[classIndex]));
}
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static __global__ void segmentSqrtNBPTadKernel(void* inputBuf, Nd4jLong* inputShape, void* eps, Nd4jLong* epsShape,
void* indicesBuf, Nd4jLong* indicesShape, int* lengths, void* outputBuf, Nd4jLong* outputShape,Nd4jLong* inputTad,
Nd4jLong* inputOffsets, Nd4jLong* gradOutTad, Nd4jLong* gradOutOffsets, Nd4jLong* outTad, Nd4jLong* outOffsets) {
__shared__ T* x;
__shared__ T* gradOut;
__shared__ I* y;
__shared__ T* z;
__shared__ Nd4jLong xLen, yLen, gradLen, currentLen;
if (threadIdx.x == 0) {
xLen = shape::length(inputShape);
x = reinterpret_cast<T*>(inputBuf);
y = reinterpret_cast<I*>(indicesBuf);
z = reinterpret_cast<T*>(outputBuf);
yLen = shape::length(indicesShape);
gradOut = reinterpret_cast<T*>(eps);
gradLen = shape::length(epsShape);
currentLen = shape::length(outTad);
}
__syncthreads();
for (auto i = blockIdx.x; i < yLen; i += gridDim.x) {
// auto yIndex = shape::getIndexOffset(i, indicesShape);
auto segment = y[i]; //yIndex];
T* currentOut = z + outOffsets[i];
T* outGrad = gradOut + gradOutOffsets[segment];
for (auto e = threadIdx.x; e < currentLen; e += blockDim.x) {
auto zIndex = shape::getIndexOffset(e, outTad);
auto gradIndex = shape::getIndexOffset(e, gradOutTad);
if (lengths[segment] > 0)
currentOut[zIndex] = T(outGrad[gradIndex] / math::nd4j_sqrt<int, float>(lengths[segment]));
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static int unsortedSegmentSqrtNFunctorBP_(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, Nd4jLong numOfClasses, NDArray* output) {
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
auto numClasses = indices->e<int>(indices->lengthOf() - 1) + 1;
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numClasses}, context);
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numClasses}, context);
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
dim3 dims(numClasses, indices->lengthOf(), numClasses * 32 + 32);
fillUpSegments(indices, numClasses, classesRangesBegs, classesRangesLens);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
if (input->isVector()) {
Nd4jLong loop_size = input->lengthOf();
auto numOfClasses = gradOut->lengthOf(); //indices->e<Nd4jLong>(loop_size - 1);
hipLaunchKernelGGL(( segmentSqrtNBPLinearKernel<T,I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(),
input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
indices->specialBuffer(), indices->specialShapeInfo(), lengths, output->specialBuffer(), output->specialShapeInfo());
}
else {
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), dimensions);
// auto packGradIn = sd::ConstantTadHelper::getInstance()->tadForDimensions(tempRes.getShapeInfo(), dimensions);
auto packGradOut = sd::ConstantTadHelper::getInstance()->tadForDimensions(gradOut->getShapeInfo(), dimensions);
Nd4jLong* inputTads = packX.specialShapeInfo();
Nd4jLong* inputTadOffsets = packX.specialOffsets();
Nd4jLong* outputTads = packZ.specialShapeInfo();
Nd4jLong* outputTadOffsets = packZ.specialOffsets();
Nd4jLong* gradOutTads = packGradOut.specialShapeInfo();
Nd4jLong* gradOutTadOffsets = packGradOut.specialOffsets();
hipLaunchKernelGGL(( segmentSqrtNBPTadKernel<T,I>), dim3(indices->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(), input->specialShapeInfo(),
gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), lengths,
output->specialBuffer(), output->specialShapeInfo(), inputTads, inputTadOffsets, gradOutTads, gradOutTadOffsets,
outputTads, outputTadOffsets);
}
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
return Status::OK();
}
// -------------------------------------------------------------------------------------------------------------- //
int unsortedSegmentSqrtNFunctorBP(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, Nd4jLong numOfClasses, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return unsortedSegmentSqrtNFunctorBP_, (context, input, indices, gradOut, numOfClasses, output), FLOAT_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
}
}
}
} | e4ee3ea696ed106dbc74e51d29c49574294bdcc8.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author GS <[email protected]>
//
#include <ops/declarable/helpers/segment.h>
#include <ops/declarable/helpers/segment_common.h>
#include <array/NDArrayFactory.h>
#include <helpers/ShapeUtils.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>
namespace sd {
namespace ops {
namespace helpers {
// -------------------------------------------------------------------------------------------------------------- //
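// Vector case: each input element is divided by sqrt(its segment's length) and
// atomically accumulated into that segment's output element; empty segments are skipped.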
template <typename T, typename I>
static __global__ void unsortedSegmentSqrtNLinearKernel(T* input, Nd4jLong* inputShape, I* indices, Nd4jLong* indicesShape, int* starts, int* lengths, Nd4jLong numOfClasses, T* output, Nd4jLong* outputShape) {
__shared__ Nd4jLong xLen, zLen;
if (threadIdx.x == 0) {
xLen = shape::length(inputShape);
zLen = shape::length(outputShape);
}
__syncthreads();
auto start = threadIdx.x + blockIdx.x * blockDim.x;
auto step = blockDim.x * gridDim.x;
for (auto idx = start; idx < xLen; idx += step) {
auto yIndex = shape::getIndexOffset(idx, indicesShape);
auto segment = indices[yIndex];
auto zIndex = shape::getIndexOffset(segment, outputShape);
if (lengths[segment] == 0) continue;
auto xIndex = shape::getIndexOffset(idx, inputShape);
sd::math::atomics::nd4j_atomicAdd(&output[zIndex], input[xIndex] / sd::math::nd4j_sqrt<int, T>(lengths[segment]));
}
}
// -------------------------------------------------------------------------------------------------------------- //
// SegmentSqrtN kernel
template <typename T, typename I>
static __global__ void segmentSqrtNTadKernel(T* inputBuf, Nd4jLong* inputShape, Nd4jLong* inputTads, Nd4jLong* inputTadOffsets, I* indices, int* starts, int* lengths, Nd4jLong numOfClasses, void* outputBuf, Nd4jLong* outputShape, Nd4jLong* outputTads, Nd4jLong* outputTadOffsets) {
__shared__ Nd4jLong len, total;
if (threadIdx.x == 0) {
total = shape::sizeAt(inputShape, 0);
len = shape::length(inputTads);
}
__syncthreads();
for (auto idx = blockIdx.x; idx < total; idx += gridDim.x) {
auto segment = indices[idx];
auto x = inputBuf + inputTadOffsets[idx];
auto z = reinterpret_cast<T *>(outputBuf) + outputTadOffsets[segment];
auto start = starts[segment];
auto finish = start + lengths[segment];
for (auto e = threadIdx.x; e < len; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputTads);
auto zIndex = shape::getIndexOffset(e, outputTads);
sd::math::atomics::nd4j_atomicAdd(&z[zIndex], x[xIndex] / sd::math::nd4j_sqrt<int, T>(lengths[segment]));
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static void unsortedSegmentSqrtNFunctor_(sd::LaunchContext* context, NDArray* input, NDArray* indices, Nd4jLong numOfClasses, NDArray* output) {
auto stream = context->getCudaStream();
// NDArray classes = NDArrayFactory::create<int>('c', {numOfClasses, 2});
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numOfClasses}, context);
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numOfClasses}, context);
// NDArray row = NDArrayFactory::create<int>('c', {1, 2}, {(int)indices->lengthOf(), (int)0});
// classes.applyTrueBroadcast(sd::BroadcastOpsTuple::Assign(), &row, &classes);
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
// dim3 dims(numOfClasses, indices->lengthOf(), numOfClasses * 32 + 32);
dim3 dims(128, 256, 256);
// int* classesBuf = reinterpret_cast<int*>(classes.specialBuffer());
fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
output->nullify();
if (input->isVector()) {
unsortedSegmentSqrtNLinearKernel<T,I><<<dims.x, dims.y, dims.z, *stream>>>(
input->dataBuffer()->specialAsT<T>(), input->specialShapeInfo(),
indices->dataBuffer()->specialAsT<I>(), indices->specialShapeInfo(), begins, lengths, numOfClasses,
output->dataBuffer()->specialAsT<T>(), output->specialShapeInfo());
}
else {
output->nullify();
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), dimensions);
Nd4jLong* inputTads = packX.specialShapeInfo();
Nd4jLong* inputTadOffsets = packX.specialOffsets();
Nd4jLong* outputTads = packZ.specialShapeInfo();
Nd4jLong* outputTadOffsets = packZ.specialOffsets();
dims.x = input->sizeAt(0);
segmentSqrtNTadKernel<T,I><<<dims.x, dims.y, dims.z, *stream>>>(
input->dataBuffer()->specialAsT<T>(), input->specialShapeInfo(), inputTads, inputTadOffsets, indices->dataBuffer()->specialAsT<I>(),
begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets);
}
}
// -------------------------------------------------------------------------------------------------------------- //
void unsortedSegmentSqrtNFunctor(sd::LaunchContext* context , NDArray* input, NDArray* indices, Nd4jLong numOfClasses, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices});
BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), unsortedSegmentSqrtNFunctor_, (context, input, indices, numOfClasses, output),
FLOAT_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices});
}
// -------------------------------------------------------------------------------------------------------------- //
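// Backward pass, vector case: each element of the input gradient is the incoming
// gradient of its segment scaled by 1/sqrt(segment length).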
template <typename T, typename I>
static __global__ void segmentSqrtNBPLinearKernel(void* inputBuf, Nd4jLong* inputShape, void* eps, Nd4jLong* epsShape, void* indicesBuf, Nd4jLong* indicesShape,
int* lengths, void* outputBuf, Nd4jLong* outputShape) {
__shared__ T* x;
__shared__ T* gradIn;
__shared__ T* gradOut;
__shared__ I* y;
__shared__ T* z;
__shared__ Nd4jLong xLen, gradLen;
if (threadIdx.x == 0) {
xLen = shape::length(inputShape);
x = reinterpret_cast<T*>(inputBuf);
y = reinterpret_cast<I*>(indicesBuf);
z = reinterpret_cast<T*>(outputBuf);
gradOut = reinterpret_cast<T*>(eps);
gradLen = shape::length(epsShape);
}
__syncthreads();
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = gridDim.x * blockDim.x;
for (auto e = start; e < xLen; e += step) {
auto zOffset = shape::getIndexOffset(e, outputShape);
auto xOffset = shape::getIndexOffset(e, inputShape);
auto yOffset = shape::getIndexOffset(e, indicesShape);
auto classIndex = y[yOffset];
auto gradOffsetO = shape::getIndexOffset(classIndex, epsShape);
z[zOffset] = T(gradOut[gradOffsetO] / math::nd4j_sqrt<int, float>(lengths[classIndex]));
}
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static __global__ void segmentSqrtNBPTadKernel(void* inputBuf, Nd4jLong* inputShape, void* eps, Nd4jLong* epsShape,
void* indicesBuf, Nd4jLong* indicesShape, int* lengths, void* outputBuf, Nd4jLong* outputShape,Nd4jLong* inputTad,
Nd4jLong* inputOffsets, Nd4jLong* gradOutTad, Nd4jLong* gradOutOffsets, Nd4jLong* outTad, Nd4jLong* outOffsets) {
__shared__ T* x;
__shared__ T* gradOut;
__shared__ I* y;
__shared__ T* z;
__shared__ Nd4jLong xLen, yLen, gradLen, currentLen;
if (threadIdx.x == 0) {
xLen = shape::length(inputShape);
x = reinterpret_cast<T*>(inputBuf);
y = reinterpret_cast<I*>(indicesBuf);
z = reinterpret_cast<T*>(outputBuf);
yLen = shape::length(indicesShape);
gradOut = reinterpret_cast<T*>(eps);
gradLen = shape::length(epsShape);
currentLen = shape::length(outTad);
}
__syncthreads();
for (auto i = blockIdx.x; i < yLen; i += gridDim.x) {
// auto yIndex = shape::getIndexOffset(i, indicesShape);
auto segment = y[i]; //yIndex];
T* currentOut = z + outOffsets[i];
T* outGrad = gradOut + gradOutOffsets[segment];
for (auto e = threadIdx.x; e < currentLen; e += blockDim.x) {
auto zIndex = shape::getIndexOffset(e, outTad);
auto gradIndex = shape::getIndexOffset(e, gradOutTad);
if (lengths[segment] > 0)
currentOut[zIndex] = T(outGrad[gradIndex] / math::nd4j_sqrt<int, float>(lengths[segment]));
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static int unsortedSegmentSqrtNFunctorBP_(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, Nd4jLong numOfClasses, NDArray* output) {
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
auto numClasses = indices->e<int>(indices->lengthOf() - 1) + 1;
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numClasses}, context);
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numClasses}, context);
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
dim3 dims(numClasses, indices->lengthOf(), numClasses * 32 + 32);
fillUpSegments(indices, numClasses, classesRangesBegs, classesRangesLens);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
if (input->isVector()) {
Nd4jLong loop_size = input->lengthOf();
auto numOfClasses = gradOut->lengthOf(); //indices->e<Nd4jLong>(loop_size - 1);
segmentSqrtNBPLinearKernel<T,I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(input->specialBuffer(),
input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
indices->specialBuffer(), indices->specialShapeInfo(), lengths, output->specialBuffer(), output->specialShapeInfo());
}
else {
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input->getShapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output->getShapeInfo(), dimensions);
// auto packGradIn = sd::ConstantTadHelper::getInstance()->tadForDimensions(tempRes.getShapeInfo(), dimensions);
auto packGradOut = sd::ConstantTadHelper::getInstance()->tadForDimensions(gradOut->getShapeInfo(), dimensions);
Nd4jLong* inputTads = packX.specialShapeInfo();
Nd4jLong* inputTadOffsets = packX.specialOffsets();
Nd4jLong* outputTads = packZ.specialShapeInfo();
Nd4jLong* outputTadOffsets = packZ.specialOffsets();
Nd4jLong* gradOutTads = packGradOut.specialShapeInfo();
Nd4jLong* gradOutTadOffsets = packGradOut.specialOffsets();
segmentSqrtNBPTadKernel<T,I><<<indices->lengthOf(), input->lengthOf(), 256, *stream>>>(input->specialBuffer(), input->specialShapeInfo(),
gradOut->specialBuffer(), gradOut->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), lengths,
output->specialBuffer(), output->specialShapeInfo(), inputTads, inputTadOffsets, gradOutTads, gradOutTadOffsets,
outputTads, outputTadOffsets);
}
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
return Status::OK();
}
// -------------------------------------------------------------------------------------------------------------- //
int unsortedSegmentSqrtNFunctorBP(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, Nd4jLong numOfClasses, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return unsortedSegmentSqrtNFunctorBP_, (context, input, indices, gradOut, numOfClasses, output), FLOAT_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
}
}
}
} |
16bb834408d336afcada0c52a0b3a4bcc7928e7a.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) 2020 Mobvoi Inc. (authors: Fangjun Kuang)
*
* See LICENSE for clarification regarding multiple authors
*/
#include <memory>
#include <mutex> // NOLINT
#include "ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h"
#include "c10/hip/HIPFunctions.h"
#include "k2/csrc/context.h"
#include "k2/csrc/log.h"
#include "k2/csrc/pytorch_context.h"
namespace k2 {
static std::once_flag has_cuda_init_flag;
static bool has_cuda = false;
static void InitHasCuda() {
if (torch::cuda::is_available())
has_cuda = true;
else
K2_LOG(WARNING) << "CUDA is not available. Return a CPU context.";
}
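// Context backed by PyTorch's CPU allocator, so host memory is managed by the same
// allocator PyTorch uses.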
class PytorchCpuContext : public Context {
public:
PytorchCpuContext() {
allocator_ = torch::GetAllocator(torch::kCPU);
K2_CHECK(allocator_->raw_deleter() != nullptr);
}
DeviceType GetDeviceType() const override { return kCpu; }
void *Allocate(std::size_t bytes, void **deleter_context) override {
void *p = allocator_->raw_allocate(bytes);
if (deleter_context != nullptr) *deleter_context = nullptr;
return p;
}
void Deallocate(void *data, void *deleter_context) override {
if (deleter_context != nullptr) {
// a non-empty `deleter_context` indicates that
// the memory is passed from a `torch::Tensor`
delete reinterpret_cast<ManagedTensor *>(deleter_context);
} else {
allocator_->raw_deallocate(data);
}
}
bool IsCompatible(const Context &other) const override {
return other.GetDeviceType() == kCpu;
}
void CopyDataTo(size_t num_bytes, const void *src, ContextPtr dst_context,
void *dst) override {
DeviceType device_type = dst_context->GetDeviceType();
switch (device_type) {
case kCpu:
memcpy(dst, src, num_bytes);
break;
case kCuda: {
ContextPtr pinned_context = GetPinnedContext();
auto region = NewRegion(pinned_context, num_bytes);
memcpy(region->data, src, num_bytes);
pinned_context->CopyDataTo(num_bytes, region->data, dst_context, dst);
break;
}
default:
K2_LOG(FATAL) << "Unsupported device type: " << device_type;
break;
}
}
private:
torch::Allocator *allocator_; // NOT owned here
};
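// Context backed by PyTorch's caching CUDA allocator; GetCudaStream() returns the
// current PyTorch stream for the device unless overridden via g_stream_override.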
class PytorchCudaContext : public Context {
public:
explicit PytorchCudaContext(int32_t gpu_id) : gpu_id_(gpu_id) {
K2_CHECK_GE(gpu_id, 0);
K2_CHECK_LT(gpu_id, c10::hip::device_count());
c10::hip::set_device(gpu_id);
// The internals of `lazyInitCUDA` are executed only once
// so it is fine to invoke lazyInitCUDA() multiple times.
// The call will be inlined since it is defined in the header
// aten/src/ATen/Context.h
at::globalContext().lazyInitCUDA();
allocator_ = c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get();
K2_CHECK(allocator_->raw_deleter() != nullptr);
}
DeviceType GetDeviceType() const override { return kCuda; }
int32_t GetDeviceId() const override { return gpu_id_; }
hipStream_t GetCudaStream() const override {
return g_stream_override.OverrideStream(
c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(gpu_id_));
}
void *Allocate(std::size_t bytes, void **deleter_context) override {
void *p = allocator_->raw_allocate(bytes);
if (deleter_context != nullptr) *deleter_context = nullptr;
return p;
}
void Deallocate(void *data, void *deleter_context) override {
if (deleter_context != nullptr) {
// a non-empty `deleter_context` indicates that
// the memory is passed from a `torch::Tensor`
delete reinterpret_cast<ManagedTensor *>(deleter_context);
} else {
allocator_->raw_deallocate(data);
}
}
bool IsCompatible(const Context &other) const override {
return other.GetDeviceType() == kCuda && other.GetDeviceId() == gpu_id_;
}
void Sync() const override {
auto ret = hipStreamSynchronize(GetCudaStream());
K2_CHECK_CUDA_ERROR(ret);
}
void CopyDataTo(size_t num_bytes, const void *src, ContextPtr dst_context,
void *dst) override {
DeviceType device_type = dst_context->GetDeviceType();
switch (device_type) {
case kCpu: {
hipError_t ret =
hipMemcpy(dst, src, num_bytes, hipMemcpyDeviceToHost);
K2_CHECK_CUDA_ERROR(ret);
break;
}
case kCuda: {
hipError_t ret =
hipMemcpyAsync(dst, src, num_bytes, hipMemcpyDeviceToDevice,
dst_context->GetCudaStream());
K2_CHECK_CUDA_ERROR(ret);
break;
}
default:
K2_LOG(FATAL) << "Unsupported device type: " << device_type;
break;
}
}
private:
torch::Allocator *allocator_; // NOT owned here
int32_t gpu_id_;
};
ContextPtr GetCpuContext() { return std::make_shared<PytorchCpuContext>(); }
ContextPtr GetCudaContext(int32_t gpu_id /*= -1*/) {
std::call_once(has_cuda_init_flag, InitHasCuda);
if (has_cuda) {
if (gpu_id < 0) gpu_id = c10::hip::current_device();
return std::make_shared<PytorchCudaContext>(gpu_id);
}
return GetCpuContext();
}
RegionPtr NewRegion(torch::Tensor tensor) {
auto ans = std::make_shared<Region>();
if (tensor.device().type() == torch::kCPU) {
ans->context = GetCpuContext();
} else if (tensor.is_cuda()) {
ans->context = GetCudaContext(tensor.device().index());
} else {
K2_LOG(FATAL) << "Unsupported device: " << tensor.device()
<< "\nOnly CPU and CUDA are supported";
}
// NOTE: the tensor is passed from Python and we have
// to retain it to avoid a potential segmentation fault.
//
// It will be freed in `Context::Deallocate`.
auto *managed_tensor = new ManagedTensor(tensor);
ans->data = tensor.data_ptr();
ans->deleter_context = managed_tensor;
ans->num_bytes = tensor.storage().nbytes();
ans->bytes_used = ans->num_bytes;
return ans;
}
} // namespace k2
| 16bb834408d336afcada0c52a0b3a4bcc7928e7a.cu | /**
* Copyright (c) 2020 Mobvoi Inc. (authors: Fangjun Kuang)
*
* See LICENSE for clarification regarding multiple authors
*/
#include <memory>
#include <mutex> // NOLINT
#include "c10/cuda/CUDACachingAllocator.h"
#include "c10/cuda/CUDAFunctions.h"
#include "k2/csrc/context.h"
#include "k2/csrc/log.h"
#include "k2/csrc/pytorch_context.h"
namespace k2 {
static std::once_flag has_cuda_init_flag;
static bool has_cuda = false;
static void InitHasCuda() {
if (torch::cuda::is_available())
has_cuda = true;
else
K2_LOG(WARNING) << "CUDA is not available. Return a CPU context.";
}
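// Context backed by PyTorch's CPU allocator, so host memory is managed by the same
// allocator PyTorch uses.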
class PytorchCpuContext : public Context {
public:
PytorchCpuContext() {
allocator_ = torch::GetAllocator(torch::kCPU);
K2_CHECK(allocator_->raw_deleter() != nullptr);
}
DeviceType GetDeviceType() const override { return kCpu; }
void *Allocate(std::size_t bytes, void **deleter_context) override {
void *p = allocator_->raw_allocate(bytes);
if (deleter_context != nullptr) *deleter_context = nullptr;
return p;
}
void Deallocate(void *data, void *deleter_context) override {
if (deleter_context != nullptr) {
// a non-empty `deleter_context` indicates that
// the memory is passed from a `torch::Tensor`
delete reinterpret_cast<ManagedTensor *>(deleter_context);
} else {
allocator_->raw_deallocate(data);
}
}
bool IsCompatible(const Context &other) const override {
return other.GetDeviceType() == kCpu;
}
void CopyDataTo(size_t num_bytes, const void *src, ContextPtr dst_context,
void *dst) override {
DeviceType device_type = dst_context->GetDeviceType();
switch (device_type) {
case kCpu:
memcpy(dst, src, num_bytes);
break;
case kCuda: {
ContextPtr pinned_context = GetPinnedContext();
auto region = NewRegion(pinned_context, num_bytes);
memcpy(region->data, src, num_bytes);
pinned_context->CopyDataTo(num_bytes, region->data, dst_context, dst);
break;
}
default:
K2_LOG(FATAL) << "Unsupported device type: " << device_type;
break;
}
}
private:
torch::Allocator *allocator_; // NOT owned here
};
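// Context backed by PyTorch's caching CUDA allocator; GetCudaStream() returns the
// current PyTorch stream for the device unless overridden via g_stream_override.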
class PytorchCudaContext : public Context {
public:
explicit PytorchCudaContext(int32_t gpu_id) : gpu_id_(gpu_id) {
K2_CHECK_GE(gpu_id, 0);
K2_CHECK_LT(gpu_id, c10::cuda::device_count());
c10::cuda::set_device(gpu_id);
// The internals of `lazyInitCUDA` are executed only once
// so it is fine to invoke lazyInitCUDA() multiple times.
// The call will be inlined since it is defined in the header
// aten/src/ATen/Context.h
at::globalContext().lazyInitCUDA();
allocator_ = c10::cuda::CUDACachingAllocator::get();
K2_CHECK(allocator_->raw_deleter() != nullptr);
}
DeviceType GetDeviceType() const override { return kCuda; }
int32_t GetDeviceId() const override { return gpu_id_; }
cudaStream_t GetCudaStream() const override {
return g_stream_override.OverrideStream(
c10::cuda::getCurrentCUDAStream(gpu_id_));
}
void *Allocate(std::size_t bytes, void **deleter_context) override {
void *p = allocator_->raw_allocate(bytes);
if (deleter_context != nullptr) *deleter_context = nullptr;
return p;
}
void Deallocate(void *data, void *deleter_context) override {
if (deleter_context != nullptr) {
// a non-empty `deleter_context` indicates that
// the memory is passed from a `torch::Tensor`
delete reinterpret_cast<ManagedTensor *>(deleter_context);
} else {
allocator_->raw_deallocate(data);
}
}
bool IsCompatible(const Context &other) const override {
return other.GetDeviceType() == kCuda && other.GetDeviceId() == gpu_id_;
}
void Sync() const override {
auto ret = cudaStreamSynchronize(GetCudaStream());
K2_CHECK_CUDA_ERROR(ret);
}
void CopyDataTo(size_t num_bytes, const void *src, ContextPtr dst_context,
void *dst) override {
DeviceType device_type = dst_context->GetDeviceType();
switch (device_type) {
case kCpu: {
cudaError_t ret =
cudaMemcpy(dst, src, num_bytes, cudaMemcpyDeviceToHost);
K2_CHECK_CUDA_ERROR(ret);
break;
}
case kCuda: {
cudaError_t ret =
cudaMemcpyAsync(dst, src, num_bytes, cudaMemcpyDeviceToDevice,
dst_context->GetCudaStream());
K2_CHECK_CUDA_ERROR(ret);
break;
}
default:
K2_LOG(FATAL) << "Unsupported device type: " << device_type;
break;
}
}
private:
torch::Allocator *allocator_; // NOT owned here
int32_t gpu_id_;
};
ContextPtr GetCpuContext() { return std::make_shared<PytorchCpuContext>(); }
ContextPtr GetCudaContext(int32_t gpu_id /*= -1*/) {
std::call_once(has_cuda_init_flag, InitHasCuda);
if (has_cuda) {
if (gpu_id < 0) gpu_id = c10::cuda::current_device();
return std::make_shared<PytorchCudaContext>(gpu_id);
}
return GetCpuContext();
}
RegionPtr NewRegion(torch::Tensor tensor) {
auto ans = std::make_shared<Region>();
if (tensor.device().type() == torch::kCPU) {
ans->context = GetCpuContext();
} else if (tensor.is_cuda()) {
ans->context = GetCudaContext(tensor.device().index());
} else {
K2_LOG(FATAL) << "Unsupported device: " << tensor.device()
<< "\nOnly CPU and CUDA are supported";
}
// NOTE: the tensor is passed from Python and we have
// to retain it to avoid a potential segmentation fault.
//
// It will be freed in `Context::Deallocate`.
auto *managed_tensor = new ManagedTensor(tensor);
ans->data = tensor.data_ptr();
ans->deleter_context = managed_tensor;
ans->num_bytes = tensor.storage().nbytes();
ans->bytes_used = ans->num_bytes;
return ans;
}
} // namespace k2
|
fec847d86c4df061ab5f3a127654ef0da2954091.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include <hip/driver_types.h>
#include "utils.h"
#define min(a, b) (((a) > (b))? (b): (a))
#define max(a, b) (((a) > (b))? (a): (b))
__device__ void clamp(int & pos, int maxpos) {
pos = pos > 0 ? pos : 0;
pos = pos < (maxpos - 1) ? pos : (maxpos - 1);
}
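// NOTE: clamp() is only used by the alternative implementation commented out inside
// gaussian_blur below; the active code path clamps with the min/max macros above.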
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y);
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return;
int thread_1d_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
///*
float result = 0.f;
for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
//Find the global image position for this filter position
//clamp to boundary of the image
int image_r = min(max(thread_2D_pos.y + filter_r, 0), static_cast<int>(numRows - 1));
int image_c = min(max(thread_2D_pos.x + filter_c, 0), static_cast<int>(numCols - 1));
float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]);
float filter_value = filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
result += image_value * filter_value;
}
}
outputChannel[thread_1d_pos] = result;
//*/
/*
int halfWidth = filterWidth / 2;
float value_final = 0.0f;
for (int y = 0; y < filterWidth; y++){
for (int x = 0; x < filterWidth; x++){
int image_r = static_cast<int>(thread_2D_pos.y + (y - halfWidth));
clamp(image_r, numRows);
int image_c = static_cast<int>(thread_2D_pos.x + (x - halfWidth));
clamp(image_c, numCols);
value_final += filter[y*filterWidth + x] * static_cast<float>(inputChannel[image_r*numCols + image_c]);
}
}
outputChannel[thread_1d_pos] = value_final;
*/
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
//printf(" %d %d %d %d \n", threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y);
//printf("rows: %zd -- cols: %zd \n", idx_x, idx_y);
redChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].x;
greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y;
blueChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with hipMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc
checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(hipMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, hipMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
const int BLOCK_SIZE = 32;
//TODO: Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE, 1);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
const dim3 gridSize(ceil(1.0f*numCols / blockSize.x), ceil(1.0f*numRows / blockSize.y));
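// Rounding up here means the last row/column of blocks may extend past the image;
// the per-thread bounds checks inside the kernels make those extra threads return early.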
//TODO: Launch a kernel for separating the RGBA image into different color channels
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize) , 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
hipLaunchKernelGGL(( gaussian_blur) , dim3(gridSize), dim3(blockSize) , 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur) , dim3(gridSize), dim3(blockSize) , 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur) , dim3(gridSize), dim3(blockSize) , 0, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
// Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
}
| fec847d86c4df061ab5f3a127654ef0da2954091.cu | // Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
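//
// As a purely illustrative sketch (not part of the assignment code), the SoA
// split amounts to the following sequential loop; the separateChannels kernel
// further below does the same thing with one thread per pixel. The names used
// here (h_rgba, h_red, h_green, h_blue, numPixels) are placeholders only:
//
//   for (size_t i = 0; i < numPixels; ++i) {
//     h_red[i]   = h_rgba[i].x;  // R plane
//     h_green[i] = h_rgba[i].y;  // G plane
//     h_blue[i]  = h_rgba[i].z;  // B plane (the alpha channel .w is ignored)
//   }
//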
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
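//
// For reference, the per-pixel computation described above boils down to the
// following sequential sketch (illustrative only, with placeholder names row,
// col, channel and output; the gaussian_blur kernel below runs this loop body
// once per thread):
//
//   float result = 0.f;
//   for (int r = -filterWidth / 2; r <= filterWidth / 2; ++r) {
//     for (int c = -filterWidth / 2; c <= filterWidth / 2; ++c) {
//       int image_r = min(max(row + r, 0), numRows - 1);   // clamp to the image
//       int image_c = min(max(col + c, 0), numCols - 1);
//       result += filter[(r + filterWidth / 2) * filterWidth + (c + filterWidth / 2)]
//                 * channel[image_r * numCols + image_c];
//     }
//   }
//   output[row * numCols + col] = result;
//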
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
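//
// The checkCudaErrors helper itself comes from utils.h; as a hedged
// illustration only (not the exact definition used by this assignment), such a
// helper typically looks like:
//
//   #define checkCudaErrors(val) checkCuda((val), #val, __FILE__, __LINE__)
//   inline void checkCuda(cudaError_t err, const char* expr, const char* file, int line) {
//     if (err != cudaSuccess) {
//       std::cerr << "CUDA error " << cudaGetErrorString(err)
//                 << " at " << file << ":" << line << " in " << expr << std::endl;
//       exit(1);
//     }
//   }
//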
#include <driver_types.h>
#include "utils.h"
#define min(a, b) (((a) > (b)) ? (b) : (a))
#define max(a, b) (((a) > (b)) ? (a) : (b))
__device__ void clamp(int & pos, int maxpos) {
pos = pos > 0 ? pos : 0;
pos = pos < (maxpos - 1) ? pos : (maxpos - 1);
}
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to the sequential reference solution for the exact clamping semantics you should follow.
const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y);
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return;
int thread_1d_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
///*
float result = 0.f;
for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
//Find the global image position for this filter position
//clamp to boundary of the image
int image_r = min(max(thread_2D_pos.y + filter_r, 0), static_cast<int>(numRows - 1));
int image_c = min(max(thread_2D_pos.x + filter_c, 0), static_cast<int>(numCols - 1));
float image_value = static_cast<float>(inputChannel[image_r * numCols + image_c]);
float filter_value = filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
result += image_value * filter_value;
}
}
outputChannel[thread_1d_pos] = result;
//*/
/*
int halfWidth = filterWidth / 2;
float value_final = 0.0f;
for (int y = 0; y < filterWidth; y++){
for (int x = 0; x < filterWidth; x++){
int image_r = static_cast<int>(thread_2D_pos.y + (y - halfWidth));
clamp(image_r, numRows);
int image_c = static_cast<int>(thread_2D_pos.x + (x - halfWidth));
clamp(image_c, numCols);
value_final += filter[y*filterWidth + x] * static_cast<float>(inputChannel[image_r*numCols + image_c]);
}
}
outputChannel[thread_1d_pos] = value_final;
*/
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
//printf(" %d %d %d %d \n", threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y);
//printf("rows: %zd -- cols: %zd \n", idx_x, idx_y);
redChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].x;
greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y;
blueChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].z;
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with cudaMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc
checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeof(float) * filterWidth * filterWidth, cudaMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
const int BLOCK_SIZE = 32;
//TODO: Set reasonable block size (i.e., number of threads per block)
const dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE, 1);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
const dim3 gridSize(ceil(1.0f*numCols / blockSize.x), ceil(1.0f*numRows / blockSize.y));
//TODO: Launch a kernel for separating the RGBA image into different color channels
separateChannels<<<gridSize, blockSize >>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
// Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
gaussian_blur <<<gridSize, blockSize >>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur <<<gridSize, blockSize >>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur <<<gridSize, blockSize >>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
// Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
}
|
41af1342bc148ba66e531a637b8552acfaf1451c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cube.hip"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_out = NULL;
hipMalloc(&d_out, XSIZE*YSIZE);
float *d_in = NULL;
hipMalloc(&d_in, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
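// Round the problem size up to the next multiple of the block dimensions so the
// grid computed below fully covers the XSIZE x YSIZE matrix.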
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(cube, dim3(gridBlock), dim3(threadBlock), 0, 0, d_out, d_in);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
  hipLaunchKernelGGL(cube, dim3(gridBlock), dim3(threadBlock), 0, 0, d_out, d_in);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
  hipLaunchKernelGGL(cube, dim3(gridBlock), dim3(threadBlock), 0, 0, d_out, d_in);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 41af1342bc148ba66e531a637b8552acfaf1451c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cube.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_out = NULL;
cudaMalloc(&d_out, XSIZE*YSIZE);
float *d_in = NULL;
cudaMalloc(&d_in, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cube<<<gridBlock,threadBlock>>>(d_out,d_in);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cube<<<gridBlock,threadBlock>>>(d_out,d_in);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cube<<<gridBlock,threadBlock>>>(d_out,d_in);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
7cf845a6588b60e9fe3b6aafb8590a25a7e9696b.hip | // !!! This is a file automatically generated by hipify!!!
#include <rocblas.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <float.h>
#include "blas.h"
#include "convolutional_layer.h"
#include "dark_cuda.h"
#include "maxpool_layer.h"
__global__ void forward_maxpool_depth_layer_kernel(int n, int w, int h, int c,
int out_c, int batch, float* input, float* output, int* indexes)
{
int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (id >= n)
return;
int j = id % w;
id = id / w;
int i = id % h;
id = id / h;
// int g = id % out_c;
// id = id / out_c;
int b = id % batch;
int k;
for (int g = 0; g < out_c; ++g)
{
int out_index = j + w * (i + h * (g + out_c * b));
float max = -FLT_MAX;
int max_i = -1;
for (k = g; k < c; k += out_c)
{
int in_index = j + w * (i + h * (k + c * b));
float val = input[in_index];
max_i = (val > max) ? in_index : max_i;
max = (val > max) ? val : max;
}
output[out_index] = max;
if (indexes)
indexes[out_index] = max_i;
}
}
__global__ void backward_maxpool_depth_layer_kernel(int n, int w, int h, int c,
int batch, float* delta, float* prev_delta, int* indexes)
{
int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (id >= n)
return;
int index = indexes[id];
prev_delta[index] += delta[id];
}
__global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w,
int in_c, int stride_x, int stride_y, int size, int pad, float* input,
float* output, int* indexes)
{
int h = (in_h + pad - size) / stride_y + 1;
int w = (in_w + pad - size) / stride_x + 1;
int c = in_c;
int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (id >= n)
return;
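  // Decompose the flat thread id into output coordinates: j = x, i = y, k = channel, b = batch.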
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
int w_offset = -pad / 2;
int h_offset = -pad / 2;
int out_index = j + w * (i + h * (k + c * b));
float max = -INFINITY;
int max_i = -1;
int l, m;
for (l = 0; l < size; ++l)
{
for (m = 0; m < size; ++m)
{
int cur_h = h_offset + i * stride_y + l;
int cur_w = w_offset + j * stride_x + m;
int index = cur_w + in_w * (cur_h + in_h * (k + b * in_c));
int valid = (cur_h >= 0 && cur_h < in_h && cur_w >= 0 && cur_w < in_w);
float val = (valid != 0) ? input[index] : -INFINITY;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
output[out_index] = max;
if (indexes)
indexes[out_index] = max_i;
}
__global__ void backward_maxpool_layer_kernel(int n, int in_h, int in_w,
int in_c, int stride_x, int stride_y, int size, int pad, float* delta,
float* prev_delta, int* indexes)
{
int h = (in_h + pad - size) / stride_y + 1;
int w = (in_w + pad - size) / stride_x + 1;
int c = in_c;
int area_x = (size - 1) / stride_x;
int area_y = (size - 1) / stride_y;
int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (id >= n)
return;
int index = id;
int j = id % in_w;
id /= in_w;
int i = id % in_h;
id /= in_h;
int k = id % in_c;
id /= in_c;
int b = id;
int w_offset = -pad / 2;
int h_offset = -pad / 2;
float d = 0;
int l, m;
for (l = -area_y; l < area_y + 1; ++l)
{
for (m = -area_x; m < area_x + 1; ++m)
{
int out_w = (j - w_offset) / stride_x + m;
int out_h = (i - h_offset) / stride_y + l;
int out_index = out_w + w * (out_h + h * (k + c * b));
int valid = (out_w >= 0 && out_w < w && out_h >= 0 && out_h < h);
d += (valid && indexes[out_index] == index) ? delta[out_index] : 0;
}
}
prev_delta[index] += d;
}
void ForwardMaxpoolLayerGpu(layer* l, NetworkState state)
{
if (l->maxpool_depth)
{
int h = l->out_h;
int w = l->out_w;
int c = 1;
size_t n = h * w * c * l->batch;
hipLaunchKernelGGL(( forward_maxpool_depth_layer_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0,
get_cuda_stream(), n, l->w, l->h, l->c, l->out_c, l->batch,
state.input, l->output_gpu, l->indexes_gpu);
CHECK_CUDA(hipPeekAtLastError());
return;
}
#ifdef CUDNN_DISABLED
if (!state.train && l->stride == l->size)
{
float alpha = 1, beta = 0;
cudnnStatus_t maxpool_status = cudnnPoolingForward(cudnn_handle(),
l->poolingDesc, &alpha, l->srcTensorDesc, state.input, &beta,
l->dstTensorDesc, l->output_gpu);
}
else
#endif
{
int h = l->out_h;
int w = l->out_w;
int c = l->out_c;
size_t n = h * w * c * l->batch;
hipLaunchKernelGGL(( forward_maxpool_layer_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0,
get_cuda_stream(), n, l->h, l->w, l->c, l->stride_x, l->stride_y,
l->size, l->pad, state.input, l->output_gpu, l->indexes_gpu);
CHECK_CUDA(hipPeekAtLastError());
}
if (l->antialiasing)
{
NetworkState s = {0};
s.train = state.train;
s.workspace = state.workspace;
s.net = state.net;
if (!state.train)
s.index = state.index;
s.input = l->output_gpu;
ForwardConvolutionalLayerGpu(l->input_layer, s);
simple_copy_ongpu(
l->outputs * l->batch, l->output_gpu, l->input_antialiasing_gpu);
simple_copy_ongpu(l->input_layer->outputs * l->input_layer->batch,
l->input_layer->output_gpu, l->output_gpu);
}
}
void BackwardMaxpoolLayerGpu(layer* l, NetworkState state)
{
if (l->antialiasing)
{
NetworkState s = {0};
s.train = state.train;
s.workspace = state.workspace;
s.net = state.net;
s.delta = l->delta_gpu;
s.input = l->input_antialiasing_gpu;
simple_copy_ongpu(l->input_layer->outputs * l->input_layer->batch,
l->delta_gpu, l->input_layer->delta_gpu);
BackwardConvolutionalLayerGpu(l->input_layer, s);
}
if (l->maxpool_depth)
{
int h = l->out_h;
int w = l->out_w;
int c = l->out_c;
size_t n = h * w * c * l->batch;
hipLaunchKernelGGL(( backward_maxpool_depth_layer_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0,
get_cuda_stream(), n, l->w, l->h, l->c, l->batch, l->delta_gpu,
state.delta, l->indexes_gpu);
CHECK_CUDA(hipPeekAtLastError());
return;
}
size_t n = l->h * l->w * l->c * l->batch;
hipLaunchKernelGGL(( backward_maxpool_layer_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0,
get_cuda_stream(), n, l->h, l->w, l->c, l->stride_x, l->stride_y,
l->size, l->pad, l->delta_gpu, state.delta, l->indexes_gpu);
CHECK_CUDA(hipPeekAtLastError());
}
| 7cf845a6588b60e9fe3b6aafb8590a25a7e9696b.cu | #include <cublas_v2.h>
#include <cuda_runtime.h>
#include <curand.h>
#include <float.h>
#include "blas.h"
#include "convolutional_layer.h"
#include "dark_cuda.h"
#include "maxpool_layer.h"
__global__ void forward_maxpool_depth_layer_kernel(int n, int w, int h, int c,
int out_c, int batch, float* input, float* output, int* indexes)
{
int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (id >= n)
return;
int j = id % w;
id = id / w;
int i = id % h;
id = id / h;
// int g = id % out_c;
// id = id / out_c;
int b = id % batch;
int k;
for (int g = 0; g < out_c; ++g)
{
int out_index = j + w * (i + h * (g + out_c * b));
float max = -FLT_MAX;
int max_i = -1;
for (k = g; k < c; k += out_c)
{
int in_index = j + w * (i + h * (k + c * b));
float val = input[in_index];
max_i = (val > max) ? in_index : max_i;
max = (val > max) ? val : max;
}
output[out_index] = max;
if (indexes)
indexes[out_index] = max_i;
}
}
__global__ void backward_maxpool_depth_layer_kernel(int n, int w, int h, int c,
int batch, float* delta, float* prev_delta, int* indexes)
{
int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (id >= n)
return;
int index = indexes[id];
prev_delta[index] += delta[id];
}
__global__ void forward_maxpool_layer_kernel(int n, int in_h, int in_w,
int in_c, int stride_x, int stride_y, int size, int pad, float* input,
float* output, int* indexes)
{
int h = (in_h + pad - size) / stride_y + 1;
int w = (in_w + pad - size) / stride_x + 1;
int c = in_c;
int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (id >= n)
return;
int j = id % w;
id /= w;
int i = id % h;
id /= h;
int k = id % c;
id /= c;
int b = id;
int w_offset = -pad / 2;
int h_offset = -pad / 2;
int out_index = j + w * (i + h * (k + c * b));
float max = -INFINITY;
int max_i = -1;
int l, m;
for (l = 0; l < size; ++l)
{
for (m = 0; m < size; ++m)
{
int cur_h = h_offset + i * stride_y + l;
int cur_w = w_offset + j * stride_x + m;
int index = cur_w + in_w * (cur_h + in_h * (k + b * in_c));
int valid = (cur_h >= 0 && cur_h < in_h && cur_w >= 0 && cur_w < in_w);
float val = (valid != 0) ? input[index] : -INFINITY;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
output[out_index] = max;
if (indexes)
indexes[out_index] = max_i;
}
__global__ void backward_maxpool_layer_kernel(int n, int in_h, int in_w,
int in_c, int stride_x, int stride_y, int size, int pad, float* delta,
float* prev_delta, int* indexes)
{
int h = (in_h + pad - size) / stride_y + 1;
int w = (in_w + pad - size) / stride_x + 1;
int c = in_c;
int area_x = (size - 1) / stride_x;
int area_y = (size - 1) / stride_y;
int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
if (id >= n)
return;
int index = id;
int j = id % in_w;
id /= in_w;
int i = id % in_h;
id /= in_h;
int k = id % in_c;
id /= in_c;
int b = id;
int w_offset = -pad / 2;
int h_offset = -pad / 2;
float d = 0;
int l, m;
for (l = -area_y; l < area_y + 1; ++l)
{
for (m = -area_x; m < area_x + 1; ++m)
{
int out_w = (j - w_offset) / stride_x + m;
int out_h = (i - h_offset) / stride_y + l;
int out_index = out_w + w * (out_h + h * (k + c * b));
int valid = (out_w >= 0 && out_w < w && out_h >= 0 && out_h < h);
d += (valid && indexes[out_index] == index) ? delta[out_index] : 0;
}
}
prev_delta[index] += d;
}
void ForwardMaxpoolLayerGpu(layer* l, NetworkState state)
{
if (l->maxpool_depth)
{
int h = l->out_h;
int w = l->out_w;
int c = 1;
size_t n = h * w * c * l->batch;
forward_maxpool_depth_layer_kernel<<<cuda_gridsize(n), BLOCK, 0,
get_cuda_stream()>>>(n, l->w, l->h, l->c, l->out_c, l->batch,
state.input, l->output_gpu, l->indexes_gpu);
CHECK_CUDA(cudaPeekAtLastError());
return;
}
#ifdef CUDNN_DISABLED
if (!state.train && l->stride == l->size)
{
float alpha = 1, beta = 0;
cudnnStatus_t maxpool_status = cudnnPoolingForward(cudnn_handle(),
l->poolingDesc, &alpha, l->srcTensorDesc, state.input, &beta,
l->dstTensorDesc, l->output_gpu);
}
else
#endif
{
int h = l->out_h;
int w = l->out_w;
int c = l->out_c;
size_t n = h * w * c * l->batch;
forward_maxpool_layer_kernel<<<cuda_gridsize(n), BLOCK, 0,
get_cuda_stream()>>>(n, l->h, l->w, l->c, l->stride_x, l->stride_y,
l->size, l->pad, state.input, l->output_gpu, l->indexes_gpu);
CHECK_CUDA(cudaPeekAtLastError());
}
if (l->antialiasing)
{
NetworkState s = {0};
s.train = state.train;
s.workspace = state.workspace;
s.net = state.net;
if (!state.train)
s.index = state.index;
s.input = l->output_gpu;
ForwardConvolutionalLayerGpu(l->input_layer, s);
simple_copy_ongpu(
l->outputs * l->batch, l->output_gpu, l->input_antialiasing_gpu);
simple_copy_ongpu(l->input_layer->outputs * l->input_layer->batch,
l->input_layer->output_gpu, l->output_gpu);
}
}
void BackwardMaxpoolLayerGpu(layer* l, NetworkState state)
{
if (l->antialiasing)
{
NetworkState s = {0};
s.train = state.train;
s.workspace = state.workspace;
s.net = state.net;
s.delta = l->delta_gpu;
s.input = l->input_antialiasing_gpu;
simple_copy_ongpu(l->input_layer->outputs * l->input_layer->batch,
l->delta_gpu, l->input_layer->delta_gpu);
BackwardConvolutionalLayerGpu(l->input_layer, s);
}
if (l->maxpool_depth)
{
int h = l->out_h;
int w = l->out_w;
int c = l->out_c;
size_t n = h * w * c * l->batch;
backward_maxpool_depth_layer_kernel<<<cuda_gridsize(n), BLOCK, 0,
get_cuda_stream()>>>(n, l->w, l->h, l->c, l->batch, l->delta_gpu,
state.delta, l->indexes_gpu);
CHECK_CUDA(cudaPeekAtLastError());
return;
}
size_t n = l->h * l->w * l->c * l->batch;
backward_maxpool_layer_kernel<<<cuda_gridsize(n), BLOCK, 0,
get_cuda_stream()>>>(n, l->h, l->w, l->c, l->stride_x, l->stride_y,
l->size, l->pad, l->delta_gpu, state.delta, l->indexes_gpu);
CHECK_CUDA(cudaPeekAtLastError());
}
|
c3d004ac23875ba5d88d3c888bd8b392f5db06cd.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorMasked.hip"
#else
void THCTensor_(maskedFill)(THCState* state,
THCTensor *tensor, THCudaByteTensor *mask, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, mask));
THArgCheck(THCTensor_(nElement)(state, tensor) ==
THCudaByteTensor_nElement(state, mask),
2, "sizes do not match");
if (!THC_pointwiseApply2<scalar_t, uint8_t>(state, tensor, mask,
TensorMaskedFillOp<scalar_t, unsigned char>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(maskedFillByte)(THCState* state,
THCTensor *tensor, THByteTensor *mask, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, tensor));
THCudaByteTensor* maskCuda = THCudaByteTensor_newWithSize(state, mask->sizes(), {});
THCTensor_(copy)(state, maskCuda, mask);
THCTensor_(maskedFill)(state, tensor, maskCuda, value);
THCudaByteTensor_free(state, maskCuda);
}
void THCTensor_(maskedCopy)(THCState* state,
THCTensor *tensor, THCudaByteTensor *mask, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, tensor, src, mask));
ptrdiff_t maskSize = THCudaByteTensor_nElement(state, mask);
ptrdiff_t tensorSize = THCTensor_(nElement)(state, tensor);
ptrdiff_t srcSize = THCTensor_(nElement)(state, src);
// `mask` and `tensor` must have the same number of elements
THArgCheck(maskSize == tensorSize, 2,
"mask and tensor must have the same number of elements");
// Determine our output size
ptrdiff_t totalElements = THCudaByteTensor_sumall(state, mask);
// The number of `1` elements present in the mask must be <= the
// number of elements available in `src`
if (totalElements > srcSize) {
THArgCheck(false, 2, "source nElements must be == mask `1` elements");
}
// FIXME: there appears to be a bug in Thrust (CUDA 7.0) for mixed
// iterator prefix sums? Convert `mask` to the same datatype as what
// we're accumulating the prefix sum in (int64_t) to get around it
THCudaLongTensor* maskLong = THCudaLongTensor_new(state);
at::IntList maskSizes = mask->sizes();
THCudaLongTensor_resize(state, maskLong, maskSizes, {});
THCTensor_(copy)(state, maskLong, mask);
// Use a prefix sum to determine the output locations of the masked elements
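  // (e.g. mask [1, 0, 1, 1] -> exclusive scan [0, 1, 1, 2]: the i-th selected
  // element is written to / read from offset maskPrefixSum[i])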
THCudaLongTensor* maskPrefixSum = THCudaLongTensor_new(state);
THCudaLongTensor_resize(state, maskPrefixSum, maskSizes, {});
THCThrustAllocator thrustAlloc(state);
thrust::device_ptr<int64_t>
maskData(THCudaLongTensor_data(state, maskLong));
thrust::device_ptr<int64_t>
maskPrefixSumData(THCudaLongTensor_data(state, maskPrefixSum));
thrust::exclusive_scan(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
maskData,
maskData + THCudaLongTensor_nElement(state, maskLong),
maskPrefixSumData);
// We are getting elements from `src` based on an offset from
// `maskPrefixSum`, so that should be made contiguous too
THCTensor* contigSrc = THCTensor_(newContiguous)(state, src);
// update `tensor` where `mask` == 1 but pull from `src` at
// maskPrefixSum
bool status = THC_pointwiseApply3<scalar_t, uint8_t, int64_t>(
state, tensor, mask, maskPrefixSum,
TensorMaskedCopyOp<scalar_t, unsigned char, int64_t>(
THCTensor_(data)(state, contigSrc)));
THCTensor_(free)(state, contigSrc);
THCudaLongTensor_free(state, maskLong);
THCudaLongTensor_free(state, maskPrefixSum);
THArgCheck(status, 2, CUTORCH_DIM_WARNING);
THCudaCheck(hipGetLastError());
}
void THCTensor_(maskedCopyByte)(THCState* state,
THCTensor *tensor, THByteTensor *mask, THCTensor *src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src));
THCudaByteTensor* maskCuda = THCudaByteTensor_newWithSize(state, mask->sizes(), {});
THCTensor_(copy)(state, maskCuda, mask);
THCTensor_(maskedCopy)(state, tensor, maskCuda, src);
THCudaByteTensor_free(state, maskCuda);
}
void THCTensor_(maskedSelect)(THCState* state,
THCTensor* tensor, THCTensor* src, THCudaByteTensor* mask) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, tensor, src, mask));
THArgCheck(THCudaByteTensor_nElement(state, mask) ==
THCTensor_(nElement)(state, src),
2, "sizes do not match");
// Determine our output size
ptrdiff_t totalElements = THCudaByteTensor_sumall(state, mask);
THCTensor* tensorContig = THCTensor_(newContiguous)(state, tensor);
THCTensor_(resize1d)(state, tensorContig, totalElements);
if (tensor != tensorContig) {
THCTensor_(resize1d)(state, tensor, totalElements);
}
// FIXME: there appears to be a bug in Thrust (CUDA 7.0) for mixed
// iterator prefix sums? Convert `mask` to the same datatype as what
// we're accumulating the prefix sum in (int64_t) to get around it
THCudaLongTensor* maskLong = THCudaLongTensor_new(state);
at::IntList maskSizes = mask->sizes();
THCudaLongTensor_resize(state, maskLong, maskSizes, {});
THCTensor_(copy)(state, maskLong, mask);
// Use a prefix sum to determine the output locations of the masked elements
THCudaLongTensor* maskPrefixSum = THCudaLongTensor_new(state);
THCudaLongTensor_resize(state, maskPrefixSum, maskSizes, {});
THCThrustAllocator thrustAlloc(state);
thrust::device_ptr<int64_t>
maskData(THCudaLongTensor_data(state, maskLong));
thrust::device_ptr<int64_t>
maskPrefixSumData(THCudaLongTensor_data(state, maskPrefixSum));
thrust::exclusive_scan(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
maskData,
maskData + THCudaLongTensor_nElement(state, maskLong),
maskPrefixSumData);
// Then copy over the masked elements at their desired output index
bool status = THC_pointwiseApply3<uint8_t, int64_t, scalar_t>(
state, mask, maskPrefixSum,
src, TensorMaskedSelectOp<scalar_t, unsigned char, int64_t>(
THCTensor_(data)(state, tensor)));
THCudaLongTensor_free(state, maskLong);
THCudaLongTensor_free(state, maskPrefixSum);
if (tensor != tensorContig) {
THCTensor_(freeCopyTo)(state, tensorContig, tensor);
} else {
THCTensor_(free)(state, tensorContig);
}
THArgCheck(status, 2, CUTORCH_DIM_WARNING);
THCudaCheck(hipGetLastError());
}
// FIXME: remove now that we have THCudaByteTensor?
void THCTensor_(maskedSelectByte)(THCState* state,
THCTensor *tensor, THCTensor *src, THByteTensor *mask)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src));
THCudaByteTensor* maskCuda = THCudaByteTensor_newWithSize(state, mask->sizes(), {});
THCTensor_(copy)(state, maskCuda, mask);
THCTensor_(maskedSelect)(state, tensor, src, maskCuda);
THCudaByteTensor_free(state, maskCuda);
}
#endif
| c3d004ac23875ba5d88d3c888bd8b392f5db06cd.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorMasked.cu"
#else
void THCTensor_(maskedFill)(THCState* state,
THCTensor *tensor, THCudaByteTensor *mask, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, mask));
THArgCheck(THCTensor_(nElement)(state, tensor) ==
THCudaByteTensor_nElement(state, mask),
2, "sizes do not match");
if (!THC_pointwiseApply2<scalar_t, uint8_t>(state, tensor, mask,
TensorMaskedFillOp<scalar_t, unsigned char>(value))) {
THArgCheck(false, 2, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(maskedFillByte)(THCState* state,
THCTensor *tensor, THByteTensor *mask, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, tensor));
THCudaByteTensor* maskCuda = THCudaByteTensor_newWithSize(state, mask->sizes(), {});
THCTensor_(copy)(state, maskCuda, mask);
THCTensor_(maskedFill)(state, tensor, maskCuda, value);
THCudaByteTensor_free(state, maskCuda);
}
void THCTensor_(maskedCopy)(THCState* state,
THCTensor *tensor, THCudaByteTensor *mask, THCTensor *src)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, tensor, src, mask));
ptrdiff_t maskSize = THCudaByteTensor_nElement(state, mask);
ptrdiff_t tensorSize = THCTensor_(nElement)(state, tensor);
ptrdiff_t srcSize = THCTensor_(nElement)(state, src);
// `mask` and `tensor` must have the same number of elements
THArgCheck(maskSize == tensorSize, 2,
"mask and tensor must have the same number of elements");
// Determine our output size
ptrdiff_t totalElements = THCudaByteTensor_sumall(state, mask);
// The number of `1` elements present in the mask must be <= the
// number of elements available in `src`
if (totalElements > srcSize) {
THArgCheck(false, 2, "source nElements must be == mask `1` elements");
}
// FIXME: there appears to be a bug in Thrust (CUDA 7.0) for mixed
// iterator prefix sums? Convert `mask` to the same datatype as what
// we're accumulating the prefix sum in (int64_t) to get around it
THCudaLongTensor* maskLong = THCudaLongTensor_new(state);
at::IntList maskSizes = mask->sizes();
THCudaLongTensor_resize(state, maskLong, maskSizes, {});
THCTensor_(copy)(state, maskLong, mask);
// Use a prefix sum to determine the output locations of the masked elements
THCudaLongTensor* maskPrefixSum = THCudaLongTensor_new(state);
THCudaLongTensor_resize(state, maskPrefixSum, maskSizes, {});
THCThrustAllocator thrustAlloc(state);
thrust::device_ptr<int64_t>
maskData(THCudaLongTensor_data(state, maskLong));
thrust::device_ptr<int64_t>
maskPrefixSumData(THCudaLongTensor_data(state, maskPrefixSum));
thrust::exclusive_scan(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
maskData,
maskData + THCudaLongTensor_nElement(state, maskLong),
maskPrefixSumData);
// We are getting elements from `src` based on an offset from
// `maskPrefixSum`, so that should be made contiguous too
THCTensor* contigSrc = THCTensor_(newContiguous)(state, src);
// update `tensor` where `mask` == 1 but pull from `src` at
// maskPrefixSum
bool status = THC_pointwiseApply3<scalar_t, uint8_t, int64_t>(
state, tensor, mask, maskPrefixSum,
TensorMaskedCopyOp<scalar_t, unsigned char, int64_t>(
THCTensor_(data)(state, contigSrc)));
THCTensor_(free)(state, contigSrc);
THCudaLongTensor_free(state, maskLong);
THCudaLongTensor_free(state, maskPrefixSum);
THArgCheck(status, 2, CUTORCH_DIM_WARNING);
THCudaCheck(cudaGetLastError());
}
void THCTensor_(maskedCopyByte)(THCState* state,
THCTensor *tensor, THByteTensor *mask, THCTensor *src) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src));
THCudaByteTensor* maskCuda = THCudaByteTensor_newWithSize(state, mask->sizes(), {});
THCTensor_(copy)(state, maskCuda, mask);
THCTensor_(maskedCopy)(state, tensor, maskCuda, src);
THCudaByteTensor_free(state, maskCuda);
}
void THCTensor_(maskedSelect)(THCState* state,
THCTensor* tensor, THCTensor* src, THCudaByteTensor* mask) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, tensor, src, mask));
THArgCheck(THCudaByteTensor_nElement(state, mask) ==
THCTensor_(nElement)(state, src),
2, "sizes do not match");
// Determine our output size
ptrdiff_t totalElements = THCudaByteTensor_sumall(state, mask);
THCTensor* tensorContig = THCTensor_(newContiguous)(state, tensor);
THCTensor_(resize1d)(state, tensorContig, totalElements);
if (tensor != tensorContig) {
THCTensor_(resize1d)(state, tensor, totalElements);
}
// FIXME: there appears to be a bug in Thrust (CUDA 7.0) for mixed
// iterator prefix sums? Convert `mask` to the same datatype as what
// we're accumulating the prefix sum in (int64_t) to get around it
THCudaLongTensor* maskLong = THCudaLongTensor_new(state);
at::IntList maskSizes = mask->sizes();
THCudaLongTensor_resize(state, maskLong, maskSizes, {});
THCTensor_(copy)(state, maskLong, mask);
// Use a prefix sum to determine the output locations of the masked elements
THCudaLongTensor* maskPrefixSum = THCudaLongTensor_new(state);
THCudaLongTensor_resize(state, maskPrefixSum, maskSizes, {});
THCThrustAllocator thrustAlloc(state);
thrust::device_ptr<int64_t>
maskData(THCudaLongTensor_data(state, maskLong));
thrust::device_ptr<int64_t>
maskPrefixSumData(THCudaLongTensor_data(state, maskPrefixSum));
thrust::exclusive_scan(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
maskData,
maskData + THCudaLongTensor_nElement(state, maskLong),
maskPrefixSumData);
// Then copy over the masked elements at their desired output index
bool status = THC_pointwiseApply3<uint8_t, int64_t, scalar_t>(
state, mask, maskPrefixSum,
src, TensorMaskedSelectOp<scalar_t, unsigned char, int64_t>(
THCTensor_(data)(state, tensor)));
THCudaLongTensor_free(state, maskLong);
THCudaLongTensor_free(state, maskPrefixSum);
if (tensor != tensorContig) {
THCTensor_(freeCopyTo)(state, tensorContig, tensor);
} else {
THCTensor_(free)(state, tensorContig);
}
THArgCheck(status, 2, CUTORCH_DIM_WARNING);
THCudaCheck(cudaGetLastError());
}
// FIXME: remove now that we have THCudaByteTensor?
void THCTensor_(maskedSelectByte)(THCState* state,
THCTensor *tensor, THCTensor *src, THByteTensor *mask)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src));
THCudaByteTensor* maskCuda = THCudaByteTensor_newWithSize(state, mask->sizes(), {});
THCTensor_(copy)(state, maskCuda, mask);
THCTensor_(maskedSelect)(state, tensor, src, maskCuda);
THCudaByteTensor_free(state, maskCuda);
}
#endif
|
750107408582c2fb241be031e364b1e5e4b7152e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#define DLL_EXPORTS
#include "cuda_tc.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <vector>
#include <list>
#include <iostream>
#include <fstream>
#include <sstream>
using thrust::device_vector;
using thrust::host_vector;
#define std_vector std::vector
template<typename T>
static T* raw(std_vector<T>& vector)
{
return &vector[0];
}
#ifdef __host_solver__
#define __solver_access__ __host__ __device__
#else
#define __solver_access__ __device__
#endif
static const char* filePath = "/home/igor/Development/cuda-workspace/cuda_tetriscube/data/solve_call";
bool fileExist(const std::string& name) {
std::ifstream file(name.c_str());
return file;
}
std::string nextFile() {
std::ostringstream s;
int n = 0;
do {
s.clear();
s.str("");
s << filePath << n;
n++;
} while (fileExist(s.str()));
return s.str();
}
std::string lastFile() {
std::ostringstream s;
std::string last;
int n = 0;
do {
last = s.str();
s.clear();
s.str("");
s << filePath << n;
n++;
} while (fileExist(s.str()));
return last;
}
void write(std::ofstream& file, const void* value, size_t size) {
file.write(static_cast<const char*>(value), size);
}
void read(std::ifstream& file, void* value, size_t size) {
file.read(static_cast<char*>(value), size);
}
void SaveToFile(
const uint64_t* candidates,
const int* candidateOffsets,
solverStatus* solversStatus,
int minPiece,
int maxSolutions,
int solversCount)
{
// piecesCount = 12,
// gridSize = 64,
// solutionBufferSize = 5
// candidates[candidateOffsets[gridSize * piecesCount]]
// candidateOffsets[gridSize * piecesCount + 1] (gridIndex * piecesCount + pieceIndex)
// solversStatus[solversCount]
std::string name = nextFile();
std::ofstream file(name.c_str(), std::ios::binary);
char v0[] = { (char)sizeof(int), (char)sizeof(uint64_t) };
write(file, v0, sizeof(v0));
int v1[] = { sizeof(solverStatus), piecesCount, gridSize, 0 };
write(file, v1, sizeof(v1));
write(file, candidateOffsets, (gridSize * piecesCount + 1) * sizeof(int));
write(file, candidates, candidateOffsets[gridSize * piecesCount] * sizeof(uint64_t));
int v2[] = { minPiece, maxSolutions, solversCount };
write(file, v2, sizeof(v2));
write(file, solversStatus, solversCount * sizeof(solverStatus));
}
static void TestEqual(int actual, int expected, const std::string& varname) {
if (actual != expected) {
std::ostringstream s;
s << "Invalid " << varname << " (actual " << actual << ", expected " << expected << ")";
throw s.str();
}
}
void LoadFromFile(
std_vector<uint64_t>& candidates,
std_vector<int>& candidateOffsets,
std_vector<solverStatus>& solversStatus,
int& minPiece,
int& maxSolutions,
int& solversCount)
{
// piecesCount = 12,
// gridSize = 64,
// solutionBufferSize = 5
// candidates[candidateOffsets[gridSize * piecesCount]]
// candidateOffsets[gridSize * piecesCount + 1] (gridIndex * piecesCount + pieceIndex)
// solversStatus[solversCount]
std::string name = lastFile();
if (name.empty()) {
throw "No last file";
}
std::ifstream file(name.c_str(), std::ios::binary);
char v0[2];
read(file, v0, sizeof(v0));
TestEqual(v0[0], sizeof(int), "int size");
TestEqual(v0[1], sizeof(uint64_t), "uint64_t size");
int v1[4];
read(file, v1, sizeof(v1));
TestEqual(v1[0], sizeof(solverStatus), "solverStatus size");
TestEqual(v1[1], piecesCount, "piecesCount");
TestEqual(v1[2], gridSize, "gridSize");
// dummy v1[3]
candidateOffsets.resize(gridSize * piecesCount + 1);
read(file, raw(candidateOffsets), candidateOffsets.size() * sizeof(int));
candidates.resize(candidateOffsets[gridSize * piecesCount]);
read(file, raw(candidates), candidates.size() * sizeof(uint64_t));
int v2[3];
read(file, v2, sizeof(v2));
minPiece = v2[0];
maxSolutions = v2[1];
solversCount = v2[2];
solversStatus.resize(solversCount);
read(file, raw(solversStatus), solversStatus.size() * sizeof(solverStatus));
}
class solverStata {
public:
solverStata(solverStatus* status, int solversCount) {
grid = getMember(status, solversCount, &solverStatus::grid);
actualPiece = getMember(status, solversCount, &solverStatus::actualPiece);
for (int i = 0; i < piecesCount; i++) {
position[i] = getMember(status, solversCount, &solverStatus::position, i);
currentCandidatesIndex[i] = getMember(status, solversCount, &solverStatus::currentCandidatesIndex, i);
permutatorIndices[i] = getMember(status, solversCount, &solverStatus::permutatorIndices, i);
permutatorObjects[i] = getMember(status, solversCount, &solverStatus::permutatorObjects, i);
}
}
private:
template<typename T>
static host_vector<T> getMember(solverStatus* status, int solversCount, T solverStatus::*member) {
host_vector<T> result(solversCount);
for (int i = 0; i < solversCount; i++)
result[i] = status[i].*member;
return result;
}
template<typename T>
static host_vector<T> getMember(solverStatus* status, int solversCount, T (solverStatus::*member)[piecesCount], int index) {
host_vector<T> result(solversCount);
for (int i = 0; i < solversCount; i++)
result[i] = (status[i].*member)[index];
return result;
}
device_vector<uint64_t> grid;
device_vector<int> actualPiece;
device_vector<int> position[piecesCount];
device_vector<int> currentCandidatesIndex[piecesCount];
device_vector<int> permutatorIndices[piecesCount];
device_vector<int> permutatorObjects[piecesCount];
};
enum {
solutionsBufferSize = 10000
};
class solver
{
public:
__solver_access__
solver(const uint64_t* candidates,
const int* candidateOffsets,
solverStatus& status,
solution* solutions,
int* solutionsCount)
: candidates(candidates),
candidateOffsets(candidateOffsets),
status(status),
solutions(solutions),
solutionsCount(solutionsCount) {
}
__solver_access__
void DoStep();
#ifdef __host_solver__
__host__
void Split(int level,
const uint64_t* candidates,
const int* candidateOffsets,
solverStatus* statusBuffer,
int& n) const {
solverStatus tempStatus = status;
solver temp(candidates, candidateOffsets, tempStatus, 0, 0);
while (temp.Next()) {
solverStatus ss = tempStatus;
solver s(candidates, candidateOffsets, ss, 0, 0);
s.IncreaseActualPiece();
if (level <= 1) {
if (statusBuffer)
statusBuffer[n] = ss;
n++;
}
else {
s.Split(level - 1, candidates, candidateOffsets, statusBuffer, n);
}
}
}
#endif
private:
__solver_access__
static bool IsValid(uint64_t candidate, uint64_t grid);
__solver_access__
void Swap(int i, int j);
__solver_access__
bool Next();
__solver_access__
void DecreaseActualPiece();
__solver_access__
void IncreaseActualPiece();
__solver_access__
void AddSolution();
const uint64_t* candidates;
const int* candidateOffsets;
solverStatus& status;
solution* solutions;
int* solutionsCount;
};
__solver_access__
bool solver::IsValid(uint64_t candidate, uint64_t grid)
{
return (candidate & grid) == 0;
}
__solver_access__
void solver::Swap(int i, int j)
{
if (i == j)
return;
int* objects = status.permutatorObjects;
int t = objects[i];
objects[i] = objects[j];
objects[j] = t;
}
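// Next() looks for the next candidate placement at the current grid position:
// it walks the pieces that have not been placed yet (tracked via the permutator
// swap state) and, for each, scans that piece's candidate list for a placement
// that does not overlap the occupied cells of the grid. Returns false when no
// remaining piece fits at this position.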
__solver_access__
bool solver::Next()
{
//const int& actualPiece = status.actualPiece;
int actualPiece = status.actualPiece;
int candidatesOffset = status.position[actualPiece] * piecesCount;
//int index = status.currentCandidatesIndex[actualPiece];
int& index = status.currentCandidatesIndex[actualPiece];
index++;
int candidateNumber = actualPiece + status.permutatorIndices[actualPiece];
Swap(actualPiece, candidateNumber);
for (; candidateNumber < piecesCount; candidateNumber++) {
int candidatesIndex = candidatesOffset + status.permutatorObjects[candidateNumber];
int min = candidateOffsets[candidatesIndex];
int max = candidateOffsets[candidatesIndex + 1];
if (index < min)
index = min;
while (index < max) {
if (IsValid(candidates[index], status.grid)) {
//status.currentCandidatesIndex[actualPiece] = index;
status.permutatorIndices[actualPiece] = candidateNumber - actualPiece;
Swap(actualPiece, candidateNumber);
return true;
}
index++;
}
index = -1;
}
status.permutatorIndices[actualPiece] = 0;
//status.currentCandidatesIndex[actualPiece] = -1;
return false;
}
__solver_access__
void solver::DecreaseActualPiece() {
int& actualPiece = status.actualPiece;
actualPiece--;
if (actualPiece >= 0) {
int pieceIndex = status.currentCandidatesIndex[actualPiece];
status.grid &= ~candidates[pieceIndex];
}
}
__solver_access__
void solver::IncreaseActualPiece() {
int& actualPiece = status.actualPiece;
int pieceIndex = status.currentCandidatesIndex[actualPiece];
status.grid |= candidates[pieceIndex];
actualPiece++;
if (actualPiece < piecesCount) {
int pos = status.position[actualPiece - 1] + 1;
while ((status.grid & (1ULL << pos)) > 0)
pos++;
status.position[actualPiece] = pos;
#ifndef __CUDA_ARCH__
// static int x = 0;
// if (x++ < 15)
// std::cout << "pos " << pos << std::endl;
#endif
}
}
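// AddSolution() reserves a unique slot in the shared solutions buffer with an
// atomic increment; if the buffer is already full the solution is still counted
// but its candidate indices are dropped.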
__solver_access__
void solver::AddSolution() {
#ifdef __CUDA_ARCH__
int index = atomicAdd(solutionsCount, 1);
#else
int index = *solutionsCount;
++(*solutionsCount);
#endif
if (index >= solutionsBufferSize)
return;
const int* current = status.currentCandidatesIndex;
int* solution = solutions[index].candidateIndex;
for (int i = 0; i < piecesCount; i++) {
solution[i] = current[i];
}
}
__solver_access__
void solver::DoStep() {
if (!Next()) {
DecreaseActualPiece();
} else {
IncreaseActualPiece();
if (status.actualPiece == piecesCount) {
AddSolution();
DecreaseActualPiece();
}
}
}
// todo: try kernel steps
// todo: try local data instead of shared (copy all)
__solver_access__
bool SolveSingle(
const uint64_t* candidates,
const int* candidateOffsets,
solverStatus& status,
solution* solutions,
int* solutionsCount,
int minPiece,
int maxSteps)
{
solver solver(candidates, candidateOffsets, status, solutions, solutionsCount);
int step = 0;
while (step++ < maxSteps) {
solver.DoStep();
if (status.actualPiece < minPiece) {
return true;
}
}
return false;
}
__global__
void SolveKernel(
const uint64_t* candidates,
const int* candidateOffsets,
solverStatus* solversStatus,
solution* solutions,
int* solutionsCount,
int minPiece,
int solversCount,
int maxSteps)
{
// candidates[candidateOffsets[gridSize * piecesCount]]
// candidateOffsets[gridSize * piecesCount + 1] (gridIndex * piecesCount + pieceIndex)
// solversStatus[solversCount]
// solutions[solversCount]
const int solverIndex = threadIdx.x + blockDim.x * blockIdx.x;
if (solverIndex >= solversCount)
return;
solverStatus status(solversStatus[solverIndex]);
SolveSingle(
candidates,
candidateOffsets,
status,
solutions,
solutionsCount,
minPiece,
maxSteps);
solversStatus[solverIndex] = status;
}
__global__
void RemoveFinishedKernelA(
solverStatus* solversStatus,
int minPiece,
int solversCount,
int* newSolversCount,
int* solverToGrab)
{
const int solverIndex = threadIdx.x + blockDim.x * blockIdx.x;
if (solverIndex >= solversCount)
return;
bool finished = solversStatus[solverIndex].actualPiece < minPiece;
solverToGrab[solverIndex] = finished ? (atomicSub(newSolversCount, 1) - 1) : -1;
}
__global__
void RemoveFinishedKernelB(
solverStatus* solversStatus,
int minPiece,
int solversCount,
int* solverToGrab)
{
const int solverIndex = threadIdx.x + blockDim.x * blockIdx.x;
if (solverIndex >= solversCount) // must be the reduced count!
return;
int grab = solverToGrab[solverIndex];
if (grab < 0)
return;
while (solverToGrab[grab] >= 0)
grab = solverToGrab[grab];
solversStatus[solverIndex] = solversStatus[grab];
}
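// Illustrative note on the two-kernel compaction above:
// - RemoveFinishedKernelA gives every finished solver a distinct "grab" index taken from
// the tail of the array (the post-decrement value of newSolversCount); unfinished
// solvers get -1.
// - RemoveFinishedKernelB runs only over the first newSolversCount slots; a finished slot
// in that front region follows the grab chain until it reaches an entry of -1, i.e. an
// unfinished solver parked in the tail, and copies that state forward.
// Because each grab index is unique, every slot has at most one incoming link, so a chain
// started from the front region can never enter a cycle.
// Hypothetical trace with 8 solvers where solvers 2, 5 and 6 finish (newSolversCount drops
// to 5): kernel A might produce solverToGrab = [-1,-1,5,-1,-1,6,7,-1]; kernel B then only
// touches slot 2, follows 2 -> 5 -> 6 -> 7 and copies solversStatus[7] into slot 2.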
template<typename T>
__host__
static T* raw(device_vector<T>& vector)
{
return thrust::raw_pointer_cast(vector.data());
}
int SolveGPU_T(
const uint64_t* candidates,
const int* candidateOffsets,
solverStatus* solversStatus,
solution* preallocatedSolutions,
std::list<solution>* solutionsList,
int minPiece,
int maxSolutions,
int solversCount)
{
// solverStata sx(solversStatus, solversCount);
const int candidatesCount = gridSize * piecesCount;
device_vector<uint64_t> dCandidates(candidates, candidates + candidateOffsets[candidatesCount]);
device_vector<int> dCandidateOffsets(candidateOffsets, candidateOffsets + candidatesCount);
device_vector<solverStatus> dSolversStatus(solversStatus, solversStatus + solversCount);
device_vector<solution> dSolutions(solutionsBufferSize);
device_vector<int> dSolutionsCount(1);
device_vector<int> dSolversCount(1);
dSolversCount[0] = solversCount;
device_vector<int> dSolverToGrab(solversCount);
std::cout << " pass: " << -1 << ", solvers " << solversCount << std::endl;
const uint64_t* csr = raw(dCandidates);
const int* cosr = raw(dCandidateOffsets);
solverStatus* sssr = raw(dSolversStatus);
solution* ssr = raw(dSolutions);
int* ssc = raw(dSolutionsCount);
int* dsc = raw(dSolversCount);
int* dstg = raw(dSolverToGrab);
const int blockSize = 512;
hipEvent_t startT;
hipEventCreate(&startT);
hipEventRecord(startT, 0);
do {
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
for (int i = 0; i < 5; i++) {
const int steps = 1000 + (10 * solversCount / dSolversCount[0]);
hipLaunchKernelGGL(( SolveKernel), dim3((dSolversCount[0]+blockSize-1)/blockSize), dim3(blockSize), 0, 0, csr, cosr, sssr, ssr, ssc, minPiece, dSolversCount[0], steps);
hipLaunchKernelGGL(( RemoveFinishedKernelA), dim3((dSolversCount[0]+blockSize-1)/blockSize), dim3(blockSize), 0, 0, sssr, minPiece, dSolversCount[0], dsc, dstg);
hipLaunchKernelGGL(( RemoveFinishedKernelB), dim3((dSolversCount[0]+blockSize-1)/blockSize), dim3(blockSize), 0, 0, sssr, minPiece, dSolversCount[0], dstg);
// std::cout << " pass:";
std::cout << " " << i << std::flush;
// std::cout << ", solvers " << dSolversCount[0];
// std::cout << std::endl;
if (dSolversCount[0] == 0)
break;
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
std::cout << std::endl << "Cycle time: " << elapsedTime / 1000 << " s" << std::endl;
hipEventElapsedTime(&elapsedTime, startT, stop);
std::cout << "Solve time: " << elapsedTime / 1000 << " s" << std::endl;
int solutionsC = dSolutionsCount[0];
std::cout << "Solutions: " << solutionsC << ", solvers finished "
<< (solversCount - dSolversCount[0]) << "/" << solversCount << std::endl;
hipEventDestroy(start);
hipEventDestroy(stop);
}
while (dSolversCount[0] > 300000);
hipEventDestroy(startT);
if (preallocatedSolutions) {
int count = ::min(maxSolutions, (int)dSolutionsCount[0]);
thrust::copy(dSolutions.begin(), dSolutions.begin() + count, preallocatedSolutions);
}
if (solutionsList) {
host_vector<solution> sols(dSolutions.begin(), dSolutions.begin() + dSolutionsCount[0]);
solutionsList->insert(solutionsList->end(), sols.begin(), sols.end());
}
return (int)dSolutionsCount[0];
}
int SolveGPU_(
const uint64_t* candidates,
const int* candidateOffsets,
solverStatus* solversStatus,
solution* preallocatedSolutions,
std::list<solution>* solutionsList,
int minPiece,
int maxSolutions,
int solversCount)
{
#ifndef __APP__
SaveToFile(candidates, candidateOffsets, solversStatus, minPiece, maxSolutions, solversCount);
#endif
return SolveGPU_T(
candidates,
candidateOffsets,
solversStatus,
preallocatedSolutions,
solutionsList,
minPiece,
maxSolutions,
solversCount);
}
int SolveGPU(
const uint64_t* candidates,
const int* candidateOffsets,
solverStatus* solversStatus,
solution* solutions,
int minPiece,
int maxSolutions,
int solversCount)
{
#ifndef __APP__
SaveToFile(candidates, candidateOffsets, solversStatus, minPiece, maxSolutions, solversCount);
#endif
return SolveGPU_(candidates, candidateOffsets, solversStatus, solutions, 0, minPiece, maxSolutions, solversCount);
}
#ifdef __host_solver__
int SolveCPU(
const uint64_t* candidates,
const int* candidateOffsets,
solverStatus* solversStatus,
solution* solutions,
int minPiece,
int maxSolutions)
{
int count = 0;
while (SolveSingle(
candidates,
candidateOffsets,
*solversStatus,
solutions,
&count,
minPiece,
100000));
return count;
}
#endif
static void Init(solverStatus& status) {
status.actualPiece = 0;
status.grid = 0;
status.position[0] = 0;
for (int i = 0; i < piecesCount; i++) {
status.currentCandidatesIndex[i] = -1;
status.permutatorIndices[i] = 0;
status.permutatorObjects[i] = i;
status.position[i] = 0;
}
}
int SplitCPU(int splitLevel,
const uint64_t* candidates,
const int* candidateOffsets,
solverStatus* status) {
if (splitLevel < 1) {
if (status)
Init(*status);
return 1;
}
int n = 0;
solverStatus ss;
Init(ss);
solver s(candidates, candidateOffsets, ss, 0, 0);
s.Split(splitLevel, candidates, candidateOffsets, status, n);
return n;
}
#ifdef __APP__
int main()
{
std::cout << "main" << std::endl;
std_vector<uint64_t> candidates;
std_vector<int> candidateOffsets;
std_vector<solverStatus> solversStatus;
int minPiece;
int maxSolutions;
int solversCount;
try {
LoadFromFile(candidates, candidateOffsets, solversStatus, minPiece, maxSolutions, solversCount);
} catch (std::string& error) {
std::cout << error << std::endl;
return 1;
}
std::list<solution> solutionsList;
SolveGPU_(
raw(candidates),
raw(candidateOffsets),
raw(solversStatus),
0,
&solutionsList,
minPiece,
maxSolutions,
solversCount);
std::cout << solutionsList.size() << std::endl;
return 0;
}
#endif
| 750107408582c2fb241be031e364b1e5e4b7152e.cu | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#define DLL_EXPORTS
#include "cuda_tc.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <vector>
#include <list>
#include <iostream>
#include <fstream>
#include <sstream>
using thrust::device_vector;
using thrust::host_vector;
#define std_vector std::vector
template<typename T>
static T* raw(std_vector<T>& vector)
{
return &vector[0];
}
#ifdef __host_solver__
#define __solver_access__ __host__ __device__
#else
#define __solver_access__ __device__
#endif
static const char* filePath = "/home/igor/Development/cuda-workspace/cuda_tetriscube/data/solve_call";
bool fileExist(const std::string& name) {
std::ifstream file(name.c_str());
return file;
}
std::string nextFile() {
std::ostringstream s;
int n = 0;
do {
s.clear();
s.str("");
s << filePath << n;
n++;
} while (fileExist(s.str()));
return s.str();
}
std::string lastFile() {
std::ostringstream s;
std::string last;
int n = 0;
do {
last = s.str();
s.clear();
s.str("");
s << filePath << n;
n++;
} while (fileExist(s.str()));
return last;
}
void write(std::ofstream& file, const void* value, size_t size) {
file.write(static_cast<const char*>(value), size);
}
void read(std::ifstream& file, void* value, size_t size) {
file.read(static_cast<char*>(value), size);
}
void SaveToFile(
const uint64_t* candidates,
const int* candidateOffsets,
solverStatus* solversStatus,
int minPiece,
int maxSolutions,
int solversCount)
{
// piecesCount = 12,
// gridSize = 64,
// solutionBufferSize = 5
// candidates[candidateOffsets[gridSize * piecesCount]]
// candidateOffsets[gridSize * piecesCount + 1] (gridIndex * piecesCount + pieceIndex)
// solversStatus[solversCount]
std::string name = nextFile();
std::ofstream file(name.c_str(), std::ios::binary);
char v0[] = { (char)sizeof(int), (char)sizeof(uint64_t) };
write(file, v0, sizeof(v0));
int v1[] = { sizeof(solverStatus), piecesCount, gridSize, 0 };
write(file, v1, sizeof(v1));
write(file, candidateOffsets, (gridSize * piecesCount + 1) * sizeof(int));
write(file, candidates, candidateOffsets[gridSize * piecesCount] * sizeof(uint64_t));
int v2[] = { minPiece, maxSolutions, solversCount };
write(file, v2, sizeof(v2));
write(file, solversStatus, solversCount * sizeof(solverStatus));
}
static void TestEqual(int actual, int expected, const std::string& varname) {
if (actual != expected) {
std::ostringstream s;
s << "Invalid " << varname << " (actual " << actual << ", expected " << expected << ")";
throw s.str();
}
}
void LoadFromFile(
std_vector<uint64_t>& candidates,
std_vector<int>& candidateOffsets,
std_vector<solverStatus>& solversStatus,
int& minPiece,
int& maxSolutions,
int& solversCount)
{
// piecesCount = 12,
// gridSize = 64,
// solutionBufferSize = 5
// candidates[candidateOffsets[gridSize * piecesCount]]
// candidateOffsets[gridSize * piecesCount + 1] (gridIndex * piecesCount + pieceIndex)
// solversStatus[solversCount]
std::string name = lastFile();
if (name.empty()) {
throw "No last file";
}
std::ifstream file(name.c_str(), std::ios::binary);
char v0[2];
read(file, v0, sizeof(v0));
TestEqual(v0[0], sizeof(int), "int size");
TestEqual(v0[1], sizeof(uint64_t), "uint64_t size");
int v1[4];
read(file, v1, sizeof(v1));
TestEqual(v1[0], sizeof(solverStatus), "solverStatus size");
TestEqual(v1[1], piecesCount, "piecesCount");
TestEqual(v1[2], gridSize, "gridSize");
// dummy v1[3]
candidateOffsets.resize(gridSize * piecesCount + 1);
read(file, raw(candidateOffsets), candidateOffsets.size() * sizeof(int));
candidates.resize(candidateOffsets[gridSize * piecesCount]);
read(file, raw(candidates), candidates.size() * sizeof(uint64_t));
int v2[3];
read(file, v2, sizeof(v2));
minPiece = v2[0];
maxSolutions = v2[1];
solversCount = v2[2];
solversStatus.resize(solversCount);
read(file, raw(solversStatus), solversStatus.size() * sizeof(solverStatus));
}
class solverStata {
public:
solverStata(solverStatus* status, int solversCount) {
grid = getMember(status, solversCount, &solverStatus::grid);
actualPiece = getMember(status, solversCount, &solverStatus::actualPiece);
for (int i = 0; i < piecesCount; i++) {
position[i] = getMember(status, solversCount, &solverStatus::position, i);
currentCandidatesIndex[i] = getMember(status, solversCount, &solverStatus::currentCandidatesIndex, i);
permutatorIndices[i] = getMember(status, solversCount, &solverStatus::permutatorIndices, i);
permutatorObjects[i] = getMember(status, solversCount, &solverStatus::permutatorObjects, i);
}
}
private:
template<typename T>
static host_vector<T> getMember(solverStatus* status, int solversCount, T solverStatus::*member) {
host_vector<T> result(solversCount);
for (int i = 0; i < solversCount; i++)
result[i] = status[i].*member;
return result;
}
template<typename T>
static host_vector<T> getMember(solverStatus* status, int solversCount, T (solverStatus::*member)[piecesCount], int index) {
host_vector<T> result(solversCount);
for (int i = 0; i < solversCount; i++)
result[i] = (status[i].*member)[index];
return result;
}
device_vector<uint64_t> grid;
device_vector<int> actualPiece;
device_vector<int> position[piecesCount];
device_vector<int> currentCandidatesIndex[piecesCount];
device_vector<int> permutatorIndices[piecesCount];
device_vector<int> permutatorObjects[piecesCount];
};
enum {
solutionsBufferSize = 10000
};
class solver
{
public:
__solver_access__
solver(const uint64_t* candidates,
const int* candidateOffsets,
solverStatus& status,
solution* solutions,
int* solutionsCount)
: candidates(candidates),
candidateOffsets(candidateOffsets),
status(status),
solutions(solutions),
solutionsCount(solutionsCount) {
}
__solver_access__
void DoStep();
#ifdef __host_solver__
__host__
void Split(int level,
const uint64_t* candidates,
const int* candidateOffsets,
solverStatus* statusBuffer,
int& n) const {
solverStatus tempStatus = status;
solver temp(candidates, candidateOffsets, tempStatus, 0, 0);
while (temp.Next()) {
solverStatus ss = tempStatus;
solver s(candidates, candidateOffsets, ss, 0, 0);
s.IncreaseActualPiece();
if (level <= 1) {
if (statusBuffer)
statusBuffer[n] = ss;
n++;
}
else {
s.Split(level - 1, candidates, candidateOffsets, statusBuffer, n);
}
}
}
#endif
private:
__solver_access__
static bool IsValid(uint64_t candidate, uint64_t grid);
__solver_access__
void Swap(int i, int j);
__solver_access__
bool Next();
__solver_access__
void DecreaseActualPiece();
__solver_access__
void IncreaseActualPiece();
__solver_access__
void AddSolution();
const uint64_t* candidates;
const int* candidateOffsets;
solverStatus& status;
solution* solutions;
int* solutionsCount;
};
__solver_access__
bool solver::IsValid(uint64_t candidate, uint64_t grid)
{
return (candidate & grid) == 0;
}
__solver_access__
void solver::Swap(int i, int j)
{
if (i == j)
return;
int* objects = status.permutatorObjects;
int t = objects[i];
objects[i] = objects[j];
objects[j] = t;
}
__solver_access__
bool solver::Next()
{
//const int& actualPiece = status.actualPiece;
int actualPiece = status.actualPiece;
int candidatesOffset = status.position[actualPiece] * piecesCount;
//int index = status.currentCandidatesIndex[actualPiece];
int& index = status.currentCandidatesIndex[actualPiece];
index++;
int candidateNumber = actualPiece + status.permutatorIndices[actualPiece];
Swap(actualPiece, candidateNumber);
for (; candidateNumber < piecesCount; candidateNumber++) {
int candidatesIndex = candidatesOffset + status.permutatorObjects[candidateNumber];
int min = candidateOffsets[candidatesIndex];
int max = candidateOffsets[candidatesIndex + 1];
if (index < min)
index = min;
while (index < max) {
if (IsValid(candidates[index], status.grid)) {
//status.currentCandidatesIndex[actualPiece] = index;
status.permutatorIndices[actualPiece] = candidateNumber - actualPiece;
Swap(actualPiece, candidateNumber);
return true;
}
index++;
}
index = -1;
}
status.permutatorIndices[actualPiece] = 0;
//status.currentCandidatesIndex[actualPiece] = -1;
return false;
}
__solver_access__
void solver::DecreaseActualPiece() {
int& actualPiece = status.actualPiece;
actualPiece--;
if (actualPiece >= 0) {
int pieceIndex = status.currentCandidatesIndex[actualPiece];
status.grid &= ~candidates[pieceIndex];
}
}
__solver_access__
void solver::IncreaseActualPiece() {
int& actualPiece = status.actualPiece;
int pieceIndex = status.currentCandidatesIndex[actualPiece];
status.grid |= candidates[pieceIndex];
actualPiece++;
if (actualPiece < piecesCount) {
int pos = status.position[actualPiece - 1] + 1;
while ((status.grid & (1ULL << pos)) > 0)
pos++;
status.position[actualPiece] = pos;
#ifndef __CUDA_ARCH__
// static int x = 0;
// if (x++ < 15)
// std::cout << "pos " << pos << std::endl;
#endif
}
}
__solver_access__
void solver::AddSolution() {
#ifdef __CUDA_ARCH__
int index = atomicAdd(solutionsCount, 1);
#else
int index = *solutionsCount;
++(*solutionsCount);
#endif
if (index >= solutionsBufferSize)
return;
const int* current = status.currentCandidatesIndex;
int* solution = solutions[index].candidateIndex;
for (int i = 0; i < piecesCount; i++) {
solution[i] = current[i];
}
}
__solver_access__
void solver::DoStep() {
if (!Next()) {
DecreaseActualPiece();
} else {
IncreaseActualPiece();
if (status.actualPiece == piecesCount) {
AddSolution();
DecreaseActualPiece();
}
}
}
// todo: try kernel steps
// todo: try local data instead of shared (copy all)
__solver_access__
bool SolveSingle(
const uint64_t* candidates,
const int* candidateOffsets,
solverStatus& status,
solution* solutions,
int* solutionsCount,
int minPiece,
int maxSteps)
{
solver solver(candidates, candidateOffsets, status, solutions, solutionsCount);
int step = 0;
while (step++ < maxSteps) {
solver.DoStep();
if (status.actualPiece < minPiece) {
return true;
}
}
return false;
}
__global__
void SolveKernel(
const uint64_t* candidates,
const int* candidateOffsets,
solverStatus* solversStatus,
solution* solutions,
int* solutionsCount,
int minPiece,
int solversCount,
int maxSteps)
{
// candidates[candidateOffsets[gridSize * piecesCount]]
// candidateOffsets[gridSize * piecesCount + 1] (gridIndex * piecesCount + pieceIndex)
// solversStatus[solversCount]
// solutions[solutionsBufferSize]
const int solverIndex = threadIdx.x + blockDim.x * blockIdx.x;
if (solverIndex >= solversCount)
return;
solverStatus status(solversStatus[solverIndex]);
SolveSingle(
candidates,
candidateOffsets,
status,
solutions,
solutionsCount,
minPiece,
maxSteps);
solversStatus[solverIndex] = status;
}
__global__
void RemoveFinishedKernelA(
solverStatus* solversStatus,
int minPiece,
int solversCount,
int* newSolversCount,
int* solverToGrab)
{
const int solverIndex = threadIdx.x + blockDim.x * blockIdx.x;
if (solverIndex >= solversCount)
return;
bool finished = solversStatus[solverIndex].actualPiece < minPiece;
solverToGrab[solverIndex] = finished ? (atomicSub(newSolversCount, 1) - 1) : -1;
}
__global__
void RemoveFinishedKernelB(
solverStatus* solversStatus,
int minPiece,
int solversCount,
int* solverToGrab)
{
const int solverIndex = threadIdx.x + blockDim.x * blockIdx.x;
if (solverIndex >= solversCount) // must be the reduced count!
return;
int grab = solverToGrab[solverIndex];
if (grab < 0)
return;
while (solverToGrab[grab] >= 0)
grab = solverToGrab[grab];
solversStatus[solverIndex] = solversStatus[grab];
}
template<typename T>
__host__
static T* raw(device_vector<T>& vector)
{
return thrust::raw_pointer_cast(vector.data());
}
int SolveGPU_T(
const uint64_t* candidates,
const int* candidateOffsets,
solverStatus* solversStatus,
solution* preallocatedSolutions,
std::list<solution>* solutionsList,
int minPiece,
int maxSolutions,
int solversCount)
{
// solverStata sx(solversStatus, solversCount);
const int candidatesCount = gridSize * piecesCount;
device_vector<uint64_t> dCandidates(candidates, candidates + candidateOffsets[candidatesCount]);
device_vector<int> dCandidateOffsets(candidateOffsets, candidateOffsets + candidatesCount);
device_vector<solverStatus> dSolversStatus(solversStatus, solversStatus + solversCount);
device_vector<solution> dSolutions(solutionsBufferSize);
device_vector<int> dSolutionsCount(1);
device_vector<int> dSolversCount(1);
dSolversCount[0] = solversCount;
device_vector<int> dSolverToGrab(solversCount);
std::cout << " pass: " << -1 << ", solvers " << solversCount << std::endl;
const uint64_t* csr = raw(dCandidates);
const int* cosr = raw(dCandidateOffsets);
solverStatus* sssr = raw(dSolversStatus);
solution* ssr = raw(dSolutions);
int* ssc = raw(dSolutionsCount);
int* dsc = raw(dSolversCount);
int* dstg = raw(dSolverToGrab);
const int blockSize = 512;
cudaEvent_t startT;
cudaEventCreate(&startT);
cudaEventRecord(startT, 0);
do {
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
for (int i = 0; i < 5; i++) {
const int steps = 1000 + (10 * solversCount / dSolversCount[0]);
SolveKernel<<<(dSolversCount[0]+blockSize-1)/blockSize, blockSize>>>(csr, cosr, sssr, ssr, ssc, minPiece, dSolversCount[0], steps);
RemoveFinishedKernelA<<<(dSolversCount[0]+blockSize-1)/blockSize, blockSize>>>(sssr, minPiece, dSolversCount[0], dsc, dstg);
RemoveFinishedKernelB<<<(dSolversCount[0]+blockSize-1)/blockSize, blockSize>>>(sssr, minPiece, dSolversCount[0], dstg);
// std::cout << " pass:";
std::cout << " " << i << std::flush;
// std::cout << ", solvers " << dSolversCount[0];
// std::cout << std::endl;
if (dSolversCount[0] == 0)
break;
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
std::cout << std::endl << "Cycle time: " << elapsedTime / 1000 << " s" << std::endl;
cudaEventElapsedTime(&elapsedTime, startT, stop);
std::cout << "Solve time: " << elapsedTime / 1000 << " s" << std::endl;
int solutionsC = dSolutionsCount[0];
std::cout << "Solutions: " << solutionsC << ", solvers finished "
<< (solversCount - dSolversCount[0]) << "/" << solversCount << std::endl;
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
while (dSolversCount[0] > 300000);
cudaEventDestroy(startT);
if (preallocatedSolutions) {
int count = std::min(maxSolutions, (int)dSolutionsCount[0]);
thrust::copy(dSolutions.begin(), dSolutions.begin() + count, preallocatedSolutions);
}
if (solutionsList) {
host_vector<solution> sols(dSolutions.begin(), dSolutions.begin() + dSolutionsCount[0]);
solutionsList->insert(solutionsList->end(), sols.begin(), sols.end());
}
return (int)dSolutionsCount[0];
}
int SolveGPU_(
const uint64_t* candidates,
const int* candidateOffsets,
solverStatus* solversStatus,
solution* preallocatedSolutions,
std::list<solution>* solutionsList,
int minPiece,
int maxSolutions,
int solversCount)
{
#ifndef __APP__
SaveToFile(candidates, candidateOffsets, solversStatus, minPiece, maxSolutions, solversCount);
#endif
return SolveGPU_T(
candidates,
candidateOffsets,
solversStatus,
preallocatedSolutions,
solutionsList,
minPiece,
maxSolutions,
solversCount);
}
int SolveGPU(
const uint64_t* candidates,
const int* candidateOffsets,
solverStatus* solversStatus,
solution* solutions,
int minPiece,
int maxSolutions,
int solversCount)
{
#ifndef __APP__
SaveToFile(candidates, candidateOffsets, solversStatus, minPiece, maxSolutions, solversCount);
#endif
return SolveGPU_(candidates, candidateOffsets, solversStatus, solutions, 0, minPiece, maxSolutions, solversCount);
}
#ifdef __host_solver__
int SolveCPU(
const uint64_t* candidates,
const int* candidateOffsets,
solverStatus* solversStatus,
solution* solutions,
int minPiece,
int maxSolutions)
{
int count = 0;
while (SolveSingle(
candidates,
candidateOffsets,
*solversStatus,
solutions,
&count,
minPiece,
100000));
return count;
}
#endif
static void Init(solverStatus& status) {
status.actualPiece = 0;
status.grid = 0;
status.position[0] = 0;
for (int i = 0; i < piecesCount; i++) {
status.currentCandidatesIndex[i] = -1;
status.permutatorIndices[i] = 0;
status.permutatorObjects[i] = i;
status.position[i] = 0;
}
}
int SplitCPU(int splitLevel,
const uint64_t* candidates,
const int* candidateOffsets,
solverStatus* status) {
if (splitLevel < 1) {
if (status)
Init(*status);
return 1;
}
int n = 0;
solverStatus ss;
Init(ss);
solver s(candidates, candidateOffsets, ss, 0, 0);
s.Split(splitLevel, candidates, candidateOffsets, status, n);
return n;
}
#ifdef __APP__
int main()
{
std::cout << "main" << std::endl;
std_vector<uint64_t> candidates;
std_vector<int> candidateOffsets;
std_vector<solverStatus> solversStatus;
int minPiece;
int maxSolutions;
int solversCount;
try {
LoadFromFile(candidates, candidateOffsets, solversStatus, minPiece, maxSolutions, solversCount);
} catch (std::string& error) {
std::cout << error << std::endl;
return 1;
}
std::list<solution> solutionsList;
SolveGPU_(
raw(candidates),
raw(candidateOffsets),
raw(solversStatus),
0,
&solutionsList,
minPiece,
maxSolutions,
solversCount);
std::cout << solutionsList.size() << std::endl;
return 0;
}
#endif
|
230d7c14b6a8608efb08959cb07eec8dc79dbd02.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) OpenMMLab. All rights reserved
#include "prroi_pool_cuda_kernel.cuh"
#include "pytorch_cuda_helper.hpp"
void PrROIPoolForwardCUDAKernelLauncher(Tensor input, Tensor rois,
Tensor output, int pooled_height,
int pooled_width, float spatial_scale) {
int output_size = output.numel();
int channels = input.size(1);
int height = input.size(2);
int width = input.size(3);
at::hip::HIPGuardMasqueradingAsCUDA device_guard(input.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( prroi_pool_forward_cuda_kernel<float>)
, dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream,
output_size, input.data_ptr<float>(), rois.data_ptr<float>(),
output.data_ptr<float>(), pooled_height, pooled_width,
static_cast<float>(spatial_scale), channels, height, width);
AT_CUDA_CHECK(hipGetLastError());
}
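// Note: each launcher in this file assigns one GPU thread per output element, so the grid
// is sized as roughly ceil(output_size / THREADS_PER_BLOCK) blocks. GET_BLOCKS and
// THREADS_PER_BLOCK come from the included helper headers (not shown in this excerpt);
// the ceil-division form is the usual convention for these helpers, but the exact
// definition (for example any clamp on the maximum block count) lives in those headers.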
void PrROIPoolBackwardCUDAKernelLauncher(Tensor grad_output, Tensor rois,
Tensor grad_input, int pooled_height,
int pooled_width,
float spatial_scale) {
int output_size = grad_output.numel();
int channels = grad_input.size(1);
int height = grad_input.size(2);
int width = grad_input.size(3);
at::hip::HIPGuardMasqueradingAsCUDA device_guard(grad_output.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( prroi_pool_backward_cuda_kernel<float>)
, dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream,
output_size, grad_output.data_ptr<float>(), rois.data_ptr<float>(),
grad_input.data_ptr<float>(), pooled_height, pooled_width,
static_cast<float>(spatial_scale), channels, height, width);
AT_CUDA_CHECK(hipGetLastError());
}
void PrROIPoolCoorBackwardCUDAKernelLauncher(Tensor output, Tensor grad_output,
Tensor input, Tensor rois,
Tensor grad_rois,
int pooled_height,
int pooled_width,
float spatial_scale) {
int output_size = grad_output.numel();
int channels = input.size(1);
int height = input.size(2);
int width = input.size(3);
at::hip::HIPGuardMasqueradingAsCUDA device_guard(grad_output.device());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
hipLaunchKernelGGL(( prroi_pool_coor_backward_cuda_kernel<float>)
, dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream,
output_size, output.data_ptr<float>(), grad_output.data_ptr<float>(),
input.data_ptr<float>(), rois.data_ptr<float>(),
grad_rois.data_ptr<float>(), pooled_height, pooled_width,
static_cast<float>(spatial_scale), channels, height, width);
AT_CUDA_CHECK(hipGetLastError());
}
| 230d7c14b6a8608efb08959cb07eec8dc79dbd02.cu | // Copyright (c) OpenMMLab. All rights reserved
#include "prroi_pool_cuda_kernel.cuh"
#include "pytorch_cuda_helper.hpp"
void PrROIPoolForwardCUDAKernelLauncher(Tensor input, Tensor rois,
Tensor output, int pooled_height,
int pooled_width, float spatial_scale) {
int output_size = output.numel();
int channels = input.size(1);
int height = input.size(2);
int width = input.size(3);
at::cuda::CUDAGuard device_guard(input.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
prroi_pool_forward_cuda_kernel<float>
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
output_size, input.data_ptr<float>(), rois.data_ptr<float>(),
output.data_ptr<float>(), pooled_height, pooled_width,
static_cast<float>(spatial_scale), channels, height, width);
AT_CUDA_CHECK(cudaGetLastError());
}
void PrROIPoolBackwardCUDAKernelLauncher(Tensor grad_output, Tensor rois,
Tensor grad_input, int pooled_height,
int pooled_width,
float spatial_scale) {
int output_size = grad_output.numel();
int channels = grad_input.size(1);
int height = grad_input.size(2);
int width = grad_input.size(3);
at::cuda::CUDAGuard device_guard(grad_output.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
prroi_pool_backward_cuda_kernel<float>
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
output_size, grad_output.data_ptr<float>(), rois.data_ptr<float>(),
grad_input.data_ptr<float>(), pooled_height, pooled_width,
static_cast<float>(spatial_scale), channels, height, width);
AT_CUDA_CHECK(cudaGetLastError());
}
void PrROIPoolCoorBackwardCUDAKernelLauncher(Tensor output, Tensor grad_output,
Tensor input, Tensor rois,
Tensor grad_rois,
int pooled_height,
int pooled_width,
float spatial_scale) {
int output_size = grad_output.numel();
int channels = input.size(1);
int height = input.size(2);
int width = input.size(3);
at::cuda::CUDAGuard device_guard(grad_output.device());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
prroi_pool_coor_backward_cuda_kernel<float>
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>(
output_size, output.data_ptr<float>(), grad_output.data_ptr<float>(),
input.data_ptr<float>(), rois.data_ptr<float>(),
grad_rois.data_ptr<float>(), pooled_height, pooled_width,
static_cast<float>(spatial_scale), channels, height, width);
AT_CUDA_CHECK(cudaGetLastError());
}
|
4f4051775572177ecbbf08e395e30929bdee2808.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/*
* Device code.
*/
#ifndef _PARTICLES_KERNEL_H_
#define _PARTICLES_KERNEL_H_
#include <stdio.h>
#include <math.h>
#include "cutil_math.h"
#include "math_constants.h"
#include "particles_kernel.cuh"
#if USE_TEX
// textures for particle position and velocity
texture<float4, 1, hipReadModeElementType> oldPosTex;
texture<float4, 1, hipReadModeElementType> oldVelTex;
texture<uint2, 1, hipReadModeElementType> particleHashTex;
texture<uint, 1, hipReadModeElementType> cellStartTex;
texture<uint, 1, hipReadModeElementType> gridCountersTex;
texture<uint, 1, hipReadModeElementType> gridCellsTex;
#endif
__constant__ SimParams params;
// integrate particle attributes
__global__ void
integrate(float4* newPos, float4* newVel,
float4* oldPos, float4* oldVel,
float deltaTime)
{
int index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
float4 pos4 = oldPos[index];
float4 vel4 = oldVel[index];
float3 pos = make_float3(pos4);
float3 vel = make_float3(vel4);
vel += params.gravity * deltaTime;
vel *= params.globalDamping;
// new position = old position + velocity * deltaTime
pos += vel * deltaTime;
// bounce off cube sides
if (pos.x > 1.0f - params.particleRadius) { pos.x = 1.0f - params.particleRadius; vel.x *= params.boundaryDamping; }
if (pos.x < -1.0f + params.particleRadius) { pos.x = -1.0f + params.particleRadius; vel.x *= params.boundaryDamping;}
if (pos.y > 1.0f - params.particleRadius) { pos.y = 1.0f - params.particleRadius; vel.y *= params.boundaryDamping; }
if (pos.y < -1.0f + params.particleRadius) { pos.y = -1.0f + params.particleRadius; vel.y *= params.boundaryDamping;}
if (pos.z > 1.0f - params.particleRadius) { pos.z = 1.0f - params.particleRadius; vel.z *= params.boundaryDamping; }
if (pos.z < -1.0f + params.particleRadius) { pos.z = -1.0f + params.particleRadius; vel.z *= params.boundaryDamping;}
// store new position and velocity
newPos[index] = make_float4(pos, pos4.w);
newVel[index] = make_float4(vel, vel4.w);
}
// calculate position in uniform grid
__device__ int3 calcGridPos(float4 p)
{
int3 gridPos;
gridPos.x = floor((p.x - params.worldOrigin.x) / params.cellSize.x);
gridPos.y = floor((p.y - params.worldOrigin.y) / params.cellSize.y);
gridPos.z = floor((p.z - params.worldOrigin.z) / params.cellSize.z);
return gridPos;
}
// calculate address in grid from position (clamping to edges)
__device__ uint calcGridHash(int3 gridPos)
{
gridPos.x = max(0, min(gridPos.x, params.gridSize.x-1));
gridPos.y = max(0, min(gridPos.y, params.gridSize.y-1));
gridPos.z = max(0, min(gridPos.z, params.gridSize.z-1));
return __mul24(__mul24(gridPos.z, params.gridSize.y), params.gridSize.x) + __mul24(gridPos.y, params.gridSize.x) + gridPos.x;
}
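// Worked example: the hash is a plain row-major linearisation of the clamped cell
// coordinates, hash = (z * gridSize.y + y) * gridSize.x + x. Assuming a 64x64x64 grid,
// the cell (x=3, y=2, z=1) hashes to 1*64*64 + 2*64 + 3 = 4227.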
// add particle to cell using atomics
__device__ void addParticleToCell(int3 gridPos,
uint index,
uint* gridCounters,
uint* gridCells)
{
// calculate grid hash
uint gridHash = calcGridHash(gridPos);
// increment cell counter using atomics
#if defined CUDA_NO_SM_11_ATOMIC_INTRINSICS
int counter = 0;
#else
int counter = atomicAdd(&gridCounters[gridHash], 1); // returns previous value
counter = min(counter, params.maxParticlesPerCell-1);
#endif
// write particle index into this cell (very uncoalesced!)
gridCells[gridHash*params.maxParticlesPerCell + counter] = index;
}
// update uniform grid
__global__ void
updateGridD(float4* pos,
uint* gridCounters,
uint* gridCells)
{
int index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
float4 p = pos[index];
// get address in grid
int3 gridPos = calcGridPos(p);
addParticleToCell(gridPos, index, gridCounters, gridCells);
}
// calculate grid hash value for each particle
__global__ void
calcHashD(float4* pos,
uint2* particleHash)
{
int index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
float4 p = pos[index];
// get address in grid
int3 gridPos = calcGridPos(p);
uint gridHash = calcGridHash(gridPos);
// store grid hash and particle index
particleHash[index] = make_uint2(gridHash, index);
}
// rearrange particle data into sorted order, and find the start of each cell in the
// sorted hash array
__global__ void
reorderDataAndFindCellStartD(uint2* particleHash, // particle id sorted by hash
float4* oldPos,
float4* oldVel,
float4* sortedPos,
float4* sortedVel,
uint* cellStart)
{
int index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
uint2 sortedData = particleHash[index];
// Load hash data into shared memory so that we can look
// at neighboring particle's hash value without loading
// two hash values per thread
__shared__ uint sharedHash[257];
sharedHash[threadIdx.x+1] = sortedData.x;
if (index > 0 && threadIdx.x == 0)
{
// first thread in block must load neighbor particle hash
volatile uint2 prevData = particleHash[index-1];
sharedHash[0] = prevData.x;
}
__syncthreads();
if (index == 0 || sortedData.x != sharedHash[threadIdx.x])
{
cellStart[sortedData.x] = index;
}
// Now use the sorted index to reorder the pos and vel data
float4 pos = FETCH(oldPos, sortedData.y); // macro does either global read or texture fetch
float4 vel = FETCH(oldVel, sortedData.y); // see particles_kernel.cuh
__syncthreads();
sortedPos[index] = pos;
sortedVel[index] = vel;
}
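// Illustrative note: after sorting, particles sharing a grid hash are contiguous, and
// cellStart[h] ends up holding the index of the first sorted particle whose hash is h
// (found by comparing each hash with its left neighbour through the sharedHash tile).
// Hypothetical example: for a sorted hash column [4, 4, 7, 9, 9, 9] the kernel writes
// cellStart[4] = 0, cellStart[7] = 2 and cellStart[9] = 3; entries of empty cells keep
// their initial fill value (0xffffffff, which is what collideCell2 below tests for).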
// collide two spheres using DEM method
__device__ float3 collideSpheres(float4 posA, float4 posB,
float4 velA, float4 velB,
float radiusA, float radiusB,
float attraction)
{
// calculate relative position
float3 relPos;
relPos.x = posB.x - posA.x;
relPos.y = posB.y - posA.y;
relPos.z = posB.z - posA.z;
float dist = length(relPos);
float collideDist = radiusA + radiusB;
float3 force = make_float3(0.0f);
if (dist < collideDist) {
float3 norm = relPos / dist;
// relative velocity
float3 relVel;
relVel.x = velB.x - velA.x;
relVel.y = velB.y - velA.y;
relVel.z = velB.z - velA.z;
// relative tangential velocity
float3 tanVel = relVel - (dot(relVel, norm) * norm);
// spring force
force = -params.spring*(collideDist - dist) * norm;
// dashpot (damping) force
force += params.damping*relVel;
// tangential shear force
force += params.shear*tanVel;
// attraction
force += attraction*relPos;
}
return force;
}
// collide particle with all particles in a given cell
// version using grid built with atomics
__device__
float3 collideCell(int3 gridPos,
uint index,
float4 pos,
float4 vel,
float4* oldPos,
float4* oldVel,
uint* gridCounters,
uint* gridCells)
{
float3 force = make_float3(0.0f);
if ((gridPos.x < 0) || (gridPos.x > params.gridSize.x-1) ||
(gridPos.y < 0) || (gridPos.y > params.gridSize.y-1) ||
(gridPos.z < 0) || (gridPos.z > params.gridSize.z-1)) {
return force;
}
uint gridHash = calcGridHash(gridPos);
// iterate over particles in this cell
uint particlesInCell = FETCH(gridCounters, gridHash);
particlesInCell = min(particlesInCell, params.maxParticlesPerCell-1);
for(uint i=0; i<particlesInCell; i++) {
uint index2 = FETCH(gridCells, gridHash*params.maxParticlesPerCell + i);
if (index2 != index) { // check not colliding with self
float4 pos2 = FETCH(oldPos, index2);
float4 vel2 = FETCH(oldVel, index2);
// collide two spheres
float3 projVec = collideSpheres(pos, pos2, vel, vel2, params.particleRadius, params.particleRadius, params.attraction);
force += projVec;
}
}
return force;
}
// version using sorted grid
__device__
float3 collideCell2(int3 gridPos,
uint index,
float4 pos,
float4 vel,
float4* oldPos,
float4* oldVel,
uint2* particleHash,
uint* cellStart)
{
float3 force = make_float3(0.0f);
if ((gridPos.x < 0) || (gridPos.x > params.gridSize.x-1) ||
(gridPos.y < 0) || (gridPos.y > params.gridSize.y-1) ||
(gridPos.z < 0) || (gridPos.z > params.gridSize.z-1)) {
return force;
}
uint gridHash = calcGridHash(gridPos);
// get start of bucket for this cell
uint bucketStart = FETCH(cellStart, gridHash);
if (bucketStart == 0xffffffff)
return force; // cell empty
// iterate over particles in this cell
for(uint i=0; i<params.maxParticlesPerCell; i++) {
uint index2 = bucketStart + i;
uint2 cellData = FETCH(particleHash, index2);
if (cellData.x != gridHash) break; // no longer in same bucket
if (index2 != index) { // check not colliding with self
float4 pos2 = FETCH(oldPos, index2);
float4 vel2 = FETCH(oldVel, index2);
// collide two spheres
float3 projVec = collideSpheres(pos, pos2, vel, vel2, params.particleRadius, params.particleRadius, params.attraction);
force += projVec;
}
}
return force;
}
__global__ void
collideD(float4* newPos, float4* newVel,
float4* oldPos, float4* oldVel,
#if USE_SORT
uint2* particleHash,
uint* cellStart
#else
uint* gridCounters,
uint* gridCells
#endif
)
{
int index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
// read particle data from sorted arrays
float4 pos = FETCH(oldPos, index);
float4 vel = FETCH(oldVel, index);
// get address in grid
int3 gridPos = calcGridPos(pos);
float3 force = make_float3(0.0f);
// examine only neighbouring cells
for(int z=-1; z<=1; z++) {
for(int y=-1; y<=1; y++) {
for(int x=-1; x<=1; x++) {
#if USE_SORT
force += collideCell2(gridPos + make_int3(x, y, z), index, pos, vel, oldPos, oldVel, particleHash, cellStart);
#else
force += collideCell(gridPos + make_int3(x, y, z), index, pos, vel, oldPos, oldVel, gridCounters, gridCells);
#endif
}
}
}
float3 projVec = collideSpheres(pos, params.colliderPos, vel, make_float4(0.0f, 0.0f, 0.0f, 0.0f), params.particleRadius, params.colliderRadius, 0.0f);
force += projVec;
#if USE_SORT
// write new velocity back to original unsorted location
volatile uint2 sortedData = particleHash[index];
newVel[sortedData.y] = vel + make_float4(force, 0.0f);
#else
newVel[index] = vel + make_float4(force, 0.0f);
#endif
}
#endif
| 4f4051775572177ecbbf08e395e30929bdee2808.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/*
* Device code.
*/
#ifndef _PARTICLES_KERNEL_H_
#define _PARTICLES_KERNEL_H_
#include <stdio.h>
#include <math.h>
#include "cutil_math.h"
#include "math_constants.h"
#include "particles_kernel.cuh"
#if USE_TEX
// textures for particle position and velocity
texture<float4, 1, cudaReadModeElementType> oldPosTex;
texture<float4, 1, cudaReadModeElementType> oldVelTex;
texture<uint2, 1, cudaReadModeElementType> particleHashTex;
texture<uint, 1, cudaReadModeElementType> cellStartTex;
texture<uint, 1, cudaReadModeElementType> gridCountersTex;
texture<uint, 1, cudaReadModeElementType> gridCellsTex;
#endif
__constant__ SimParams params;
// integrate particle attributes
__global__ void
integrate(float4* newPos, float4* newVel,
float4* oldPos, float4* oldVel,
float deltaTime)
{
int index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
float4 pos4 = oldPos[index];
float4 vel4 = oldVel[index];
float3 pos = make_float3(pos4);
float3 vel = make_float3(vel4);
vel += params.gravity * deltaTime;
vel *= params.globalDamping;
// new position = old position + velocity * deltaTime
pos += vel * deltaTime;
// bounce off cube sides
if (pos.x > 1.0f - params.particleRadius) { pos.x = 1.0f - params.particleRadius; vel.x *= params.boundaryDamping; }
if (pos.x < -1.0f + params.particleRadius) { pos.x = -1.0f + params.particleRadius; vel.x *= params.boundaryDamping;}
if (pos.y > 1.0f - params.particleRadius) { pos.y = 1.0f - params.particleRadius; vel.y *= params.boundaryDamping; }
if (pos.y < -1.0f + params.particleRadius) { pos.y = -1.0f + params.particleRadius; vel.y *= params.boundaryDamping;}
if (pos.z > 1.0f - params.particleRadius) { pos.z = 1.0f - params.particleRadius; vel.z *= params.boundaryDamping; }
if (pos.z < -1.0f + params.particleRadius) { pos.z = -1.0f + params.particleRadius; vel.z *= params.boundaryDamping;}
// store new position and velocity
newPos[index] = make_float4(pos, pos4.w);
newVel[index] = make_float4(vel, vel4.w);
}
// calculate position in uniform grid
__device__ int3 calcGridPos(float4 p)
{
int3 gridPos;
gridPos.x = floor((p.x - params.worldOrigin.x) / params.cellSize.x);
gridPos.y = floor((p.y - params.worldOrigin.y) / params.cellSize.y);
gridPos.z = floor((p.z - params.worldOrigin.z) / params.cellSize.z);
return gridPos;
}
// calculate address in grid from position (clamping to edges)
__device__ uint calcGridHash(int3 gridPos)
{
gridPos.x = max(0, min(gridPos.x, params.gridSize.x-1));
gridPos.y = max(0, min(gridPos.y, params.gridSize.y-1));
gridPos.z = max(0, min(gridPos.z, params.gridSize.z-1));
return __mul24(__mul24(gridPos.z, params.gridSize.y), params.gridSize.x) + __mul24(gridPos.y, params.gridSize.x) + gridPos.x;
}
// add particle to cell using atomics
__device__ void addParticleToCell(int3 gridPos,
uint index,
uint* gridCounters,
uint* gridCells)
{
// calculate grid hash
uint gridHash = calcGridHash(gridPos);
// increment cell counter using atomics
#if defined CUDA_NO_SM_11_ATOMIC_INTRINSICS
int counter = 0;
#else
int counter = atomicAdd(&gridCounters[gridHash], 1); // returns previous value
counter = min(counter, params.maxParticlesPerCell-1);
#endif
// write particle index into this cell (very uncoalesced!)
gridCells[gridHash*params.maxParticlesPerCell + counter] = index;
}
// update uniform grid
__global__ void
updateGridD(float4* pos,
uint* gridCounters,
uint* gridCells)
{
int index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
float4 p = pos[index];
// get address in grid
int3 gridPos = calcGridPos(p);
addParticleToCell(gridPos, index, gridCounters, gridCells);
}
// calculate grid hash value for each particle
__global__ void
calcHashD(float4* pos,
uint2* particleHash)
{
int index = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
float4 p = pos[index];
// get address in grid
int3 gridPos = calcGridPos(p);
uint gridHash = calcGridHash(gridPos);
// store grid hash and particle index
particleHash[index] = make_uint2(gridHash, index);
}
// rearrange particle data into sorted order, and find the start of each cell in the
// sorted hash array
__global__ void
reorderDataAndFindCellStartD(uint2* particleHash, // particle id sorted by hash
float4* oldPos,
float4* oldVel,
float4* sortedPos,
float4* sortedVel,
uint* cellStart)
{
int index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
uint2 sortedData = particleHash[index];
// Load hash data into shared memory so that we can look
// at neighboring particle's hash value without loading
// two hash values per thread
__shared__ uint sharedHash[257];
sharedHash[threadIdx.x+1] = sortedData.x;
if (index > 0 && threadIdx.x == 0)
{
// first thread in block must load neighbor particle hash
volatile uint2 prevData = particleHash[index-1];
sharedHash[0] = prevData.x;
}
__syncthreads();
if (index == 0 || sortedData.x != sharedHash[threadIdx.x])
{
cellStart[sortedData.x] = index;
}
// Now use the sorted index to reorder the pos and vel data
float4 pos = FETCH(oldPos, sortedData.y); // macro does either global read or texture fetch
float4 vel = FETCH(oldVel, sortedData.y); // see particles_kernel.cuh
__syncthreads();
sortedPos[index] = pos;
sortedVel[index] = vel;
}
// collide two spheres using DEM method
__device__ float3 collideSpheres(float4 posA, float4 posB,
float4 velA, float4 velB,
float radiusA, float radiusB,
float attraction)
{
// calculate relative position
float3 relPos;
relPos.x = posB.x - posA.x;
relPos.y = posB.y - posA.y;
relPos.z = posB.z - posA.z;
float dist = length(relPos);
float collideDist = radiusA + radiusB;
float3 force = make_float3(0.0f);
if (dist < collideDist) {
float3 norm = relPos / dist;
// relative velocity
float3 relVel;
relVel.x = velB.x - velA.x;
relVel.y = velB.y - velA.y;
relVel.z = velB.z - velA.z;
// relative tangential velocity
float3 tanVel = relVel - (dot(relVel, norm) * norm);
// spring force
force = -params.spring*(collideDist - dist) * norm;
// dashpot (damping) force
force += params.damping*relVel;
// tangential shear force
force += params.shear*tanVel;
// attraction
force += attraction*relPos;
}
return force;
}
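// Summary of the DEM contact model above: for two spheres closer than radiusA + radiusB,
// with unit normal n = relPos / dist and relative velocity relVel,
// force = -spring * (collideDist - dist) * n // linear repulsive spring
// + damping * relVel // dashpot
// + shear * (relVel - dot(relVel, n) * n) // tangential shear
// + attraction * relPos // optional attraction term
// and force = 0 when the spheres do not overlap.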
// collide particle with all particles in a given cell
// version using grid built with atomics
__device__
float3 collideCell(int3 gridPos,
uint index,
float4 pos,
float4 vel,
float4* oldPos,
float4* oldVel,
uint* gridCounters,
uint* gridCells)
{
float3 force = make_float3(0.0f);
if ((gridPos.x < 0) || (gridPos.x > params.gridSize.x-1) ||
(gridPos.y < 0) || (gridPos.y > params.gridSize.y-1) ||
(gridPos.z < 0) || (gridPos.z > params.gridSize.z-1)) {
return force;
}
uint gridHash = calcGridHash(gridPos);
// iterate over particles in this cell
uint particlesInCell = FETCH(gridCounters, gridHash);
particlesInCell = min(particlesInCell, params.maxParticlesPerCell-1);
for(uint i=0; i<particlesInCell; i++) {
uint index2 = FETCH(gridCells, gridHash*params.maxParticlesPerCell + i);
if (index2 != index) { // check not colliding with self
float4 pos2 = FETCH(oldPos, index2);
float4 vel2 = FETCH(oldVel, index2);
// collide two spheres
float3 projVec = collideSpheres(pos, pos2, vel, vel2, params.particleRadius, params.particleRadius, params.attraction);
force += projVec;
}
}
return force;
}
// version using sorted grid
__device__
float3 collideCell2(int3 gridPos,
uint index,
float4 pos,
float4 vel,
float4* oldPos,
float4* oldVel,
uint2* particleHash,
uint* cellStart)
{
float3 force = make_float3(0.0f);
if ((gridPos.x < 0) || (gridPos.x > params.gridSize.x-1) ||
(gridPos.y < 0) || (gridPos.y > params.gridSize.y-1) ||
(gridPos.z < 0) || (gridPos.z > params.gridSize.z-1)) {
return force;
}
uint gridHash = calcGridHash(gridPos);
// get start of bucket for this cell
uint bucketStart = FETCH(cellStart, gridHash);
if (bucketStart == 0xffffffff)
return force; // cell empty
// iterate over particles in this cell
for(uint i=0; i<params.maxParticlesPerCell; i++) {
uint index2 = bucketStart + i;
uint2 cellData = FETCH(particleHash, index2);
if (cellData.x != gridHash) break; // no longer in same bucket
if (index2 != index) { // check not colliding with self
float4 pos2 = FETCH(oldPos, index2);
float4 vel2 = FETCH(oldVel, index2);
// collide two spheres
float3 projVec = collideSpheres(pos, pos2, vel, vel2, params.particleRadius, params.particleRadius, params.attraction);
force += projVec;
}
}
return force;
}
__global__ void
collideD(float4* newPos, float4* newVel,
float4* oldPos, float4* oldVel,
#if USE_SORT
uint2* particleHash,
uint* cellStart
#else
uint* gridCounters,
uint* gridCells
#endif
)
{
int index = __mul24(blockIdx.x,blockDim.x) + threadIdx.x;
// read particle data from sorted arrays
float4 pos = FETCH(oldPos, index);
float4 vel = FETCH(oldVel, index);
// get address in grid
int3 gridPos = calcGridPos(pos);
float3 force = make_float3(0.0f);
// examine only neighbouring cells
for(int z=-1; z<=1; z++) {
for(int y=-1; y<=1; y++) {
for(int x=-1; x<=1; x++) {
#if USE_SORT
force += collideCell2(gridPos + make_int3(x, y, z), index, pos, vel, oldPos, oldVel, particleHash, cellStart);
#else
force += collideCell(gridPos + make_int3(x, y, z), index, pos, vel, oldPos, oldVel, gridCounters, gridCells);
#endif
}
}
}
float3 projVec = collideSpheres(pos, params.colliderPos, vel, make_float4(0.0f, 0.0f, 0.0f, 0.0f), params.particleRadius, params.colliderRadius, 0.0f);
force += projVec;
#if USE_SORT
// write new velocity back to original unsorted location
volatile uint2 sortedData = particleHash[index];
newVel[sortedData.y] = vel + make_float4(force, 0.0f);
#else
newVel[index] = vel + make_float4(force, 0.0f);
#endif
}
#endif
|
3409cb27e70657a2fc51476c61369f9c9ccdcd93.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include<stdio.h>
__global__ void Even(int* a,int n)
{
int k;
int tid = threadIdx.x;
if(tid%2 == 0 && tid != n-1)
{
//printf("etid = %d\n",tid);
if(a[tid] > a[tid+1]){//printf("even : %d\n",a[tid]);
k = a[tid];
a[tid] = a[tid+1];
a[tid+1] = k;
}
}
}
__global__ void Odd(int* a,int n)
{
int k;
int tid = threadIdx.x;
if(tid%2 != 0 && tid != n-1)
{
//printf("otid = %d\n",tid);
if(a[tid] > a[tid+1]){
//printf("odd : %d\n",a[tid]);
k = a[tid];
a[tid] = a[tid+1];
a[tid+1] = k;
}
}
}
int main(void)
{
int N = 0, i, sz;
int* d_a;
printf("Enter Array size:\n");
scanf("%d", &N);
int A[100];
printf("Enter string:\n");
for (i = 0; i < N; i++)
{
A[i] = N-i;
//scanf("%d", &A[i]);
printf("%d ",A[i]);
}
printf("\n");
sz = sizeof(int) * N;
hipMalloc((void**)&d_a, sz);
hipMemcpy(d_a, A, sz, hipMemcpyHostToDevice);
int val = N/2;
if(N%2 != 0)
{
val = val + 1;
}
for(i=0;i<val;i++)
{
hipLaunchKernelGGL(( Odd), dim3(1), dim3(N), 0, 0, d_a,N);
hipLaunchKernelGGL(( Even), dim3(1), dim3(N), 0, 0, d_a,N);
}
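// Note: each loop iteration issues one odd and one even phase of odd-even transposition
// sort, and the separate kernel launches act as global synchronisation points between
// phases; ceil(N/2) iterations give at least N phases, which is enough to sort N keys.
// The whole array is handled by a single block of N threads, so this sketch only scales
// up to the device's maximum threads per block, and the host buffer A[100] further caps
// N at 100.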
hipMemcpy(A, d_a, sz, hipMemcpyDeviceToHost);
for (i = 0; i < N; i++)
{
printf("%d ", A[i]);
}
printf("\n");
hipFree(d_a);
} | 3409cb27e70657a2fc51476c61369f9c9ccdcd93.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<stdio.h>
__global__ void Even(int* a,int n)
{
int k;
int tid = threadIdx.x;
if(tid%2 == 0 && tid != n-1)
{
//printf("etid = %d\n",tid);
if(a[tid] > a[tid+1]){//printf("even : %d\n",a[tid]);
k = a[tid];
a[tid] = a[tid+1];
a[tid+1] = k;
}
}
}
__global__ void Odd(int* a,int n)
{
int k;
int tid = threadIdx.x;
if(tid%2 != 0 && tid != n-1)
{
//printf("otid = %d\n",tid);
if(a[tid] > a[tid+1]){
//printf("odd : %d\n",a[tid]);
k = a[tid];
a[tid] = a[tid+1];
a[tid+1] = k;
}
}
}
int main(void)
{
int N = 0, i, sz;
int* d_a;
printf("Enter Array size:\n");
scanf("%d", &N);
int A[100];
printf("Enter string:\n");
for (i = 0; i < N; i++)
{
A[i] = N-i;
//scanf("%d", &A[i]);
printf("%d ",A[i]);
}
printf("\n");
sz = sizeof(int) * N;
cudaMalloc((void**)&d_a, sz);
cudaMemcpy(d_a, A, sz, cudaMemcpyHostToDevice);
int val = N/2;
if(N%2 != 0)
{
val = val + 1;
}
for(i=0;i<val;i++)
{
Odd<<<1, N>>>(d_a,N);
Even<<<1, N>>>(d_a,N);
}
cudaMemcpy(A, d_a, sz, cudaMemcpyDeviceToHost);
for (i = 0; i < N; i++)
{
printf("%d ", A[i]);
}
printf("\n");
cudaFree(d_a);
} |
3da2c88173b53f65a6964f6f6f257b89f2df6b7d.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Open sourced multi-head attention
**/
#include "fastertransformer/allocator.h"
#include "fastertransformer/cuda/multi_head_attention.h"
#include "fastertransformer/cuda/open_attention.h"
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <cmath>
namespace fastertransformer{
namespace cuda{
/**
 * Multi-head attention open sourced
*/
#define FINAL_MASK 0xffffffff
template <typename T>
__inline__ __device__
T warpReduceSum(T val)
{
#pragma unroll
for(int mask = 16; mask > 0; mask >>= 1)
val += __shfl_xor_sync(FINAL_MASK, val, mask, 32);
return val;
}
/* Calculate the sum of all elements in a block */
template <typename T>
__inline__ __device__
T blockReduceSum(T val)
{
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceSum<T>(val);
if(lane == 0)
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)(0.0f);
val = warpReduceSum<T>(val);
return val;
}
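// Note: blockReduceSum is a two-level reduction. Each warp first reduces its 32 lanes
// with __shfl_xor_sync, lane 0 of every warp parks the partial sum in shared[wid], and
// the first warp then reduces those partials (threads with threadIdx.x >= blockDim.x/32
// contribute 0). As written it assumes blockDim.x is a multiple of warpSize (32) and at
// most 1024, which the callers are expected to guarantee.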
template <typename T>
__inline__ __device__
T warpReduceMax(T val)
{
#pragma unroll
for(int mask = 16; mask > 0; mask >>= 1)
val = max(val, __shfl_xor_sync(FINAL_MASK, val, mask, 32));
return val;
}
/* Calculate the maximum of all elements in a block */
template <typename T>
__inline__ __device__
T blockReduceMax(T val)
{
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f; // in-warp idx
int wid = threadIdx.x >> 5; // warp idx
val = warpReduceMax(val); // get max in each warp
if(lane == 0) // record in-warp max by warp Idx
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : -1e20f;
val = warpReduceMax(val);
return val;
}
__inline__ __device__
int target_index(int id1, int id2, int id3, int id4, int dim_1, int dim_2, int dim_3, int dim_4)
{
return id1 * (dim_2 * dim_3 * dim_4) + id3 * (dim_2 * dim_4) + id2 * dim_4 + id4;
}
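// Note: target_index returns the row-major offset of element (id1, id3, id2, id4) in a
// tensor of shape [dim_1, dim_3, dim_2, dim_4], i.e. the destination offset when the two
// middle dimensions are swapped (a [batch, seq, head, size] -> [batch, head, seq, size]
// style transpose; the actual call sites are outside this excerpt). Worked example with
// dims (2, 3, 4, 5): (id1, id2, id3, id4) = (1, 2, 3, 4) maps to
// 1*60 + 3*15 + 2*5 + 4 = 119.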
//build a mapping from fullData to removePaddingData
//grid((valid_word_num+63)/64)
//block(64)
__global__ void mappingRemovePaddingData(int *mapping, const int* sequence_id_offset, const int valid_word_num){
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < valid_word_num)
mapping[idx + __ldg(sequence_id_offset + idx)] = idx;
}
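// Note: assuming sequence_id_offset[idx] counts the padding tokens that precede compact
// token idx, this kernel writes mapping[padded_position] = compact_position. Hypothetical
// example with offsets [0, 0, 2, 2]: mapping[0] = 0, mapping[1] = 1, mapping[4] = 2 and
// mapping[5] = 3, while entries at padded positions are simply never written.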
//add_QK_bias_transform for batch int8 cublasLtMatmul & per axis quantization for weight
//1.add QK bias
//2.transform each Q K CUBLASLT_ORDER_COL32 matrix into a series of sub-matrices (with CUBLASLT_ORDER_COL32/CUBLASLT_ORDER_COL4_4R2_8C layout)
// Q, K are CUBLASLT_ORDER_COL32 matrices of m = batch_size * seq_len, n = head_num * size_per_head
// q_buf_ is of batchCount = batch_size * head_num, m = seq_len, n = size_per_head, CUBLASLT_ORDER_COL32
// k_buf_ is of batchCount = batch_size * head_num, m = seq_len, n = size_per_head, CUBLASLT_ORDER_COL4_4R2_8C
//only for int32 input & int8 output
//seq_len, size_per_head must be a multiple of 32
//grid.x = batch_size * seq_len * 2;
//block.x = head_num * size_per_head / 4;
//using char4
template <typename T>
__global__
void add_QK_bias_transform(int8_t *q_buf_, int8_t *k_buf_, const int32_t* Q, const T* bias_Q,
const int32_t* K, const T* bias_K, const int m, const int batch_size,
const int seq_len, const int head_num, const int size_per_head, int stride,
const float * q_weight_amax, const float *q_input_deQFactor_div127_ptr, const float * k_weight_amax,
const float *k_input_deQFactor_div127_ptr, const float *q_output_scale_ptr, const float *k_output_scale_ptr)
{
const int32_t* data_ptr;
char4* buf_ptr4;
const T* bias_ptr;
const float* weight_amax;
int qk_id = blockIdx.x / m;
data_ptr = qk_id == 0 ? Q : K;
buf_ptr4 = qk_id == 0 ? (char4*)q_buf_ : (char4*)k_buf_;
bias_ptr = qk_id == 0 ? bias_Q : bias_K;
const float input_deQFactor_div127 = qk_id == 0 ? __ldg(q_input_deQFactor_div127_ptr) : __ldg(k_input_deQFactor_div127_ptr);
weight_amax = qk_id == 0 ? q_weight_amax : k_weight_amax;
const float output_scale = qk_id == 0 ? __ldg(q_output_scale_ptr) : __ldg(k_output_scale_ptr);
int threadIdx4 = threadIdx.x << 2;
int batch_id = (blockIdx.x % m) / seq_len;
int head_id = threadIdx4 / size_per_head;
int id_in_head = threadIdx4 % size_per_head;
int word_id = blockIdx.x % seq_len;
int data_id = (((threadIdx4 >> 5) << 5)*m + ((blockIdx.x%m) << 5) + (threadIdx4&31));
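//COL32 addressing: element (row = blockIdx.x % m, col = threadIdx4) of an (m x n) COL32 matrix
//lives at (col & ~31) * m + (row << 5) + (col & 31)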
float scale;
float tmp;
char4 tmp4;
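//dequantize the int32 GEMM result using the per-channel weight amax and the input dequantization factor,
//add the bias in float, then requantize to int8 with the output scale (repeated for the four elements packed into tmp4)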
scale = static_cast<float>(__ldg(data_ptr+data_id)) * __ldg(weight_amax+threadIdx4) * input_deQFactor_div127;
tmp = static_cast<float>(__ldg(bias_ptr+threadIdx4)) + scale;
tmp4.x = float_to_int8_rn(tmp*output_scale);
data_id = data_id+1;
threadIdx4 = threadIdx4+1;
scale = static_cast<float>(__ldg(data_ptr+data_id)) * __ldg(weight_amax+threadIdx4)* input_deQFactor_div127;
tmp = static_cast<float>(__ldg(bias_ptr+threadIdx4)) + scale;
tmp4.y = float_to_int8_rn(tmp*output_scale);
data_id = data_id+1;
threadIdx4 = threadIdx4+1;
scale = static_cast<float>(__ldg(data_ptr+data_id)) * __ldg(weight_amax+threadIdx4) * input_deQFactor_div127;
tmp = static_cast<float>(__ldg(bias_ptr+threadIdx4)) + scale;
tmp4.z = float_to_int8_rn(tmp*output_scale);
data_id = data_id+1;
threadIdx4 = threadIdx4+1;
scale = static_cast<float>(__ldg(data_ptr+data_id)) * __ldg(weight_amax+threadIdx4) * input_deQFactor_div127;
tmp = static_cast<float>(__ldg(bias_ptr+threadIdx4)) + scale;
tmp4.w = float_to_int8_rn(tmp*output_scale);
//row_id, col_id of sub-matrix (m = seq_len, n = size_per_head), column-major
int row_id = word_id;
int col_id = id_in_head;
//new (row, col) of LtTrans COL32/COL4 sub-matrix, leading dim = (COL32_ * seq_len)
int new_col = col_id >> 5;
int new_row = (qk_id != 1) ?
//COL32
((row_id << 5) + (col_id&31))
:
//COL4
////row_id/8 is the number of tile of (8 rows 32 columns) -- column-major
////row_id%2 is even row, otherwise odd row
////col_id%COL32_/8 is the number tile of (8 rows 8 columns)
(
((((row_id >> 3) << 3) + ((row_id&1) << 2) + ((col_id&31) >> 3)) << 5) +
////col_id%8 >= 4 is the right half of (8 rows 8 columns) tile
////(row_id%8/2) is (the row id of alternating 4 rows) - 1
(((((col_id&7) >= 4)?4:0) + ((row_id&7) >> 1)) << 2) +
////col_id%4 is the id of 4 cols
(col_id&3)
)
;
buf_ptr4[(((batch_id*head_num + head_id) * stride + (new_col << 5)*seq_len + new_row) >> 2)] = tmp4;
}
//add_QK_bias_transform & rebuild padding for batch int8 cublasLtMatmul & per axis quantization for weight
//1.add QK bias
//2.transform each Q/K CUBLASLT_ORDER_COL32 matrix into a series of sub-matrices (with CUBLASLT_ORDER_COL32/CUBLASLT_ORDER_COL4_4R2_8C layout)
// Q, K are CUBLASLT_ORDER_COL32 matrices of m = valid_word_num, n = head_num * size_per_head
// q_buf_ is of batchCount = batch_size * head_num, m = seq_len, n = size_per_head, CUBLASLT_ORDER_COL32
// k_buf_ is of batchCount = batch_size * head_num, m = seq_len, n = size_per_head, CUBLASLT_ORDER_COL4_4R2_8C
//only for int32 input & int8 output
//seq_len, size_per_head must be a multiple of 32
//grid.x = valid_word_num * 2;
//block.x = head_num * size_per_head / 4;
//using char4
template <typename T>
__global__
void add_QK_bias_transform_rebuild_padding(int8_t *q_buf_, int8_t *k_buf_, const int32_t* Q, const T* bias_Q,
const int32_t* K, const T* bias_K, const int* sequence_id_offset,
const int valid_word_num, const int m, const int batch_size, const int seq_len,
const int head_num, const int size_per_head, int stride, const float * q_weight_amax,
const float *q_input_deQFactor_div127_ptr, const float * k_weight_amax,
const float *k_input_deQFactor_div127_ptr, const float *q_output_scale_ptr, const float *k_output_scale_ptr)
{
const int32_t* data_ptr;
char4* buf_ptr4;
const T* bias_ptr;
const float* weight_amax;
int qk_id = blockIdx.x / valid_word_num;
data_ptr = qk_id == 0 ? Q : K;
buf_ptr4 = qk_id == 0 ? (char4*)q_buf_ : (char4*)k_buf_;
bias_ptr = qk_id == 0 ? bias_Q : bias_K;
int threadIdx4 = threadIdx.x << 2;
int m_full_idx = blockIdx.x % valid_word_num;
m_full_idx = (valid_word_num != m) ? (m_full_idx + __ldg(sequence_id_offset+m_full_idx)) : m_full_idx;
int batch_id = m_full_idx / seq_len;
int head_id = threadIdx4 / size_per_head;
int id_in_head = threadIdx4 % size_per_head;
int word_id = m_full_idx % seq_len;
const float input_deQFactor_div127 = qk_id == 0 ? __ldg(q_input_deQFactor_div127_ptr) : __ldg(k_input_deQFactor_div127_ptr);
weight_amax = qk_id == 0 ? q_weight_amax : k_weight_amax;
const float output_scale = qk_id == 0 ? __ldg(q_output_scale_ptr) : __ldg(k_output_scale_ptr);
int data_id = (((threadIdx4 >> 5) << 5)*valid_word_num + ((blockIdx.x%valid_word_num) << 5) + (threadIdx4&31));
float scale;
float tmp;
char4 tmp4;
scale = static_cast<float>(__ldg(data_ptr+data_id)) * __ldg(weight_amax+threadIdx4) * input_deQFactor_div127;
tmp = static_cast<float>(__ldg(bias_ptr+threadIdx4)) + scale;
tmp4.x = float_to_int8_rn(tmp*output_scale);
data_id = data_id+1;
threadIdx4 = threadIdx4+1;
scale = static_cast<float>(__ldg(data_ptr+data_id)) * __ldg(weight_amax+threadIdx4)* input_deQFactor_div127;
tmp = static_cast<float>(__ldg(bias_ptr+threadIdx4)) + scale;
tmp4.y = float_to_int8_rn(tmp*output_scale);
data_id = data_id+1;
threadIdx4 = threadIdx4+1;
scale = static_cast<float>(__ldg(data_ptr+data_id)) * __ldg(weight_amax+threadIdx4) * input_deQFactor_div127;
tmp = static_cast<float>(__ldg(bias_ptr+threadIdx4)) + scale;
tmp4.z = float_to_int8_rn(tmp*output_scale);
data_id = data_id+1;
threadIdx4 = threadIdx4+1;
scale = static_cast<float>(__ldg(data_ptr+data_id)) * __ldg(weight_amax+threadIdx4) * input_deQFactor_div127;
tmp = static_cast<float>(__ldg(bias_ptr+threadIdx4)) + scale;
tmp4.w = float_to_int8_rn(tmp*output_scale);
//row_id, col_id of sub-matrix (m = seq_len, n = size_per_head), column-major
int row_id = word_id;
int col_id = id_in_head;
//new (row, col) of LtTrans COL32/COL4 sub-matrix, leading dim = (COL32_ * seq_len)
int new_col = col_id >> 5;
int new_row = (qk_id != 1) ?
//COL32
((row_id << 5) + (col_id&31))
:
//COL4
////row_id/8 is the number of tile of (8 rows 32 columns) -- column-major
////row_id%2 is even row, otherwise odd row
////col_id%COL32_/8 is the number tile of (8 rows 8 columns)
(
((((row_id >> 3) << 3) + ((row_id&1) << 2) + ((col_id&31) >> 3)) << 5) +
////col_id%8 >= 4 is the right half of (8 rows 8 columns) tile
////(row_id%8/2) is (the row id of alternating 4 rows) - 1
(((((col_id&7) >= 4)?4:0) + ((row_id&7) >> 1)) << 2) +
////col_id%4 is the id of 4 cols
(col_id&3)
)
;
buf_ptr4[(((batch_id*head_num + head_id) * stride + (new_col << 5)*seq_len + new_row) >> 2)] = tmp4;
}
//the input is a matrix of m = batch_size*seq_len, n = head_num*size_per_head, CUBLASLT_ORDER_COL32
//the output is a series of sub-matrices of m = size_per_head, n = seq_len, CUBLASLT_ORDER_COL4_4R2_8C
//only for int32_t Input int8_t Output
//seq_len, size_per_head must be a multiple of 32
//grid = (size_per_head/32, seq_len/32, batch_size*head_num)
//block = (8, 32);
//using char4
//per axis quantization for weight
template <typename T>
__global__
void add_V_bias_transform(int8_t *v_buf_, const int32_t *V, const T *V_bias, const int batch_size, const int seq_len,
const int head_num, const int size_per_head, int stride, const float* weight_amax,
const float *input_deQFactor_div127_ptr, const float *out_scale_ptr)
{
const float input_deQFactor_div127 = __ldg(input_deQFactor_div127_ptr);
const float out_scale = __ldg(out_scale_ptr);
__shared__ int8_t shm[32][33];
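//the extra padding column (33 instead of 32) keeps the transposed reads below free of shared-memory bank conflicts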
const int32_t* data_ptr = V;
char4* buf_ptr4 = (char4*) v_buf_;
const T* bias_ptr = V_bias;
int threadIdx4 = threadIdx.x << 2;
//for src of (seq_len, size_per_head)
int batch_id = blockIdx.z/head_num;
int head_id = blockIdx.z%head_num;
int word_id = (blockIdx.y << 5) + threadIdx.y;
int id_in_size = (blockIdx.x << 5) + threadIdx4;
//for V layout (batch_size*seq_len, head_num*size_per_head)
int col = head_id*size_per_head + id_in_size;
int row = batch_id*seq_len + word_id;
int inIdx = (((col >> 5) << 5)*batch_size*seq_len + ((row << 5) + (col&31)));
//for shm row-major
int sh_col = threadIdx4;
int sh_row = threadIdx.y;
float tmp;
float scale;
//const half2* bias_ptr2 = (const half2*)bias_ptr;
//half2 tmp2;
//tmp2 = __ldg(&bias_ptr2[col >> 1]);
scale = __ldg(data_ptr + inIdx) * __ldg(weight_amax + col) * input_deQFactor_div127;
tmp = scale + static_cast<float>(__ldg(bias_ptr + col));//(tmp2.x);
shm[sh_row][sh_col] = float_to_int8_rn(tmp*out_scale);
scale = __ldg(data_ptr + inIdx + 1) * __ldg(weight_amax + col + 1) * input_deQFactor_div127;
tmp = scale + static_cast<float>(__ldg(bias_ptr+col+1));//(tmp2.y);
shm[sh_row][sh_col+1] = float_to_int8_rn(tmp*out_scale);
//tmp2 = __ldg(&bias_ptr2[(col >> 1) + 1]);
scale = __ldg(data_ptr+inIdx+2) * __ldg(weight_amax+col+2) * input_deQFactor_div127;
tmp = scale + static_cast<float>(__ldg(bias_ptr+col+2));//(tmp2.x);
shm[sh_row][sh_col+2] = float_to_int8_rn(tmp*out_scale);
scale = __ldg(data_ptr+inIdx + 3) * __ldg(weight_amax+col+3) * input_deQFactor_div127;
tmp = scale + static_cast<float>(__ldg(bias_ptr+col+3));//(tmp2.y);
shm[sh_row][sh_col+3] = float_to_int8_rn(tmp*out_scale);
__syncthreads();
//for dst of (size_per_head, seq_len)
word_id = (blockIdx.y << 5) + threadIdx4;
id_in_size = (blockIdx.x << 5) + threadIdx.y;
col = (word_id >> 5);
row = (
//COL4
////id_in_size/8 is the number of tile of (8 rows 32 columns) -- column-major
////id_in_size%2 is even row, otherwise odd row
////word_id%COL32_/8 is the number tile of (8 rows 8 columns)
((((id_in_size >> 3) << 3) + ((id_in_size&1) << 2) + ((word_id&31) >> 3)) << 5) +
////word_id%8 >= 4 is the right half of (8 rows 8 columns) tile
////(id_in_size%8/2) is (the row id of alternating 4 rows) - 1
(((((word_id&7) >= 4)?4:0) + ((id_in_size&7) >> 1)) << 2) +
////word_id%4 is the id of 4 cols
(word_id&3)
);
char4 dataTmp;
dataTmp.x = shm[sh_col][sh_row];
dataTmp.y = shm[sh_col+1][sh_row];
dataTmp.z = shm[sh_col+2][sh_row];
dataTmp.w = shm[sh_col+3][sh_row];
buf_ptr4[(blockIdx.z*stride + (col << 5)*size_per_head + row) >> 2] = dataTmp;
}
template <>
__global__
void add_V_bias_transform(int8_t *v_buf_, const int32_t *V, const half *V_bias, const int batch_size, const int seq_len,
const int head_num, const int size_per_head, int stride, const float* weight_amax,
const float *input_deQFactor_div127_ptr, const float *out_scale_ptr)
{
const float input_deQFactor_div127 = __ldg(input_deQFactor_div127_ptr);
const float out_scale = __ldg(out_scale_ptr);
__shared__ int8_t shm[32][33];
const int32_t* data_ptr = V;
char4* buf_ptr4 = (char4*) v_buf_;
int threadIdx4 = threadIdx.x << 2;
//for src of (seq_len, size_per_head)
int batch_id = blockIdx.z/head_num;
int head_id = blockIdx.z%head_num;
int blockIdy32 = (blockIdx.y << 5);
int blockIdx32 = (blockIdx.x << 5);
int word_id = blockIdy32 + threadIdx.y;
int id_in_size = blockIdx32 + threadIdx4;
//for V layout (batch_size*seq_len, head_num*size_per_head)
int col = head_id*size_per_head + id_in_size;
int row = batch_id*seq_len + word_id;
int inIdx = ((col & 0xffffffe0)*batch_size*seq_len + ((row << 5) + (col&31)));
//for shm row-major
int sh_col = threadIdx4;
int sh_row = threadIdx.y;
int col_2 = col >> 1;
float scale;
const half2* bias_ptr2 = (const half2*)V_bias;
half2 tmp2;
tmp2 = __ldg(bias_ptr2+col_2);
scale = __ldg(data_ptr+inIdx) * __ldg(weight_amax+col) * input_deQFactor_div127;
scale = scale + static_cast<float>(tmp2.x);
shm[sh_row][sh_col] = float_to_int8_rn(scale*out_scale);
scale = __ldg(data_ptr+inIdx+1) * __ldg(weight_amax+col+1) * input_deQFactor_div127;
scale = scale + static_cast<float>(tmp2.y);
shm[sh_row][sh_col+1] = float_to_int8_rn(scale*out_scale);
tmp2 = __ldg(bias_ptr2 + col_2 + 1);
scale = __ldg(data_ptr + inIdx + 2) * __ldg(weight_amax + col + 2) * input_deQFactor_div127;
scale = scale + static_cast<float>(tmp2.x);
shm[sh_row][sh_col+2] = float_to_int8_rn(scale*out_scale);
scale = __ldg(data_ptr + inIdx + 3) * __ldg(weight_amax + col + 3) * input_deQFactor_div127;
scale = scale + static_cast<float>(tmp2.y);
shm[sh_row][sh_col+3] = float_to_int8_rn(scale*out_scale);
__syncthreads();
//for dst of (size_per_head, seq_len)
word_id = blockIdy32 + threadIdx4;
id_in_size = blockIdx32 + threadIdx.y;
col = (word_id >> 5);
row = (
//COL4
////id_in_size/8 is the number of tile of (8 rows 32 columns) -- column-major
////id_in_size%2 is even row, otherwise odd row
////word_id%COL32_/8 is the number tile of (8 rows 8 columns)
(((id_in_size & 0xfffffff8) + ((id_in_size&1) << 2) + ((word_id&31) >> 3)) << 5) +
////word_id%8 >= 4 is the right half of (8 rows 8 columns) tile
////(id_in_size%8/2) is (the row id of alternating 4 rows) - 1
(((((word_id&7) >= 4)?4:0) + ((id_in_size&7) >> 1)) << 2) +
////word_id%4 is the id of 4 cols
(word_id&3)
);
char4 dataTmp;
dataTmp.x = shm[sh_col][sh_row];
dataTmp.y = shm[sh_col+1][sh_row];
dataTmp.z = shm[sh_col+2][sh_row];
dataTmp.w = shm[sh_col+3][sh_row];
buf_ptr4[(blockIdx.z*stride + (col << 5)*size_per_head + row) >> 2] = dataTmp;
}
//add bias into V & rebuild padding
//the input is a matrix of m = valid_word_num, n = head_num*size_per_head, CUBLASLT_ORDER_COL32
//the output is a series of sub-matrices of m = size_per_head, n = seq_len, CUBLASLT_ORDER_COL4_4R2_8C
//only for int32_t Input int8_t Output
//seq_len, size_per_head must be a multiple of 32
//grid = (size_per_head/32, seq_len/32, batch_size*head_num)
//block = (8, 32);
//using char4
//per axis quantization for weight
template <typename T>
__global__
void add_V_bias_transform_rebuild_padding(int8_t *v_buf_, const int32_t *V, const T *V_bias, const int* sequence_id_map, const int valid_word_num,
const int batch_size, const int seq_len, const int head_num, const int size_per_head, int stride,
const float* weight_amax, const float *input_deQFactor_div127_ptr, const float *out_scale_ptr)
{
__shared__ int8_t shm[32][33];
const int32_t* data_ptr = V;
char4* buf_ptr4 = (char4*) v_buf_;
const T* bias_ptr = V_bias;
int threadIdx4 = threadIdx.x << 2;
//for src of (seq_len, size_per_head)
int batch_id = blockIdx.z/head_num;
int head_id = blockIdx.z%head_num;
int word_id = (blockIdx.y << 5) + threadIdx.y;
int id_in_size = (blockIdx.x << 5) + threadIdx4;
//for shm row-major
int sh_col = threadIdx4;
int sh_row = threadIdx.y;
//for V layout (batch_size*seq_len, head_num*size_per_head)
int col;
int row = __ldg(sequence_id_map + batch_id*seq_len + word_id);
if (row != -1){
col = head_id*size_per_head + id_in_size;
int inIdx = ((col & 0xffffffe0)*valid_word_num + ((row << 5) + (col&31)));
float tmp;
float scale;
const float input_deQFactor_div127 = __ldg(input_deQFactor_div127_ptr);
const float out_scale = __ldg(out_scale_ptr);
scale = __ldg(data_ptr + inIdx) * __ldg(weight_amax + col) * input_deQFactor_div127;
tmp = scale + static_cast<float>(__ldg(bias_ptr + col));
shm[sh_row][sh_col] = float_to_int8_rn(tmp*out_scale);
scale = __ldg(data_ptr + inIdx + 1) * __ldg(weight_amax + col + 1) * input_deQFactor_div127;
tmp = scale + static_cast<float>(__ldg(bias_ptr+col+1));
shm[sh_row][sh_col+1] = float_to_int8_rn(tmp*out_scale);
scale = __ldg(data_ptr+inIdx+2) * __ldg(weight_amax+col+2) * input_deQFactor_div127;
tmp = scale + static_cast<float>(__ldg(bias_ptr+col+2));
shm[sh_row][sh_col+2] = float_to_int8_rn(tmp*out_scale);
scale = __ldg(data_ptr+inIdx + 3) * __ldg(weight_amax+col+3) * input_deQFactor_div127;
tmp = scale + static_cast<float>(__ldg(bias_ptr+col+3));
shm[sh_row][sh_col+3] = float_to_int8_rn(tmp*out_scale);
}
else{
shm[sh_row][sh_col] = shm[sh_row][sh_col + 1] = shm[sh_row][sh_col + 2] = shm[sh_row][sh_col + 3] = 0;
}
__syncthreads();
char4 dataTmp;
dataTmp.x = shm[sh_col][sh_row];
dataTmp.y = shm[sh_col+1][sh_row];
dataTmp.z = shm[sh_col+2][sh_row];
dataTmp.w = shm[sh_col+3][sh_row];
//for dst of (size_per_head, seq_len)
word_id = (blockIdx.y << 5) + threadIdx4;
id_in_size = (blockIdx.x << 5) + threadIdx.y;
col = (word_id >> 5);
row = (
//COL4
////id_in_size/8 is the number of tile of (8 rows 32 columns) -- column-major
////id_in_size%2 is even row, otherwise odd row
////word_id%COL32_/8 is the number tile of (8 rows 8 columns)
(((id_in_size & 0xfffffff8) + ((id_in_size&1) << 2) + ((word_id&31) >> 3)) << 5) +
////word_id%8 >= 4 is the right half of (8 rows 8 columns) tile
////(id_in_size%8/2) is (the row id of alternating 4 rows) - 1
(((((word_id&7) >= 4)?4:0) + ((id_in_size&7) >> 1)) << 2) +
////word_id%4 is the id of 4 cols
(word_id&3)
);
buf_ptr4[(blockIdx.z*stride + (col << 5)*size_per_head + row) >> 2] = dataTmp;
}
template <>
__global__
void add_V_bias_transform_rebuild_padding(int8_t *v_buf_, const int32_t *V, const half *V_bias, const int* sequence_id_map, const int valid_word_num,
const int batch_size, const int seq_len, const int head_num, const int size_per_head, int stride,
const float* weight_amax, const float *input_deQFactor_div127_ptr, const float *out_scale_ptr)
{
__shared__ int8_t shm[32][33];
const int32_t* data_ptr = V;
char4* buf_ptr4 = (char4*) v_buf_;
int threadIdx4 = threadIdx.x << 2;
//for src of (seq_len, size_per_head)
int batch_id = blockIdx.z/head_num;
int head_id = blockIdx.z%head_num;
int blockIdy32 = (blockIdx.y << 5);
int blockIdx32 = (blockIdx.x << 5);
int word_id = blockIdy32 + threadIdx.y;
int id_in_size = blockIdx32 + threadIdx4;
//for shm row-major
int sh_col = threadIdx4;
int sh_row = threadIdx.y;
//for V layout (batch_size*seq_len, head_num*size_per_head)
int col;
int row = __ldg(sequence_id_map + batch_id*seq_len + word_id);
if (row >= 0){
const float input_deQFactor_div127 = __ldg(input_deQFactor_div127_ptr);
const float out_scale = __ldg(out_scale_ptr);
col = head_id*size_per_head + id_in_size;
int inIdx = ((col & 0xffffffe0)*valid_word_num + ((row << 5) + (col&31)));
int col_2 = col >> 1;
float scale;
const half2* bias_ptr2 = (const half2*)V_bias;
half2 tmp2;
tmp2 = __ldg(bias_ptr2+col_2);
scale = __ldg(data_ptr+inIdx) * __ldg(weight_amax+col) * input_deQFactor_div127;
scale = scale + static_cast<float>(tmp2.x);
shm[sh_row][sh_col] = float_to_int8_rn(scale*out_scale);
scale = __ldg(data_ptr+inIdx+1) * __ldg(weight_amax+col+1) * input_deQFactor_div127;
scale = scale + static_cast<float>(tmp2.y);
shm[sh_row][sh_col+1] = float_to_int8_rn(scale*out_scale);
tmp2 = __ldg(bias_ptr2 + col_2 + 1);
scale = __ldg(data_ptr + inIdx + 2) * __ldg(weight_amax + col + 2) * input_deQFactor_div127;
scale = scale + static_cast<float>(tmp2.x);
shm[sh_row][sh_col+2] = float_to_int8_rn(scale*out_scale);
scale = __ldg(data_ptr + inIdx + 3) * __ldg(weight_amax + col + 3) * input_deQFactor_div127;
scale = scale + static_cast<float>(tmp2.y);
shm[sh_row][sh_col+3] = float_to_int8_rn(scale*out_scale);
}
else{
shm[sh_row][sh_col] = shm[sh_row][sh_col + 1] = shm[sh_row][sh_col + 2] = shm[sh_row][sh_col + 3] = 0;
}
__syncthreads();
char4 dataTmp;
dataTmp.x = shm[sh_col][sh_row];
dataTmp.y = shm[sh_col+1][sh_row];
dataTmp.z = shm[sh_col+2][sh_row];
dataTmp.w = shm[sh_col+3][sh_row];
//for dst of (size_per_head, seq_len)
word_id = blockIdy32 + threadIdx4;
id_in_size = blockIdx32 + threadIdx.y;
col = (word_id >> 5);
row = (
//COL4
////id_in_size/8 is the number of tile of (8 rows 32 columns) -- column-major
////id_in_size%2 is even row, otherwise odd row
////word_id%COL32_/8 is the number tile of (8 rows 8 columns)
(((id_in_size & 0xfffffff8) + ((id_in_size&1) << 2) + ((word_id&31) >> 3)) << 5) +
////word_id%8 >= 4 is the right half of (8 rows 8 columns) tile
////(id_in_size%8/2) is (the row id of alternating 4 rows) - 1
(((((word_id&7) >= 4)?4:0) + ((id_in_size&7) >> 1)) << 2) +
////word_id%4 is the id of 4 cols
(word_id&3)
);
buf_ptr4[(blockIdx.z*stride + (col << 5)*size_per_head + row) >> 2] = dataTmp;
}
template<typename T>
__global__
void add_QKV_bias(T* Q, const T* bias_Q, T* K, const T* bias_K, T* V, const T* bias_V, T* q_buf_, T* k_buf_, T* v_buf_,
const int batch_size, const int seq_len, const int head_num, const int size_per_head, const int word_per_block)
{
T* data_ptr;
T* buf_ptr;
const T* bias_ptr;
int m = batch_size * seq_len;
int n = head_num * size_per_head;
int qkv_id = blockIdx.x * word_per_block / m;
int row_offset = (blockIdx.x * word_per_block % m) * n;
if(qkv_id == 0)
{
data_ptr = Q + row_offset;
buf_ptr = q_buf_;
bias_ptr = bias_Q;
}
else if(qkv_id == 1)
{
data_ptr = K + row_offset;
buf_ptr = k_buf_;
bias_ptr = bias_K;
}
else
{
data_ptr = V + row_offset;
buf_ptr = v_buf_;
bias_ptr = bias_V;
}
int batch_id = (blockIdx.x * word_per_block % m) / seq_len;
int head_id = threadIdx.x / size_per_head;
int id_in_head = threadIdx.x % size_per_head;
int word_start_id = (blockIdx.x * word_per_block) % seq_len;
T bias = __ldg(&bias_ptr[threadIdx.x]);
for(int i = word_start_id; i < word_start_id + word_per_block; ++i)
{
T tmp = data_ptr[threadIdx.x] + bias;
int target_id = batch_id * (seq_len * head_num * size_per_head) + head_id * seq_len * size_per_head +
i * size_per_head + id_in_head;
buf_ptr[target_id] = tmp;
data_ptr += n;
}
}
template <>
__global__
void add_QKV_bias(half* Q, const half* bias_Q, half* K, const half* bias_K, half* V, const half* bias_V,
half* q_buf_, half* k_buf_, half* v_buf_,
const int batch_size, const int seq_len, const int head_num, const int size_per_head, const int word_per_block)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int batch_id = tid / (head_num * seq_len * size_per_head);
int seq_id = (tid % (head_num * seq_len * size_per_head)) / (head_num * size_per_head);
int head_id = (tid % (head_num * size_per_head)) / size_per_head;
int id = tid % size_per_head;
int target_id = target_index(batch_id, seq_id, head_id, id, batch_size, seq_len, head_num, size_per_head);
int bias_id = threadIdx.x;
half2* src_ptr = (half2*)Q;
half2* dst_ptr = (half2*)q_buf_;
const half2* bias_ptr = (const half2*)bias_Q;
dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
src_ptr = (half2*)K;
dst_ptr = (half2*)k_buf_;
bias_ptr = (const half2*)bias_K;
dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
src_ptr = (half2*)V;
dst_ptr = (half2*)v_buf_;
bias_ptr = (const half2*)bias_V;
dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
}
template<typename T>
__global__
void add_QKV_bias_rebuild_padding(T* Q, const T* bias_Q, T* K, const T* bias_K, T* V, const T* bias_V, T* q_buf_, T* k_buf_, T* v_buf_,
const int batch_size, const int seq_len, const int head_num, const int size_per_head, const int* mask_offset)
{
const int tid = threadIdx.x;
const int bid = blockIdx.x;
const int bdim = blockDim.x;
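//mask_offset[bid] gives the number of padding tokens before the bid-th valid token,
//so bid + mask_offset[bid] is this token's position in the padded batch_size*seq_len layout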
const int tgt_batch_id = (bid + mask_offset[bid]) / seq_len;
const int tgt_seq_id = (bid + mask_offset[bid]) % seq_len;
const int tgt_head_id = tid / size_per_head;
const int tgt_hidden_id = tid % size_per_head;
const int src_id = bid * bdim + tid;
const int tgt_id = tgt_batch_id * head_num * seq_len * size_per_head + \
tgt_head_id * seq_len * size_per_head + \
tgt_seq_id * size_per_head + \
tgt_hidden_id;
q_buf_[tgt_id] = Q[src_id] + bias_Q[tid];
k_buf_[tgt_id] = K[src_id] + bias_K[tid];
v_buf_[tgt_id] = V[src_id] + bias_V[tid];
}
template <typename T>
__global__
void softmax_kernel(T* qk_buf_, const T* attr_mask, const int batch_size, const int head_num, const int seq_len,
const T scalar)
{
int batch_id = blockIdx.x / head_num;
int qk_offset = blockIdx.x * seq_len * seq_len;
int mask_offset = batch_id * seq_len * seq_len;
__shared__ float s_sum, s_max;
for(int i = 0; i < seq_len; ++i)
{
float qk = threadIdx.x < seq_len ? (float)qk_buf_[threadIdx.x + qk_offset] : 0.0f;
float mask_val = threadIdx.x < seq_len ? (float)attr_mask[threadIdx.x + mask_offset] : 0.0f;
mask_val = (1.0f - mask_val) * -10000.0f;
float tmp = threadIdx.x < seq_len ? (float)(qk * (float)scalar + mask_val): -1e20f;
float max_val = blockReduceMax<float>(tmp);
if(threadIdx.x == 0)
s_max = max_val;
__syncthreads();
qk = threadIdx.x < seq_len ? __expf(tmp - s_max) : 0.0f;
float sum_val = blockReduceSum<float>(qk);
if(threadIdx.x == 0)
{
s_sum = sum_val + 1e-6f;
}
__syncthreads();
if(threadIdx.x < seq_len)
qk_buf_[threadIdx.x + qk_offset] = (T)(qk / s_sum);
qk_offset += seq_len;
mask_offset += seq_len;
}
}
template <typename T>
__global__
void softmax_kernel_v2(T* qk_buf_, const T* attr_mask, const int batch_size, const int head_num,
const int seq_len, const float scalar)
{
int batch_id = blockIdx.x / head_num / seq_len;
int seq_id = blockIdx.x % seq_len;
int qk_offset = blockIdx.x * seq_len;
int mask_offset = batch_id * seq_len * seq_len + seq_id * seq_len;
__shared__ float s_sum, s_max;
float qk = threadIdx.x < seq_len ? (float)qk_buf_[threadIdx.x + qk_offset] : 0.0f;
float mask_val = threadIdx.x < seq_len ? (float)attr_mask[threadIdx.x + mask_offset] : 0.0f;
mask_val = (1.0f - mask_val) * -10000.0f;
float tmp = threadIdx.x < seq_len ? (float)(qk * (float)scalar + mask_val) : -1e20f;
float max_val = blockReduceMax<float>(tmp);
if(threadIdx.x == 0)
s_max = max_val;
__syncthreads();
float qk_tmp = threadIdx.x < seq_len ? __expf((float)(tmp - s_max)) : 0.0f;
float sum_val = blockReduceSum<float>(qk_tmp);
if(threadIdx.x == 0)
{
s_sum = sum_val + 1e-6f;
}
__syncthreads();
if(threadIdx.x < seq_len)
qk_buf_[threadIdx.x + qk_offset] = (T)(qk_tmp / s_sum);
}
//grid = (seq_len/word_per_thread, batch_size, head_num)
//block.x = max(32, (seq_len + 31)/32*32)
template <typename T>
__global__
void softmax_kernel_v3(T* qk_buf_, const T* attr_mask, const int batch_size, const int head_num, const int seq_len, const T scalar)
{
bool qual = threadIdx.x < seq_len;
for (int seq_id = blockIdx.x ; seq_id < seq_len ; seq_id += gridDim.x){
float tmp = -1e20f;
int qk_offset;
__shared__ float s_mean, s_max;
if (qual){
qk_offset = ((blockIdx.y*head_num + blockIdx.z)*seq_len + seq_id) *seq_len + threadIdx.x;
int mask_offset = (blockIdx.y * seq_len + seq_id) * seq_len + threadIdx.x;
float qk = static_cast<float>(qk_buf_[qk_offset]);
float mask_val = static_cast<float>(__ldg(&attr_mask[mask_offset]));
mask_val = (1.0f - mask_val) * -10000.0f;
tmp = qk * static_cast<float>(scalar) + mask_val;
}
float max_val = blockReduceMax<float>(tmp);
if (threadIdx.x == 0){
s_max = max_val;
}
__syncthreads();
float qk_tmp = qual ? __expf(tmp - s_max) : 0.0f;
float sum_val = blockReduceSum<float>(qk_tmp);
if (threadIdx.x == 0){
s_mean = sum_val + 1e-6f;
s_mean = __fdividef(1.0f, s_mean);
}
__syncthreads();
if(qual)
qk_buf_[qk_offset] = (T)(qk_tmp * s_mean);
}
}
//grid = (seq_len/word_per_thread, batch_size, head_num)
//block.x = max(32, (seq_len/2 + 31)/32*32)
//seq_len % 2 == 0
template <>
__global__
void softmax_kernel_v3(half* qk_buf_, const half* attr_mask,
const int batch_size, const int head_num,
const int seq_len, const half scalar)
{
int threadIdx2 = threadIdx.x << 1;
bool qual = threadIdx2 < seq_len;
half2* qk_buf_half2Ptr = (half2*) qk_buf_;
const half2* attr_mask_half2Ptr = (const half2*) attr_mask;
__shared__ float s_mean, s_max;
for (int seq_id = blockIdx.x ; seq_id < seq_len ; seq_id += gridDim.x){
int qk_offset;
half2 tmp = __float2half2_rn(0.0f);
float max_val = -1e20f;
half2 qk;
if (qual){
qk_offset = ((((blockIdx.y*head_num + blockIdx.z)*seq_len + seq_id) *seq_len) >> 1) + threadIdx.x;
int mask_offset = (((blockIdx.y * seq_len + seq_id) * seq_len) >> 1) + threadIdx.x;
qk = qk_buf_half2Ptr[qk_offset];
half2 mask_val = __ldg(&attr_mask_half2Ptr[mask_offset]);
half2 mask_val_tmp = __hmul2(__hsub2(__float2half2_rn(1.0f), mask_val), __float2half2_rn(-10000.0f));
tmp = __hadd2(__hmul2(__half2half2(scalar), qk), mask_val_tmp);
max_val = fmax((float)tmp.x, (float)tmp.y);
}
max_val = blockDim.x <= 32 ? warpReduceMax(max_val) : blockReduceMax<float>(max_val);
if (threadIdx.x == 0){
s_max = max_val;
}
__syncthreads();
if (qual){
tmp = h2exp(__hsub2(tmp, __float2half2_rn(s_max)));
}
float sum_val = blockDim.x <= 32 ? warpReduceSum((float)(tmp.x + tmp.y)) : blockReduceSum<float>((float)(tmp.x + tmp.y));
if (threadIdx.x == 0){
s_mean = sum_val + 1e-6f;
s_mean = __fdividef(1.0f, s_mean);
}
__syncthreads();
if(qual){
qk = __hmul2(tmp, __float2half2_rn(s_mean));
qk_buf_half2Ptr[qk_offset] = qk;
}
}
}
//grid = (seq_len/word_per_thread, batch_size, head_num)
//block.x = max(32, (seq_len + 31)/32*32)
//for seq_len not larger than 32
template <typename T>
__global__
void softmax_kernel_v3_LE32(T* qk_buf_, const T* attr_mask, const int batch_size, const int head_num, const int seq_len, const T scalar)
{
bool qual = threadIdx.x < seq_len;
for (int seq_id = blockIdx.x ; seq_id < seq_len ; seq_id += gridDim.x){
int qk_offset;
__shared__ float s_mean, s_max;
float tmp = -1e20f;
if (qual){
qk_offset = ((blockIdx.y*head_num + blockIdx.z)*seq_len + seq_id) *seq_len + threadIdx.x;
int mask_offset = (blockIdx.y * seq_len + seq_id) * seq_len + threadIdx.x;
float qk = static_cast<float>(qk_buf_[qk_offset]);
float mask_val = static_cast<float>(__ldg(&attr_mask[mask_offset]));
mask_val = (1.0f - mask_val) * -10000.0f;
tmp = static_cast<float>(qk) * static_cast<float>(scalar) + mask_val;
}
float max_val = warpReduceMax<float>(tmp);
if (threadIdx.x == 0){
s_max = max_val;
}
__syncthreads();
tmp = qual ? __expf(tmp - s_max) : 0.0f;
float sum_val = warpReduceSum<float>(tmp);
if (threadIdx.x == 0){
s_mean = sum_val + 1e-6f;
s_mean = __fdividef(1.0f, s_mean);
}
__syncthreads();
if(qual)
qk_buf_[qk_offset] = (T)(tmp * s_mean);
}
}
//int_buf is a series of sub-matrices of m = seq_len, n = seq_len, CUBLASLT_ORDER_COL32
//grid = (seq_len, batch_size, head_num)
//block.x = max(32, (seq_len/4 + 31)/32*32)
//for int32_t I; int8 O;
template <typename T>
__global__
void softmax_COL32(int8_t* qk_buf_, const int32_t* int_buf, const T* attr_mask, const int batch_size,
const int head_num, const int seq_len, const float scalar1a, const float *scalar1b,
const float *scalar1c, const float *amax_ptr, const int head_num_x_seq_len, const int seq_len_x_seq_len)
{
const float amax = __ldg(amax_ptr);
const float scalar1 = scalar1a * __ldg(scalar1b) * __ldg(scalar1c);
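//scalar1 folds the attention scale factor (scalar1a) with the two per-tensor factors loaded for the int8 Q and K
//inputs (assumed to be their dequantization scales), so a single multiply converts each int32 score to float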
int mask_id;
int threadIdx4 = threadIdx.x << 2;
char4* buf4Ptr = (char4 *)qk_buf_;
bool qual = threadIdx4 < seq_len;
for (int seq_id = blockIdx.x ; seq_id < seq_len ; seq_id += gridDim.x){
char4 tmp4;
float4 floatTmp4 = {0.0f, 0.0f, 0.0f, 0.0f};
int inIdx = (blockIdx.y * head_num + blockIdx.z) * (seq_len_x_seq_len) +
(threadIdx4 & 0xffffffe0) * seq_len +
(seq_id << 5) + (threadIdx4 & 31);
if (qual){
floatTmp4.x = static_cast<float>(__ldg(int_buf + inIdx)) * scalar1;
floatTmp4.y = static_cast<float>(__ldg(int_buf+inIdx+1)) * scalar1;
floatTmp4.z = static_cast<float>(__ldg(int_buf+inIdx+2)) * scalar1;
floatTmp4.w = static_cast<float>(__ldg(int_buf+inIdx+3)) * scalar1;
}
float mask_val, max_val;
max_val = -1e20f;
__shared__ float s_max, s_sum;
if (qual){
mask_id = threadIdx4 + blockIdx.y * seq_len_x_seq_len + seq_id * seq_len;
//for x
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask+mask_id))) * -10000.0f;
floatTmp4.x = floatTmp4.x + mask_val;
max_val = fmaxf(max_val, floatTmp4.x);
//for y
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask+mask_id+1))) * -10000.0f;
floatTmp4.y = floatTmp4.y + mask_val;
max_val = fmaxf(max_val, floatTmp4.y);
//for z
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask+mask_id+2))) * -10000.0f;
floatTmp4.z = floatTmp4.z + mask_val;
max_val = fmaxf(max_val, floatTmp4.z);
//for w
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask+mask_id+3))) * -10000.0f;
floatTmp4.w = floatTmp4.w + mask_val;
max_val = fmaxf(max_val, floatTmp4.w);
}
max_val = blockDim.x <= 32 ? warpReduceMax(max_val) : blockReduceMax<float>(max_val);
if (threadIdx.x == 0){
s_max = max_val;
}
__syncthreads();
float sum_val = 0.0f;
if (qual){
floatTmp4.x = __expf(floatTmp4.x - s_max);
sum_val += floatTmp4.x;
floatTmp4.y = __expf(floatTmp4.y - s_max);
sum_val += floatTmp4.y;
floatTmp4.z = __expf(floatTmp4.z - s_max);
sum_val += floatTmp4.z;
floatTmp4.w = __expf(floatTmp4.w - s_max);
sum_val += floatTmp4.w;
}
sum_val = blockDim.x <= 32 ? warpReduceSum(sum_val) : blockReduceSum<float>(sum_val);
if (threadIdx.x == 0){
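//fold the int8 requantization factor 127/amax into the softmax normalizer,
//so each quantized write below is a single multiply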
s_sum = __fdividef(127.0f, (sum_val + 1e-6f));
s_sum = __fdividef(s_sum, amax);
}
__syncthreads();
if (qual){
tmp4.x = float_to_int8_rn(floatTmp4.x*s_sum);
tmp4.y = float_to_int8_rn(floatTmp4.y*s_sum);
tmp4.z = float_to_int8_rn(floatTmp4.z*s_sum);
tmp4.w = float_to_int8_rn(floatTmp4.w*s_sum);
buf4Ptr[inIdx >> 2] = tmp4;
}
}
}
//int_buf is a series of sub-matrices of m = seq_len, n = seq_len, CUBLASLT_ORDER_COL32
//grid = (seq_len, batch_size, head_num)
//block.x = (seq_len + 31)/32
//for int32_t I; int8 O;
//for seq_len <= 32
template <typename T>
__global__
void softmax_COL32_LE32(int8_t* qk_buf_, const int32_t* int_buf, const T* attr_mask, const int batch_size,
const int head_num, const int seq_len, const float scalar1a, const float *scalar1b,
const float *scalar1c, const float *amax_ptr, const int head_num_x_seq_len, const int seq_len_x_seq_len)
{
const float amax = __ldg(amax_ptr);
const float scalar1 = scalar1a * __ldg(scalar1b) * __ldg(scalar1c);
int mask_id;
int threadIdxx = threadIdx.x;
bool qual = threadIdxx < seq_len;
for (int seq_id = blockIdx.x ; seq_id < seq_len ; seq_id += gridDim.x){
int inIdx = (blockIdx.y * head_num + blockIdx.z) * (seq_len_x_seq_len) +
(threadIdxx & 0xffffffe0) * seq_len +
(seq_id << 5) + (threadIdxx & 31);
float floatTmp = qual ? static_cast<float>(__ldg(int_buf + inIdx)) * scalar1 : 0.0f;
float mask_val, max_val;
__shared__ float s_max, s_sum;
mask_id = qual ? threadIdxx + blockIdx.y * seq_len_x_seq_len + seq_id * seq_len : 0;
mask_val = qual ? (1.0f - static_cast<float>(__ldg(attr_mask+mask_id))) * -10000.0f : 0.0f;
floatTmp = qual ? floatTmp + mask_val : 0.0f;
max_val = qual ? floatTmp : -1e20f;
max_val = blockDim.x <= 32 ? warpReduceMax(max_val) : blockReduceMax<float>(max_val);
if (threadIdx.x == 0){
s_max = max_val;
}
__syncthreads();
floatTmp = qual ? __expf(floatTmp - s_max) : 0.0f;
float sum_val = blockDim.x <= 32 ? warpReduceSum(floatTmp) : blockReduceSum<float>(floatTmp);
if (threadIdx.x == 0){
s_sum = __fdividef(127.0f, (sum_val + 1e-6f));
s_sum = __fdividef(s_sum, amax);
}
__syncthreads();
if (qual){
qk_buf_[inIdx] = float_to_int8_rn(floatTmp*s_sum);
}
}
}
//int_buf is a series of sub-matrices of m = seq_len, n = seq_len, CUBLASLT_ORDER_COL32
//grid = (seq_len, batch_size, head_num)
//block.x = max(32, (seq_len/2 + 31)/32*32)
//for int32_t I; int8 O;
//for seq_len in (32, 64]
template <typename T>
__global__
void softmax_COL32_LE64(int8_t* qk_buf_, const int32_t* int_buf, const T* attr_mask, const int batch_size,
const int head_num, const int seq_len, const float scalar1a, const float *scalar1b,
const float *scalar1c, const float *amax_ptr, const int head_num_x_seq_len, const int seq_len_x_seq_len)
{
const float amax = __ldg(amax_ptr);
const float scalar1 = scalar1a * __ldg(scalar1b) * __ldg(scalar1c);
int mask_id;
int threadIdx2 = threadIdx.x << 1;
char2* buf2Ptr = (char2 *)qk_buf_;
bool qual = threadIdx2 < seq_len;
for (int seq_id = blockIdx.x ; seq_id < seq_len ; seq_id += gridDim.x){
char2 tmp2;
float2 floatTmp2 = {0.0f, 0.0f};
int inIdx = (blockIdx.y * head_num + blockIdx.z) * (seq_len_x_seq_len) +
(threadIdx2 & 0xffffffe0) * seq_len +
(seq_id << 5) + (threadIdx2 & 31);
if (qual){
floatTmp2.x = static_cast<float>(__ldg(int_buf + inIdx)) * scalar1;
floatTmp2.y = static_cast<float>(__ldg(int_buf + inIdx + 1)) * scalar1;
}
float mask_val, max_val;
max_val = -1e20f;
__shared__ float s_max, s_sum;
if (qual){
mask_id = threadIdx2 + blockIdx.y * seq_len_x_seq_len + seq_id * seq_len;
//for x
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask+mask_id))) * -10000.0f;
floatTmp2.x = floatTmp2.x + mask_val;
//for y
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask+mask_id+1))) * -10000.0f;
floatTmp2.y = floatTmp2.y + mask_val;
max_val = fmaxf(floatTmp2.x, floatTmp2.y);
}
max_val = blockDim.x <= 32 ? warpReduceMax(max_val) : blockReduceMax<float>(max_val);
if (threadIdx.x == 0){
s_max = max_val;
}
__syncthreads();
float sum_val = 0.0f;
if (qual){
floatTmp2.x = __expf(floatTmp2.x - s_max);
sum_val += floatTmp2.x;
floatTmp2.y = __expf(floatTmp2.y - s_max);
sum_val += floatTmp2.y;
}
sum_val = blockDim.x <= 32 ? warpReduceSum(sum_val) : blockReduceSum<float>(sum_val);
if (threadIdx.x == 0){
s_sum = __fdividef(127.0f, (sum_val + 1e-6f));
s_sum = __fdividef(s_sum, amax);
}
__syncthreads();
if (qual){
tmp2.x = float_to_int8_rn(floatTmp2.x*s_sum);
tmp2.y = float_to_int8_rn(floatTmp2.y*s_sum);
buf2Ptr[inIdx >> 1] = tmp2;
}
}
}
template<typename T>
__global__
void transpose(T* src, T* dst, const int batch_size, const int seq_len, const int head_num, const int size_per_head)
{
int batch_id = blockIdx.x / (head_num * seq_len);
int seq_id = blockIdx.x % seq_len;
int head_id = (blockIdx.x % (head_num * seq_len))/ seq_len;
dst[batch_id * (head_num * seq_len * size_per_head) + seq_id * head_num * size_per_head
+ head_id * size_per_head + threadIdx.x] = src[blockIdx.x * size_per_head + threadIdx.x];
}
template<>
__global__
void transpose(half* src, half* dst,
const int batch_size, const int seq_len, const int head_num, const int size_per_head)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int batch_id = tid / (head_num * seq_len * size_per_head);
int head_id = (tid % (head_num * seq_len * size_per_head)) / (seq_len * size_per_head);
int seq_id = (tid % (seq_len * size_per_head)) / size_per_head;
int id = tid % size_per_head;
int target_id = target_index(batch_id, head_id, seq_id, id, batch_size, head_num, seq_len, size_per_head);
half2* src_ptr = (half2*)src;
half2* dst_ptr = (half2*)dst;
dst_ptr[target_id] = src_ptr[tid];
}
template<typename T>
__global__
void transpose_rebuild_padding(T* src, T* dst, const int batch_size, const int seq_len, const int head_num, const int size_per_head,
const int* mask_offset)
{
// TODO: optimize this kernel?
// do remove_sequence_length_padding
const int tid = threadIdx.x; // batch * seq_len or valid_word_num
const int bid = blockIdx.x; // head_num * size_per_head
const int src_batch_id = (bid + mask_offset[bid]) / seq_len;
const int src_seq_id = (bid + mask_offset[bid]) % seq_len;
const int dst_seq_id = bid;
const int head_id = tid / size_per_head;
const int hidden_id = tid % size_per_head;
dst[dst_seq_id * head_num * size_per_head + tid] = src[ src_batch_id * head_num * seq_len * size_per_head +
head_id * seq_len * size_per_head + src_seq_id * size_per_head + hidden_id];
}
template<typename T>
__global__ void rebuild_sequence_length_padding(const T* src, T* tgt,
const int* mask_offset,
const int n)
{
const int tid = threadIdx.x;
const int bid = blockIdx.x;
const int tgt_seq_id = bid + mask_offset[bid];
const int src_seq_id = bid;
for(int i = tid; i < n; i += blockDim.x)
{
tgt[tgt_seq_id * n + i] = src[src_seq_id * n + i];
}
}
template<OperationType OpType_>
void OpenMultiHeadAttention<OpType_>::multiHeadAttr_nofuse_kernelLauncher(
hipStream_t stream,
hipblasHandle_t cublas_handle,
cublasLtHandle_t cublaslt_handle,
DataType_* Q,
const DataType_* bias_Q,
DataType_* K,
const DataType_* bias_K,
DataType_* V,
const DataType_* bias_V,
const DataType_* attr_mask,
DataType_* dst,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const int int8_mode_,
const DataType_ scalar)
{
const int k = head_num * size_per_head;
dim3 grid;
dim3 block;
if (int8_mode_ != 0){
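//INT8 path: fuse bias-add with the COL32/COL4 layout transforms, run the batched int8 GEMMs through
//cublasLt, apply a fused softmax with requantization, then transpose back to the output layout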
//var for int8
const float*q_buf_addBias_amax_ptr, *k_buf_addBias_amax_ptr, *v_buf_addBias_amax_ptr, *qk_afterSM_amax_ptr, *qkv_amax_ptr, *in_amax_ptr;
q_buf_addBias_amax_ptr = param_.amaxList+4;
k_buf_addBias_amax_ptr = param_.amaxList + 8;
v_buf_addBias_amax_ptr = param_.amaxList + 12;
qk_afterSM_amax_ptr = param_.amaxList + 16;
qkv_amax_ptr = param_.amaxList + 20;
in_amax_ptr = param_.amaxList;
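//NOTE: the offsets used above (0, 4, 8, 12, 16, 20) suggest param_.amaxList packs four scale-related
//entries per tensor; the exact packing is assumed to be defined by the code that fills amaxList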
assert(seq_len % COL32_ == 0 && size_per_head%COL32_ == 0);
if(param_.sequence_id_offset == nullptr || param_.valid_word_num == batch_size * seq_len){
hipLaunchKernelGGL(( add_QK_bias_transform), dim3(dim3(batch_size*seq_len*2)), dim3(dim3((head_num * size_per_head)/4)), 0, stream, (int8_t*)q_buf_, (int8_t*)k_buf_, (const int32_t*) Q, bias_Q, (const int32_t*) K,
bias_K, batch_size * seq_len, batch_size, seq_len, head_num, size_per_head,
seq_len*size_per_head, query_weight_amax_list, in_amax_ptr+2, key_weight_amax_list,
in_amax_ptr+2, q_buf_addBias_amax_ptr+3, k_buf_addBias_amax_ptr+3);
hipLaunchKernelGGL(( add_V_bias_transform), dim3(dim3(size_per_head/32, seq_len/32, batch_size*head_num)), dim3(dim3(8, 32)), 0, stream, (int8_t*)v_buf_, (const int32_t *)V, bias_V, batch_size, seq_len,
head_num, size_per_head, seq_len*size_per_head, value_weight_amax_list,
in_amax_ptr+2, v_buf_addBias_amax_ptr+3);
}
else{
hipMemset(sequence_id_map_, -1, batch_size * seq_len * sizeof(int));
hipLaunchKernelGGL(( mappingRemovePaddingData), dim3(dim3((param_.valid_word_num + 63)/64)), dim3(dim3(64)), 0, 0, sequence_id_map_, param_.sequence_id_offset, param_.valid_word_num);
hipLaunchKernelGGL(( add_QK_bias_transform_rebuild_padding), dim3(dim3(param_.valid_word_num*2)), dim3(dim3((head_num * size_per_head)/4)), 0, stream, (int8_t*)q_buf_, (int8_t*)k_buf_, (const int32_t*) Q, bias_Q,
(const int32_t*) K, bias_K, param_.sequence_id_offset, param_.valid_word_num,
batch_size * seq_len, batch_size, seq_len, head_num, size_per_head, seq_len*size_per_head,
query_weight_amax_list, in_amax_ptr+2, key_weight_amax_list, in_amax_ptr+2,
q_buf_addBias_amax_ptr+3, k_buf_addBias_amax_ptr+3);
hipLaunchKernelGGL(( add_V_bias_transform_rebuild_padding), dim3(dim3(size_per_head/32, seq_len/32, batch_size*head_num)), dim3(dim3(8, 32)), 0, stream, (int8_t*)v_buf_, (const int32_t *)V, bias_V, sequence_id_map_,
param_.valid_word_num, batch_size, seq_len, head_num,
size_per_head, seq_len*size_per_head, value_weight_amax_list,
in_amax_ptr+2, v_buf_addBias_amax_ptr+3);
}
int batchCount = batch_size * head_num;
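//batched int8 GEMM via cublasLt: (COL32 Q) x (COL4 K) -> int32 attention scores (Q*K^T),
//one (seq_len x seq_len) matrix per (batch, head)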
cublasLtMM_withAlgo(qk_int_buf_, batchCount, seq_len, seq_len, size_per_head,
size_per_head*seq_len, size_per_head*seq_len, seq_len*seq_len,
(int8_t*)q_buf_, (int8_t*)k_buf_, cublaslt_handle, stream, cublasLtAlgoMap);
grid.x = seq_len;
grid.y = batch_size;
grid.z = head_num;
if (seq_len <= 32){
if (batch_size * head_num > 960)
grid.x = ceil(float(seq_len)/32.0f);
block.x = (seq_len + 31)/32*32;
hipLaunchKernelGGL(( softmax_COL32_LE32), dim3(grid), dim3(block), 0, stream, (int8_t*)qk_buf_, qk_int_buf_, attr_mask, batch_size, head_num,
seq_len, float(scalar), q_buf_addBias_amax_ptr + 1, k_buf_addBias_amax_ptr + 1,
qk_afterSM_amax_ptr, seq_len*head_num, seq_len*seq_len);
}
else if (seq_len <= 64){
assert(seq_len % 2 == 0);
block.x = (seq_len/2 + 31)/32*32;
if (batch_size * head_num > 960)
grid.x = ceil(float(seq_len)/32.0f);
hipLaunchKernelGGL(( softmax_COL32_LE64), dim3(grid), dim3(block), 0, stream, (int8_t*)qk_buf_, qk_int_buf_, attr_mask, batch_size, head_num,
seq_len, float(scalar), q_buf_addBias_amax_ptr + 1, k_buf_addBias_amax_ptr + 1,
qk_afterSM_amax_ptr, seq_len*head_num, seq_len*seq_len);
}
else
{
assert(seq_len % 4 == 0);
block.x = (seq_len/4 + 31)/32*32;
hipLaunchKernelGGL(( softmax_COL32), dim3(grid), dim3(block), 0, stream, (int8_t*)qk_buf_, qk_int_buf_, attr_mask, batch_size, head_num,
seq_len, float(scalar), q_buf_addBias_amax_ptr + 1, k_buf_addBias_amax_ptr + 1,
qk_afterSM_amax_ptr, seq_len*head_num, seq_len*seq_len);
}
cublasLtMM_withAlgo(transpose_dst_int_buf_, batchCount, seq_len, size_per_head, seq_len,
seq_len*seq_len, size_per_head*seq_len, size_per_head*seq_len, (int8_t*)qk_buf_,
(int8_t*)v_buf_, cublaslt_handle, stream, cublasLtAlgoMap);
if(param_.sequence_id_offset == nullptr || param_.valid_word_num == batch_size * seq_len){
transpose_COL32_kernelLauncher((int8_t*)dst, (const int*)transpose_dst_int_buf_, batch_size, seq_len, head_num,
size_per_head, v_buf_addBias_amax_ptr+1, qk_afterSM_amax_ptr+1, qkv_amax_ptr+3, stream);
}
else{
transpose_COL32_rebuild_padding_kernelLauncher((int8_t*)dst, (const int*)transpose_dst_int_buf_, sequence_id_map_,
param_.valid_word_num, batch_size, seq_len, head_num, size_per_head,
v_buf_addBias_amax_ptr+1, qk_afterSM_amax_ptr+1, qkv_amax_ptr+3, stream);
}
}
//FP32/FP16
else{
if(OpType_ == OperationType::FP32)
{
if(param_.sequence_id_offset == nullptr || param_.valid_word_num == batch_size * seq_len)
{
const int m = batch_size * seq_len;
const int word_per_block = 1;
assert(k <= 1024);
assert(m / word_per_block * 3 <= 65536);
dim3 grid(m / word_per_block * 3);
dim3 block(k);
hipLaunchKernelGGL(( add_QKV_bias<DataType_>), dim3(grid), dim3(block), 0, stream, Q, bias_Q, K, bias_K, V, bias_V, q_buf_, k_buf_, v_buf_,
batch_size, seq_len, head_num, size_per_head, word_per_block);
}
else
{
hipLaunchKernelGGL(( add_QKV_bias_rebuild_padding<DataType_>), dim3(param_.valid_word_num), dim3(k), 0, stream, Q, bias_Q, K, bias_K,
V, bias_V, q_buf_, k_buf_, v_buf_,
batch_size, seq_len, head_num, size_per_head, param_.sequence_id_offset);
}
}
else
{
if(param_.sequence_id_offset == nullptr || param_.valid_word_num == batch_size * seq_len)
{
const int word_per_block = 1;
grid.x = batch_size * seq_len / word_per_block;
block.x = head_num * size_per_head * word_per_block / 2;
assert(block.x <= 1024);
hipLaunchKernelGGL(( add_QKV_bias<DataType_>), dim3(grid), dim3(block), 0, stream, Q, bias_Q, K, bias_K, V, bias_V, q_buf_, k_buf_,
v_buf_, batch_size, seq_len, head_num, size_per_head / 2, word_per_block);
}
else
{
hipLaunchKernelGGL(( add_QKV_bias_rebuild_padding<half2>), dim3(param_.valid_word_num), dim3(k / 2), 0, stream, (half2*)Q, (const half2*)bias_Q,
(half2*)K, (const half2*)bias_K, (half2*)V, (const half2*)bias_V,
(half2*)q_buf_, (half2*)k_buf_, (half2*)v_buf_,
batch_size, seq_len, head_num, size_per_head / 2, param_.sequence_id_offset);
}
}
DataType_ alpha = (DataType_)1.0f, beta = (DataType_)0.0f;
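//strided batched GEMM computing the raw attention scores Q*K^T (one seq_len x seq_len matrix per head)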
check_cuda_error(hipblasGemmStridedBatchedEx(cublas_handle,
HIPBLAS_OP_T, HIPBLAS_OP_N,
seq_len, seq_len, size_per_head,
&alpha,
k_buf_, AType_, size_per_head, seq_len * size_per_head,
q_buf_, BType_, size_per_head, seq_len * size_per_head,
&beta,
qk_buf_, CType_, seq_len, seq_len * seq_len,
batch_size * head_num,
computeType_,
static_cast<hipblasGemmAlgo_t>(cublasAlgo_[1])));
//deal with odd seq_len
if (seq_len % 2 != 0){
if(seq_len <= 32)
block.x = 32;
else if(seq_len > 32 && seq_len <= 64)
block.x = 64;
else if(seq_len > 64 && seq_len <= 128)
block.x = 128;
else if(seq_len > 128 && seq_len <= 256)
block.x = 256;
else if(seq_len > 256 && seq_len <= 512)
block.x = 512;
else
block.x = 1024;
if(batch_size * head_num <= 120)
{
grid.x = batch_size * head_num * seq_len;
hipLaunchKernelGGL(( softmax_kernel_v2<DataType_>), dim3(grid), dim3(block), 0, stream, qk_buf_, attr_mask, batch_size, head_num, seq_len, scalar);
}
else
{
grid.x = batch_size * head_num;
hipLaunchKernelGGL(( softmax_kernel<DataType_>), dim3(grid), dim3(block), 0, stream, qk_buf_, attr_mask, batch_size, head_num, seq_len, scalar);
}
}
//deal with even seq_len
else{
grid.x = seq_len;
if (batch_size * head_num > 360)
grid.x = ceil(float(seq_len)/32.0f);
grid.y = batch_size;
grid.z = head_num;
if (seq_len <= 32){
block.x = 32;
hipLaunchKernelGGL(( softmax_kernel_v3_LE32<DataType_>), dim3(grid), dim3(block), 0, stream, qk_buf_, attr_mask, batch_size, head_num, seq_len, scalar);
}
else{
if (OpType_ == OperationType::FP16){
block.x = (seq_len/2 + 31)/32*32;
hipLaunchKernelGGL(( softmax_kernel_v3), dim3(grid), dim3(block), 0, stream, qk_buf_, attr_mask, batch_size, head_num, seq_len, scalar);
}
else{
block.x = (seq_len + 31)/32*32;
hipLaunchKernelGGL(( softmax_kernel_v3<DataType_>), dim3(grid), dim3(block), 0, stream, qk_buf_, attr_mask, batch_size, head_num, seq_len, scalar);
}
}
grid.x = grid.y = grid.z = 1;
}
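//strided batched GEMM applying the softmax-ed scores to V, producing the per-head context
//(transposed back to the [batch, seq, head, size] output layout below)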
check_cuda_error(hipblasGemmStridedBatchedEx(cublas_handle,
HIPBLAS_OP_N, HIPBLAS_OP_N,
size_per_head, seq_len, seq_len,
&alpha,
v_buf_, AType_, size_per_head, seq_len * size_per_head,
qk_buf_, BType_, seq_len, seq_len * seq_len,
&beta,
transpose_dst_, CType_, size_per_head, seq_len * size_per_head,
batch_size * head_num,
computeType_,
static_cast<hipblasGemmAlgo_t>(cublasAlgo_[2])));
/* for half2 only */
if(OpType_ == OperationType::FP16)
{
if(param_.sequence_id_offset == nullptr || param_.valid_word_num == batch_size * seq_len)
{
const int seq_per_block = 4;
grid.x = batch_size * head_num * seq_len / seq_per_block;
block.x = seq_per_block * size_per_head / 2;
assert(grid.x * seq_per_block == batch_size * head_num * seq_len);
hipLaunchKernelGGL(( transpose<DataType_>), dim3(grid), dim3(block), 0, stream, transpose_dst_, dst,
batch_size, seq_len, head_num, size_per_head / 2);
}
else
{
hipLaunchKernelGGL(( transpose_rebuild_padding<half2>), dim3(param_.valid_word_num), dim3(k / 2), 0, stream,
(half2*)transpose_dst_, (half2*)dst,
batch_size, seq_len, head_num, size_per_head / 2, param_.sequence_id_offset);
}
}
else
{
if(param_.sequence_id_offset == nullptr || param_.valid_word_num == batch_size * seq_len)
{
const int seq_per_block = 1;
grid.x = batch_size * head_num * seq_len / seq_per_block;
block.x = seq_per_block * size_per_head;
hipLaunchKernelGGL(( transpose<DataType_>), dim3(grid), dim3(block), 0, stream, transpose_dst_, dst,
batch_size, seq_len, head_num, size_per_head);
}
else
{
hipLaunchKernelGGL(( transpose_rebuild_padding<DataType_>), dim3(param_.valid_word_num), dim3(k), 0, stream, transpose_dst_, dst,
batch_size, seq_len, head_num, size_per_head, param_.sequence_id_offset);
}
}
}
}
template void OpenMultiHeadAttention<OperationType::FP32>::multiHeadAttr_nofuse_kernelLauncher(
hipStream_t stream,
hipblasHandle_t handle,
cublasLtHandle_t cublaslt_handle,
float* Q,
const float* bias_Q,
float* K,
const float* bias_K,
float* V,
const float* bias_V,
const float* attr_mask,
float* dst,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const int int8_mode_,
const float scalar);
template void OpenMultiHeadAttention<OperationType::FP16>::multiHeadAttr_nofuse_kernelLauncher(
hipStream_t stream,
hipblasHandle_t handle,
cublasLtHandle_t cublaslt_handle,
half* Q,
const half* bias_Q,
half* K,
const half* bias_K,
half* V,
const half* bias_V,
const half* attr_mask,
half* dst,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const int int8_mode_,
const half scalar);
}//namespace cuda
}//namespace fastertransformer
| 3da2c88173b53f65a6964f6f6f257b89f2df6b7d.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Open sourced multi-head attention
**/
#include "fastertransformer/allocator.h"
#include "fastertransformer/cuda/multi_head_attention.h"
#include "fastertransformer/cuda/open_attention.h"
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cmath>
namespace fastertransformer{
namespace cuda{
/**
* Multi-head attention open sourced
*/
#define FINAL_MASK 0xffffffff
template <typename T>
__inline__ __device__
T warpReduceSum(T val)
{
#pragma unroll
for(int mask = 16; mask > 0; mask >>= 1)
val += __shfl_xor_sync(FINAL_MASK, val, mask, 32);
return val;
}
/* Calculate the sum of all elements in a block */
template <typename T>
__inline__ __device__
T blockReduceSum(T val)
{
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f;
int wid = threadIdx.x >> 5;
val = warpReduceSum<T>(val);
if(lane == 0)
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : (T)(0.0f);
val = warpReduceSum<T>(val);
return val;
}
template <typename T>
__inline__ __device__
T warpReduceMax(T val)
{
#pragma unroll
for(int mask = 16; mask > 0; mask >>= 1)
val = max(val, __shfl_xor_sync(FINAL_MASK, val, mask, 32));
return val;
}
/* Calculate the maximum of all elements in a block */
template <typename T>
__inline__ __device__
T blockReduceMax(T val)
{
static __shared__ T shared[32];
int lane = threadIdx.x & 0x1f; // in-warp idx
int wid = threadIdx.x >> 5; // warp idx
val = warpReduceMax(val); // get max in each warp
if(lane == 0) // record in-warp max by warp idx
shared[wid] = val;
__syncthreads();
val = (threadIdx.x < (blockDim.x >> 5 )) ? shared[lane] : -1e20f;
val = warpReduceMax(val);
return val;
}
__inline__ __device__
int target_index(int id1, int id2, int id3, int id4, int dim_1, int dim_2, int dim_3, int dim_4)
{
return id1 * (dim_2 * dim_3 * dim_4) + id3 * (dim_2 * dim_4) + id2 * dim_4 + id4;
}
//build a mapping for fullData to removePaddingData
//grid((valid_word_num+63)/64)
//block(64)
__global__ void mappingRemovePaddingData(int *mapping, const int* sequence_id_offset, const int valid_word_num){
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < valid_word_num)
mapping[idx + __ldg(sequence_id_offset + idx)] = idx;
}
//add_QK_bias_transform for batch int8 cublasLtMatmul & per axis quantization for weight
//1.add QK bias
//2.transform each Q/K CUBLASLT_ORDER_COL32 matrix into a series of sub-matrices (with CUBLASLT_ORDER_COL32/CUBLASLT_ORDER_COL4_4R2_8C layout)
// Q, K are CUBLASLT_ORDER_COL32 matrices of m = batch_size * seq_len, n = head_num * size_per_head
// q_buf_ is of batchCount = batch_size * head_num, m = seq_len, n = size_per_head, CUBLASLT_ORDER_COL32
// k_buf_ is of batchCount = batch_size * head_num, m = seq_len, n = size_per_head, CUBLASLT_ORDER_COL4_4R2_8C
//only for int32 input & int8 output
//seq_len, size_per_head must be a multiple of 32
//grid.x = batch_size * seq_len * 2;
//block.x = head_num * size_per_head / 4;
//using char4
template <typename T>
__global__
void add_QK_bias_transform(int8_t *q_buf_, int8_t *k_buf_, const int32_t* Q, const T* bias_Q,
const int32_t* K, const T* bias_K, const int m, const int batch_size,
const int seq_len, const int head_num, const int size_per_head, int stride,
const float * q_weight_amax, const float *q_input_deQFactor_div127_ptr, const float * k_weight_amax,
const float *k_input_deQFactor_div127_ptr, const float *q_output_scale_ptr, const float *k_output_scale_ptr)
{
const int32_t* data_ptr;
char4* buf_ptr4;
const T* bias_ptr;
const float* weight_amax;
int qk_id = blockIdx.x / m;
data_ptr = qk_id == 0 ? Q : K;
buf_ptr4 = qk_id == 0 ? (char4*)q_buf_ : (char4*)k_buf_;
bias_ptr = qk_id == 0 ? bias_Q : bias_K;
const float input_deQFactor_div127 = qk_id == 0 ? __ldg(q_input_deQFactor_div127_ptr) : __ldg(k_input_deQFactor_div127_ptr);
weight_amax = qk_id == 0 ? q_weight_amax : k_weight_amax;
const float output_scale = qk_id == 0 ? __ldg(q_output_scale_ptr) : __ldg(k_output_scale_ptr);
int threadIdx4 = threadIdx.x << 2;
int batch_id = (blockIdx.x % m) / seq_len;
int head_id = threadIdx4 / size_per_head;
int id_in_head = threadIdx4 % size_per_head;
int word_id = blockIdx.x % seq_len;
int data_id = (((threadIdx4 >> 5) << 5)*m + ((blockIdx.x%m) << 5) + (threadIdx4&31));
float scale;
float tmp;
char4 tmp4;
scale = static_cast<float>(__ldg(data_ptr+data_id)) * __ldg(weight_amax+threadIdx4) * input_deQFactor_div127;
tmp = static_cast<float>(__ldg(bias_ptr+threadIdx4)) + scale;
tmp4.x = float_to_int8_rn(tmp*output_scale);
data_id = data_id+1;
threadIdx4 = threadIdx4+1;
scale = static_cast<float>(__ldg(data_ptr+data_id)) * __ldg(weight_amax+threadIdx4)* input_deQFactor_div127;
tmp = static_cast<float>(__ldg(bias_ptr+threadIdx4)) + scale;
tmp4.y = float_to_int8_rn(tmp*output_scale);
data_id = data_id+1;
threadIdx4 = threadIdx4+1;
scale = static_cast<float>(__ldg(data_ptr+data_id)) * __ldg(weight_amax+threadIdx4) * input_deQFactor_div127;
tmp = static_cast<float>(__ldg(bias_ptr+threadIdx4)) + scale;
tmp4.z = float_to_int8_rn(tmp*output_scale);
data_id = data_id+1;
threadIdx4 = threadIdx4+1;
scale = static_cast<float>(__ldg(data_ptr+data_id)) * __ldg(weight_amax+threadIdx4) * input_deQFactor_div127;
tmp = static_cast<float>(__ldg(bias_ptr+threadIdx4)) + scale;
tmp4.w = float_to_int8_rn(tmp*output_scale);
//row_id, col_id of sub-matrix (m = seq_len, n = size_per_head), column-major
int row_id = word_id;
int col_id = id_in_head;
  //new (row, col) of LtTrans COL32/COL4 sub-matrix, leading dim = (COL32_ * seq_len)
int new_col = col_id >> 5;
int new_row = (qk_id != 1) ?
//COL32
((row_id << 5) + (col_id&31))
:
//COL4
                                 ////row_id/8 is the index of the (8-row x 32-column) tile -- column-major
                                 ////row_id%2 == 0 means an even row, otherwise an odd row
                                 ////col_id%COL32_/8 is the index of the (8-row x 8-column) tile
(
((((row_id >> 3) << 3) + ((row_id&1) << 2) + ((col_id&31) >> 3)) << 5) +
////col_id%8 >= 4 is the right half of (8 rows 8 columns) tile
////(row_id%8/2) is (the row id of alternating 4 rows) - 1
(((((col_id&7) >= 4)?4:0) + ((row_id&7) >> 1)) << 2) +
////col_id%4 is the id of 4 cols
(col_id&3)
)
;
buf_ptr4[(((batch_id*head_num + head_id) * stride + (new_col << 5)*seq_len + new_row) >> 2)] = tmp4;
}
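// Illustrative arithmetic check of the COL4_4R2_8C index above (not part of the
// original code): for row_id = 5, col_id = 13,
//   ((5>>3)<<3) + ((5&1)<<2) + ((13&31)>>3) = 0 + 4 + 1 = 5, << 5 -> 160
//   (((13&7)>=4 ? 4 : 0) + ((5&7)>>1))      = 4 + 2     = 6, << 2 -> 24
//   (13&3)                                                        -> 1
// giving new_row = 185 inside the 32-column COL4 block, while new_col = 13>>5 = 0.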
//add_QK_bias_transform & rebuild padding for batch int8 cublasLtMatmul & per axis quantization for weight
//1.add QK bias
//2.transform each Q/K CUBLASLT_ORDER_COL32 matrix into a series of sub-matrices (with CUBLASLT_ORDER_COL32/CUBLASLT_ORDER_COL4_4R2_8C layout)
//  Q, K are CUBLASLT_ORDER_COL32 matrices of m = valid_word_num, n = head_num * size_per_head
// q_buf_ is of batchCount = batch_size * head_num, m = seq_len, n = size_per_head, CUBLASLT_ORDER_COL32
// k_buf_ is of batchCount = batch_size * head_num, m = seq_len, n = size_per_head, CUBLASLT_ORDER_COL4_4R2_8C
//only for int32 input & int8 output
//seq_len, size_per_head must be a multiple of 32
//grid.x = valid_word_num * 2;
//block.x = head_num * size_per_head / 4;
//using char4
template <typename T>
__global__
void add_QK_bias_transform_rebuild_padding(int8_t *q_buf_, int8_t *k_buf_, const int32_t* Q, const T* bias_Q,
const int32_t* K, const T* bias_K, const int* sequence_id_offset,
const int valid_word_num, const int m, const int batch_size, const int seq_len,
const int head_num, const int size_per_head, int stride, const float * q_weight_amax,
const float *q_input_deQFactor_div127_ptr, const float * k_weight_amax,
const float *k_input_deQFactor_div127_ptr, const float *q_output_scale_ptr, const float *k_output_scale_ptr)
{
const int32_t* data_ptr;
char4* buf_ptr4;
const T* bias_ptr;
const float* weight_amax;
int qk_id = blockIdx.x / valid_word_num;
data_ptr = qk_id == 0 ? Q : K;
buf_ptr4 = qk_id == 0 ? (char4*)q_buf_ : (char4*)k_buf_;
bias_ptr = qk_id == 0 ? bias_Q : bias_K;
int threadIdx4 = threadIdx.x << 2;
int m_full_idx = blockIdx.x % valid_word_num;
m_full_idx = (valid_word_num != m) ? (m_full_idx + __ldg(sequence_id_offset+m_full_idx)) : m_full_idx;
int batch_id = m_full_idx / seq_len;
int head_id = threadIdx4 / size_per_head;
int id_in_head = threadIdx4 % size_per_head;
int word_id = m_full_idx % seq_len;
const float input_deQFactor_div127 = qk_id == 0 ? __ldg(q_input_deQFactor_div127_ptr) : __ldg(k_input_deQFactor_div127_ptr);
weight_amax = qk_id == 0 ? q_weight_amax : k_weight_amax;
const float output_scale = qk_id == 0 ? __ldg(q_output_scale_ptr) : __ldg(k_output_scale_ptr);
int data_id = (((threadIdx4 >> 5) << 5)*valid_word_num + ((blockIdx.x%valid_word_num) << 5) + (threadIdx4&31));
float scale;
float tmp;
char4 tmp4;
scale = static_cast<float>(__ldg(data_ptr+data_id)) * __ldg(weight_amax+threadIdx4) * input_deQFactor_div127;
tmp = static_cast<float>(__ldg(bias_ptr+threadIdx4)) + scale;
tmp4.x = float_to_int8_rn(tmp*output_scale);
data_id = data_id+1;
threadIdx4 = threadIdx4+1;
scale = static_cast<float>(__ldg(data_ptr+data_id)) * __ldg(weight_amax+threadIdx4)* input_deQFactor_div127;
tmp = static_cast<float>(__ldg(bias_ptr+threadIdx4)) + scale;
tmp4.y = float_to_int8_rn(tmp*output_scale);
data_id = data_id+1;
threadIdx4 = threadIdx4+1;
scale = static_cast<float>(__ldg(data_ptr+data_id)) * __ldg(weight_amax+threadIdx4) * input_deQFactor_div127;
tmp = static_cast<float>(__ldg(bias_ptr+threadIdx4)) + scale;
tmp4.z = float_to_int8_rn(tmp*output_scale);
data_id = data_id+1;
threadIdx4 = threadIdx4+1;
scale = static_cast<float>(__ldg(data_ptr+data_id)) * __ldg(weight_amax+threadIdx4) * input_deQFactor_div127;
tmp = static_cast<float>(__ldg(bias_ptr+threadIdx4)) + scale;
tmp4.w = float_to_int8_rn(tmp*output_scale);
//row_id, col_id of sub-matrix (m = seq_len, n = size_per_head), column-major
int row_id = word_id;
int col_id = id_in_head;
  //new (row, col) of LtTrans COL32/COL4 sub-matrix, leading dim = (COL32_ * seq_len)
int new_col = col_id >> 5;
int new_row = (qk_id != 1) ?
//COL32
((row_id << 5) + (col_id&31))
:
//COL4
                                 ////row_id/8 is the index of the (8-row x 32-column) tile -- column-major
                                 ////row_id%2 == 0 means an even row, otherwise an odd row
                                 ////col_id%COL32_/8 is the index of the (8-row x 8-column) tile
(
((((row_id >> 3) << 3) + ((row_id&1) << 2) + ((col_id&31) >> 3)) << 5) +
////col_id%8 >= 4 is the right half of (8 rows 8 columns) tile
////(row_id%8/2) is (the row id of alternating 4 rows) - 1
(((((col_id&7) >= 4)?4:0) + ((row_id&7) >> 1)) << 2) +
////col_id%4 is the id of 4 cols
(col_id&3)
)
;
buf_ptr4[(((batch_id*head_num + head_id) * stride + (new_col << 5)*seq_len + new_row) >> 2)] = tmp4;
}
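// Example launch configuration (illustrative only; the concrete sizes are
// assumptions, the grid/block formulas come from the comment above): for
// head_num = 12, size_per_head = 64 and valid_word_num = 900 this kernel would
// be launched with grid.x = 900*2 = 1800 and block.x = 12*64/4 = 192, matching
// the launch in multiHeadAttr_nofuse_kernelLauncher below.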
//the input is a matrix of m = batch_size*seq_len, n = head_num*size_per_head, CUBLASLT_ORDER_COL32
//the output is a series of sub-matrices of m = size_per_head, n = seq_len, CUBLASLT_ORDER_COL4_4R2_8C
//only for int32_t Input int8_t Output
//seq_len, size_per_head must be a multiple of 32
//grid = (size_per_head/32, seq_len/32, batch_size*head_num)
//block = (8, 32);
//using char4
//per axis quantization for weight
template <typename T>
__global__
void add_V_bias_transform(int8_t *v_buf_, const int32_t *V, const T *V_bias, const int batch_size, const int seq_len,
const int head_num, const int size_per_head, int stride, const float* weight_amax,
const float *input_deQFactor_div127_ptr, const float *out_scale_ptr)
{
const float input_deQFactor_div127 = __ldg(input_deQFactor_div127_ptr);
const float out_scale = __ldg(out_scale_ptr);
__shared__ int8_t shm[32][33];
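  // Descriptive note (added comment): the extra padding column (33 instead of 32)
  // gives each row of shm a stride of 33 bytes, so the transposed read
  // shm[sh_col][sh_row] below touches different shared-memory banks and avoids
  // bank conflicts.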
const int32_t* data_ptr = V;
char4* buf_ptr4 = (char4*) v_buf_;
const T* bias_ptr = V_bias;
int threadIdx4 = threadIdx.x << 2;
//for src of (seq_len, size_per_head)
int batch_id = blockIdx.z/head_num;
int head_id = blockIdx.z%head_num;
int word_id = (blockIdx.y << 5) + threadIdx.y;
int id_in_size = (blockIdx.x << 5) + threadIdx4;
//for V layout (batch_size*seq_len, head_num*size_per_head)
int col = head_id*size_per_head + id_in_size;
int row = batch_id*seq_len + word_id;
int inIdx = (((col >> 5) << 5)*batch_size*seq_len + ((row << 5) + (col&31)));
//for shm row-major
int sh_col = threadIdx4;
int sh_row = threadIdx.y;
float tmp;
float scale;
//const half2* bias_ptr2 = (const half2*)bias_ptr;
//half2 tmp2;
//tmp2 = __ldg(&bias_ptr2[col >> 1]);
scale = __ldg(data_ptr + inIdx) * __ldg(weight_amax + col) * input_deQFactor_div127;
tmp = scale + static_cast<float>(__ldg(bias_ptr + col));//(tmp2.x);
shm[sh_row][sh_col] = float_to_int8_rn(tmp*out_scale);
scale = __ldg(data_ptr + inIdx + 1) * __ldg(weight_amax + col + 1) * input_deQFactor_div127;
tmp = scale + static_cast<float>(__ldg(bias_ptr+col+1));//(tmp2.y);
shm[sh_row][sh_col+1] = float_to_int8_rn(tmp*out_scale);
//tmp2 = __ldg(&bias_ptr2[(col >> 1) + 1]);
scale = __ldg(data_ptr+inIdx+2) * __ldg(weight_amax+col+2) * input_deQFactor_div127;
tmp = scale + static_cast<float>(__ldg(bias_ptr+col+2));//(tmp2.x);
shm[sh_row][sh_col+2] = float_to_int8_rn(tmp*out_scale);
scale = __ldg(data_ptr+inIdx + 3) * __ldg(weight_amax+col+3) * input_deQFactor_div127;
tmp = scale + static_cast<float>(__ldg(bias_ptr+col+3));//(tmp2.y);
shm[sh_row][sh_col+3] = float_to_int8_rn(tmp*out_scale);
__syncthreads();
//for dst of (size_per_head, seq_len)
word_id = (blockIdx.y << 5) + threadIdx4;
id_in_size = (blockIdx.x << 5) + threadIdx.y;
col = (word_id >> 5);
row = (
//COL4
          ////id_in_size/8 is the index of the (8-row x 32-column) tile -- column-major
          ////id_in_size%2 == 0 means an even row, otherwise an odd row
          ////word_id%COL32_/8 is the index of the (8-row x 8-column) tile
((((id_in_size >> 3) << 3) + ((id_in_size&1) << 2) + ((word_id&31) >> 3)) << 5) +
////word_id%8 >= 4 is the right half of (8 rows 8 columns) tile
////(id_in_size%8/2) is (the row id of alternating 4 rows) - 1
(((((word_id&7) >= 4)?4:0) + ((id_in_size&7) >> 1)) << 2) +
////word_id%4 is the id of 4 cols
(word_id&3)
);
char4 dataTmp;
dataTmp.x = shm[sh_col][sh_row];
dataTmp.y = shm[sh_col+1][sh_row];
dataTmp.z = shm[sh_col+2][sh_row];
dataTmp.w = shm[sh_col+3][sh_row];
buf_ptr4[(blockIdx.z*stride + (col << 5)*size_per_head + row) >> 2] = dataTmp;
}
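// Example launch configuration (illustrative only; the tensor sizes are
// assumptions, the grid/block shape follows the comment above and the launch in
// multiHeadAttr_nofuse_kernelLauncher below): for batch_size = 8, head_num = 12,
// seq_len = 128, size_per_head = 64:
//   add_V_bias_transform<<<dim3(64/32, 128/32, 8*12), dim3(8, 32), 0, stream>>>(
//       (int8_t*)v_buf_, (const int32_t*)V, bias_V, 8, 128, 12, 64,
//       128*64, value_weight_amax_list, in_amax_ptr+2, v_buf_addBias_amax_ptr+3);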
template <>
__global__
void add_V_bias_transform(int8_t *v_buf_, const int32_t *V, const half *V_bias, const int batch_size, const int seq_len,
const int head_num, const int size_per_head, int stride, const float* weight_amax,
const float *input_deQFactor_div127_ptr, const float *out_scale_ptr)
{
const float input_deQFactor_div127 = __ldg(input_deQFactor_div127_ptr);
const float out_scale = __ldg(out_scale_ptr);
__shared__ int8_t shm[32][33];
const int32_t* data_ptr = V;
char4* buf_ptr4 = (char4*) v_buf_;
int threadIdx4 = threadIdx.x << 2;
//for src of (seq_len, size_per_head)
int batch_id = blockIdx.z/head_num;
int head_id = blockIdx.z%head_num;
int blockIdy32 = (blockIdx.y << 5);
int blockIdx32 = (blockIdx.x << 5);
int word_id = blockIdy32 + threadIdx.y;
int id_in_size = blockIdx32 + threadIdx4;
//for V layout (batch_size*seq_len, head_num*size_per_head)
int col = head_id*size_per_head + id_in_size;
int row = batch_id*seq_len + word_id;
int inIdx = ((col & 0xffffffe0)*batch_size*seq_len + ((row << 5) + (col&31)));
//for shm row-major
int sh_col = threadIdx4;
int sh_row = threadIdx.y;
int col_2 = col >> 1;
float scale;
const half2* bias_ptr2 = (const half2*)V_bias;
half2 tmp2;
tmp2 = __ldg(bias_ptr2+col_2);
scale = __ldg(data_ptr+inIdx) * __ldg(weight_amax+col) * input_deQFactor_div127;
scale = scale + static_cast<float>(tmp2.x);
shm[sh_row][sh_col] = float_to_int8_rn(scale*out_scale);
scale = __ldg(data_ptr+inIdx+1) * __ldg(weight_amax+col+1) * input_deQFactor_div127;
scale = scale + static_cast<float>(tmp2.y);
shm[sh_row][sh_col+1] = float_to_int8_rn(scale*out_scale);
tmp2 = __ldg(bias_ptr2 + col_2 + 1);
scale = __ldg(data_ptr + inIdx + 2) * __ldg(weight_amax + col + 2) * input_deQFactor_div127;
scale = scale + static_cast<float>(tmp2.x);
shm[sh_row][sh_col+2] = float_to_int8_rn(scale*out_scale);
scale = __ldg(data_ptr + inIdx + 3) * __ldg(weight_amax + col + 3) * input_deQFactor_div127;
scale = scale + static_cast<float>(tmp2.y);
shm[sh_row][sh_col+3] = float_to_int8_rn(scale*out_scale);
__syncthreads();
//for dst of (size_per_head, seq_len)
word_id = blockIdy32 + threadIdx4;
id_in_size = blockIdx32 + threadIdx.y;
col = (word_id >> 5);
row = (
//COL4
          ////id_in_size/8 is the index of the (8-row x 32-column) tile -- column-major
          ////id_in_size%2 == 0 means an even row, otherwise an odd row
          ////word_id%COL32_/8 is the index of the (8-row x 8-column) tile
(((id_in_size & 0xfffffff8) + ((id_in_size&1) << 2) + ((word_id&31) >> 3)) << 5) +
////word_id%8 >= 4 is the right half of (8 rows 8 columns) tile
////(id_in_size%8/2) is (the row id of alternating 4 rows) - 1
(((((word_id&7) >= 4)?4:0) + ((id_in_size&7) >> 1)) << 2) +
////word_id%4 is the id of 4 cols
(word_id&3)
);
char4 dataTmp;
dataTmp.x = shm[sh_col][sh_row];
dataTmp.y = shm[sh_col+1][sh_row];
dataTmp.z = shm[sh_col+2][sh_row];
dataTmp.w = shm[sh_col+3][sh_row];
buf_ptr4[(blockIdx.z*stride + (col << 5)*size_per_head + row) >> 2] = dataTmp;
}
//add bias into V & rebuild padding
//the input is a matrix of m = valid_word_num, n = head_num*size_per_head, CUBLASLT_ORDER_COL32
//the output is a series of sub-matrices of m = size_per_head, n = seq_len, CUBLASLT_ORDER_COL4_4R2_8C
//only for int32_t Input int8_t Output
//seq_len, size_per_head must be a multiple of 32
//grid = (size_per_head/32, seq_len/32, batch_size*head_num)
//block = (8, 32);
//using char4
//per axis quantization for weight
template <typename T>
__global__
void add_V_bias_transform_rebuild_padding(int8_t *v_buf_, const int32_t *V, const T *V_bias, const int* sequence_id_map, const int valid_word_num,
const int batch_size, const int seq_len, const int head_num, const int size_per_head, int stride,
const float* weight_amax, const float *input_deQFactor_div127_ptr, const float *out_scale_ptr)
{
__shared__ int8_t shm[32][33];
const int32_t* data_ptr = V;
char4* buf_ptr4 = (char4*) v_buf_;
const T* bias_ptr = V_bias;
int threadIdx4 = threadIdx.x << 2;
//for src of (seq_len, size_per_head)
int batch_id = blockIdx.z/head_num;
int head_id = blockIdx.z%head_num;
int word_id = (blockIdx.y << 5) + threadIdx.y;
int id_in_size = (blockIdx.x << 5) + threadIdx4;
//for shm row-major
int sh_col = threadIdx4;
int sh_row = threadIdx.y;
//for V layout (batch_size*seq_len, head_num*size_per_head)
int col;
int row = __ldg(sequence_id_map + batch_id*seq_len + word_id);
if (row != -1){
col = head_id*size_per_head + id_in_size;
int inIdx = ((col & 0xffffffe0)*valid_word_num + ((row << 5) + (col&31)));
float tmp;
float scale;
const float input_deQFactor_div127 = __ldg(input_deQFactor_div127_ptr);
const float out_scale = __ldg(out_scale_ptr);
scale = __ldg(data_ptr + inIdx) * __ldg(weight_amax + col) * input_deQFactor_div127;
tmp = scale + static_cast<float>(__ldg(bias_ptr + col));
shm[sh_row][sh_col] = float_to_int8_rn(tmp*out_scale);
scale = __ldg(data_ptr + inIdx + 1) * __ldg(weight_amax + col + 1) * input_deQFactor_div127;
tmp = scale + static_cast<float>(__ldg(bias_ptr+col+1));
shm[sh_row][sh_col+1] = float_to_int8_rn(tmp*out_scale);
scale = __ldg(data_ptr+inIdx+2) * __ldg(weight_amax+col+2) * input_deQFactor_div127;
tmp = scale + static_cast<float>(__ldg(bias_ptr+col+2));
shm[sh_row][sh_col+2] = float_to_int8_rn(tmp*out_scale);
scale = __ldg(data_ptr+inIdx + 3) * __ldg(weight_amax+col+3) * input_deQFactor_div127;
tmp = scale + static_cast<float>(__ldg(bias_ptr+col+3));
shm[sh_row][sh_col+3] = float_to_int8_rn(tmp*out_scale);
}
else{
shm[sh_row][sh_col] = shm[sh_row][sh_col + 1] = shm[sh_row][sh_col + 2] = shm[sh_row][sh_col + 3] = 0;
}
__syncthreads();
char4 dataTmp;
dataTmp.x = shm[sh_col][sh_row];
dataTmp.y = shm[sh_col+1][sh_row];
dataTmp.z = shm[sh_col+2][sh_row];
dataTmp.w = shm[sh_col+3][sh_row];
//for dst of (size_per_head, seq_len)
word_id = (blockIdx.y << 5) + threadIdx4;
id_in_size = (blockIdx.x << 5) + threadIdx.y;
col = (word_id >> 5);
row = (
//COL4
        ////id_in_size/8 is the index of the (8-row x 32-column) tile -- column-major
        ////id_in_size%2 == 0 means an even row, otherwise an odd row
        ////word_id%COL32_/8 is the index of the (8-row x 8-column) tile
(((id_in_size & 0xfffffff8) + ((id_in_size&1) << 2) + ((word_id&31) >> 3)) << 5) +
////word_id%8 >= 4 is the right half of (8 rows 8 columns) tile
////(id_in_size%8/2) is (the row id of alternating 4 rows) - 1
(((((word_id&7) >= 4)?4:0) + ((id_in_size&7) >> 1)) << 2) +
////word_id%4 is the id of 4 cols
(word_id&3)
);
buf_ptr4[(blockIdx.z*stride + (col << 5)*size_per_head + row) >> 2] = dataTmp;
}
template <>
__global__
void add_V_bias_transform_rebuild_padding(int8_t *v_buf_, const int32_t *V, const half *V_bias, const int* sequence_id_map, const int valid_word_num,
const int batch_size, const int seq_len, const int head_num, const int size_per_head, int stride,
const float* weight_amax, const float *input_deQFactor_div127_ptr, const float *out_scale_ptr)
{
__shared__ int8_t shm[32][33];
const int32_t* data_ptr = V;
char4* buf_ptr4 = (char4*) v_buf_;
int threadIdx4 = threadIdx.x << 2;
//for src of (seq_len, size_per_head)
int batch_id = blockIdx.z/head_num;
int head_id = blockIdx.z%head_num;
int blockIdy32 = (blockIdx.y << 5);
int blockIdx32 = (blockIdx.x << 5);
int word_id = blockIdy32 + threadIdx.y;
int id_in_size = blockIdx32 + threadIdx4;
//for shm row-major
int sh_col = threadIdx4;
int sh_row = threadIdx.y;
//for V layout (batch_size*seq_len, head_num*size_per_head)
int col;
int row = __ldg(sequence_id_map + batch_id*seq_len + word_id);
if (row >= 0){
const float input_deQFactor_div127 = __ldg(input_deQFactor_div127_ptr);
const float out_scale = __ldg(out_scale_ptr);
col = head_id*size_per_head + id_in_size;
int inIdx = ((col & 0xffffffe0)*valid_word_num + ((row << 5) + (col&31)));
int col_2 = col >> 1;
float scale;
const half2* bias_ptr2 = (const half2*)V_bias;
half2 tmp2;
tmp2 = __ldg(bias_ptr2+col_2);
scale = __ldg(data_ptr+inIdx) * __ldg(weight_amax+col) * input_deQFactor_div127;
scale = scale + static_cast<float>(tmp2.x);
shm[sh_row][sh_col] = float_to_int8_rn(scale*out_scale);
scale = __ldg(data_ptr+inIdx+1) * __ldg(weight_amax+col+1) * input_deQFactor_div127;
scale = scale + static_cast<float>(tmp2.y);
shm[sh_row][sh_col+1] = float_to_int8_rn(scale*out_scale);
tmp2 = __ldg(bias_ptr2 + col_2 + 1);
scale = __ldg(data_ptr + inIdx + 2) * __ldg(weight_amax + col + 2) * input_deQFactor_div127;
scale = scale + static_cast<float>(tmp2.x);
shm[sh_row][sh_col+2] = float_to_int8_rn(scale*out_scale);
scale = __ldg(data_ptr + inIdx + 3) * __ldg(weight_amax + col + 3) * input_deQFactor_div127;
scale = scale + static_cast<float>(tmp2.y);
shm[sh_row][sh_col+3] = float_to_int8_rn(scale*out_scale);
}
else{
shm[sh_row][sh_col] = shm[sh_row][sh_col + 1] = shm[sh_row][sh_col + 2] = shm[sh_row][sh_col + 3] = 0;
}
__syncthreads();
char4 dataTmp;
dataTmp.x = shm[sh_col][sh_row];
dataTmp.y = shm[sh_col+1][sh_row];
dataTmp.z = shm[sh_col+2][sh_row];
dataTmp.w = shm[sh_col+3][sh_row];
//for dst of (size_per_head, seq_len)
word_id = blockIdy32 + threadIdx4;
id_in_size = blockIdx32 + threadIdx.y;
col = (word_id >> 5);
row = (
//COL4
        ////id_in_size/8 is the index of the (8-row x 32-column) tile -- column-major
        ////id_in_size%2 == 0 means an even row, otherwise an odd row
        ////word_id%COL32_/8 is the index of the (8-row x 8-column) tile
(((id_in_size & 0xfffffff8) + ((id_in_size&1) << 2) + ((word_id&31) >> 3)) << 5) +
////word_id%8 >= 4 is the right half of (8 rows 8 columns) tile
////(id_in_size%8/2) is (the row id of alternating 4 rows) - 1
(((((word_id&7) >= 4)?4:0) + ((id_in_size&7) >> 1)) << 2) +
////word_id%4 is the id of 4 cols
(word_id&3)
);
buf_ptr4[(blockIdx.z*stride + (col << 5)*size_per_head + row) >> 2] = dataTmp;
}
template<typename T>
__global__
void add_QKV_bias(T* Q, const T* bias_Q, T* K, const T* bias_K, T* V, const T* bias_V, T* q_buf_, T* k_buf_, T* v_buf_,
const int batch_size, const int seq_len, const int head_num, const int size_per_head, const int word_per_block)
{
T* data_ptr;
T* buf_ptr;
const T* bias_ptr;
int m = batch_size * seq_len;
int n = head_num * size_per_head;
int qkv_id = blockIdx.x * word_per_block / m;
int row_offset = (blockIdx.x * word_per_block % m) * n;
if(qkv_id == 0)
{
data_ptr = Q + row_offset;
buf_ptr = q_buf_;
bias_ptr = bias_Q;
}
else if(qkv_id == 1)
{
data_ptr = K + row_offset;
buf_ptr = k_buf_;
bias_ptr = bias_K;
}
else
{
data_ptr = V + row_offset;
buf_ptr = v_buf_;
bias_ptr = bias_V;
}
int batch_id = (blockIdx.x * word_per_block % m) / seq_len;
int head_id = threadIdx.x / size_per_head;
int id_in_head = threadIdx.x % size_per_head;
int word_start_id = (blockIdx.x * word_per_block) % seq_len;
T bias = __ldg(&bias_ptr[threadIdx.x]);
for(int i = word_start_id; i < word_start_id + word_per_block; ++i)
{
T tmp = data_ptr[threadIdx.x] + bias;
int target_id = batch_id * (seq_len * head_num * size_per_head) + head_id * seq_len * size_per_head +
i * size_per_head + id_in_head;
buf_ptr[target_id] = tmp;
data_ptr += n;
}
}
template <>
__global__
void add_QKV_bias(half* Q, const half* bias_Q, half* K, const half* bias_K, half* V, const half* bias_V,
half* q_buf_, half* k_buf_, half* v_buf_,
const int batch_size, const int seq_len, const int head_num, const int size_per_head, const int word_per_block)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int batch_id = tid / (head_num * seq_len * size_per_head);
int seq_id = (tid % (head_num * seq_len * size_per_head)) / (head_num * size_per_head);
int head_id = (tid % (head_num * size_per_head)) / size_per_head;
int id = tid % size_per_head;
int target_id = target_index(batch_id, seq_id, head_id, id, batch_size, seq_len, head_num, size_per_head);
int bias_id = threadIdx.x;
half2* src_ptr = (half2*)Q;
half2* dst_ptr = (half2*)q_buf_;
const half2* bias_ptr = (const half2*)bias_Q;
dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
src_ptr = (half2*)K;
dst_ptr = (half2*)k_buf_;
bias_ptr = (const half2*)bias_K;
dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
src_ptr = (half2*)V;
dst_ptr = (half2*)v_buf_;
bias_ptr = (const half2*)bias_V;
dst_ptr[target_id] = __hadd2(src_ptr[tid], __ldg(&bias_ptr[bias_id]));
}
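// Descriptive note (added comment, based on the launch later in this file): this
// half specialization is launched with block.x = head_num * size_per_head / 2 and
// with size_per_head/2 passed as size_per_head, so every thread handles one half2
// (two fp16 values) and tid/target_id/bias_id are indices into half2 buffers.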
template<typename T>
__global__
void add_QKV_bias_rebuild_padding(T* Q, const T* bias_Q, T* K, const T* bias_K, T* V, const T* bias_V, T* q_buf_, T* k_buf_, T* v_buf_,
const int batch_size, const int seq_len, const int head_num, const int size_per_head, const int* mask_offset)
{
const int tid = threadIdx.x;
const int bid = blockIdx.x;
const int bdim = blockDim.x;
const int tgt_batch_id = (bid + mask_offset[bid]) / seq_len;
const int tgt_seq_id = (bid + mask_offset[bid]) % seq_len;
const int tgt_head_id = tid / size_per_head;
const int tgt_hidden_id = tid % size_per_head;
const int src_id = bid * bdim + tid;
const int tgt_id = tgt_batch_id * head_num * seq_len * size_per_head + \
tgt_head_id * seq_len * size_per_head + \
tgt_seq_id * size_per_head + \
tgt_hidden_id;
q_buf_[tgt_id] = Q[src_id] + bias_Q[tid];
k_buf_[tgt_id] = K[src_id] + bias_K[tid];
v_buf_[tgt_id] = V[src_id] + bias_V[tid];
}
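// Descriptive note (added comment): mask_offset[bid] is the number of padding
// tokens that precede compact token bid, so bid + mask_offset[bid] is the token's
// position in the padded batch_size*seq_len layout, from which the target batch
// and sequence indices above are recovered.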
template <typename T>
__global__
void softmax_kernel(T* qk_buf_, const T* attr_mask, const int batch_size, const int head_num, const int seq_len,
const T scalar)
{
int batch_id = blockIdx.x / head_num;
int qk_offset = blockIdx.x * seq_len * seq_len;
int mask_offset = batch_id * seq_len * seq_len;
__shared__ float s_sum, s_max;
for(int i = 0; i < seq_len; ++i)
{
float qk = threadIdx.x < seq_len ? (float)qk_buf_[threadIdx.x + qk_offset] : 0.0f;
float mask_val = threadIdx.x < seq_len ? (float)attr_mask[threadIdx.x + mask_offset] : 0.0f;
mask_val = (1.0f - mask_val) * -10000.0f;
float tmp = threadIdx.x < seq_len ? (float)(qk * (float)scalar + mask_val): -1e20f;
float max_val = blockReduceMax<float>(tmp);
if(threadIdx.x == 0)
s_max = max_val;
__syncthreads();
qk = threadIdx.x < seq_len ? __expf(tmp - s_max) : 0.0f;
float sum_val = blockReduceSum<float>(qk);
if(threadIdx.x == 0)
{
s_sum = sum_val + 1e-6f;
}
__syncthreads();
if(threadIdx.x < seq_len)
qk_buf_[threadIdx.x + qk_offset] = (T)(qk / s_sum);
qk_offset += seq_len;
mask_offset += seq_len;
}
}
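// Reference semantics of one softmax row (illustrative comment, not original code):
//   p[j] = exp(scalar*qk[j] + (1 - mask[j]) * -10000 - max_k(scalar*qk[k] + mask term))
//          / (sum_k exp(...) + 1e-6)
// Subtracting the row maximum does not change the result but keeps __expf from
// overflowing; the 1e-6 term guards against fully masked rows.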
template <typename T>
__global__
void softmax_kernel_v2(T* qk_buf_, const T* attr_mask, const int batch_size, const int head_num,
const int seq_len, const float scalar)
{
int batch_id = blockIdx.x / head_num / seq_len;
int seq_id = blockIdx.x % seq_len;
int qk_offset = blockIdx.x * seq_len;
int mask_offset = batch_id * seq_len * seq_len + seq_id * seq_len;
__shared__ float s_sum, s_max;
float qk = threadIdx.x < seq_len ? (float)qk_buf_[threadIdx.x + qk_offset] : 0.0f;
float mask_val = threadIdx.x < seq_len ? (float)attr_mask[threadIdx.x + mask_offset] : 0.0f;
mask_val = (1.0f - mask_val) * -10000.0f;
float tmp = threadIdx.x < seq_len ? (float)(qk * (float)scalar + mask_val) : -1e20f;
float max_val = blockReduceMax<float>(tmp);
if(threadIdx.x == 0)
s_max = max_val;
__syncthreads();
float qk_tmp = threadIdx.x < seq_len ? __expf((float)(tmp - s_max)) : 0.0f;
float sum_val = blockReduceSum<float>(qk_tmp);
if(threadIdx.x == 0)
{
s_sum = sum_val + 1e-6f;
}
__syncthreads();
if(threadIdx.x < seq_len)
qk_buf_[threadIdx.x + qk_offset] = (T)(qk_tmp / s_sum);
}
//grid = (seq_len/word_per_thread, batch_size, head_num)
//block.x = max(32, (seq_len + 31)/32*32)
template <typename T>
__global__
void softmax_kernel_v3(T* qk_buf_, const T* attr_mask, const int batch_size, const int head_num, const int seq_len, const T scalar)
{
bool qual = threadIdx.x < seq_len;
for (int seq_id = blockIdx.x ; seq_id < seq_len ; seq_id += gridDim.x){
float tmp = -1e20f;
int qk_offset;
__shared__ float s_mean, s_max;
if (qual){
qk_offset = ((blockIdx.y*head_num + blockIdx.z)*seq_len + seq_id) *seq_len + threadIdx.x;
int mask_offset = (blockIdx.y * seq_len + seq_id) * seq_len + threadIdx.x;
float qk = static_cast<float>(qk_buf_[qk_offset]);
float mask_val = static_cast<float>(__ldg(&attr_mask[mask_offset]));
mask_val = (1.0f - mask_val) * -10000.0f;
tmp = qk * static_cast<float>(scalar) + mask_val;
}
float max_val = blockReduceMax<float>(tmp);
if (threadIdx.x == 0){
s_max = max_val;
}
__syncthreads();
float qk_tmp = qual ? __expf(tmp - s_max) : 0.0f;
float sum_val = blockReduceSum<float>(qk_tmp);
if (threadIdx.x == 0){
s_mean = sum_val + 1e-6f;
s_mean = __fdividef(1.0f, s_mean);
}
__syncthreads();
if(qual)
qk_buf_[qk_offset] = (T)(qk_tmp * s_mean);
}
}
//grid = (seq_len/word_per_thread, batch_size, head_num)
//block.x = max(32, (seq_len/2 + 31)/32*32)
//seq_len % 2 == 0
template <>
__global__
void softmax_kernel_v3(half* qk_buf_, const half* attr_mask,
const int batch_size, const int head_num,
const int seq_len, const half scalar)
{
int threadIdx2 = threadIdx.x << 1;
bool qual = threadIdx2 < seq_len;
half2* qk_buf_half2Ptr = (half2*) qk_buf_;
const half2* attr_mask_half2Ptr = (const half2*) attr_mask;
__shared__ float s_mean, s_max;
for (int seq_id = blockIdx.x ; seq_id < seq_len ; seq_id += gridDim.x){
int qk_offset;
half2 tmp = __float2half2_rn(0.0f);
float max_val = -1e20f;
half2 qk;
if (qual){
qk_offset = ((((blockIdx.y*head_num + blockIdx.z)*seq_len + seq_id) *seq_len) >> 1) + threadIdx.x;
int mask_offset = (((blockIdx.y * seq_len + seq_id) * seq_len) >> 1) + threadIdx.x;
qk = qk_buf_half2Ptr[qk_offset];
half2 mask_val = __ldg(&attr_mask_half2Ptr[mask_offset]);
half2 mask_val_tmp = __hmul2(__hsub2(__float2half2_rn(1.0f), mask_val), __float2half2_rn(-10000.0f));
tmp = __hadd2(__hmul2(__half2half2(scalar), qk), mask_val_tmp);
max_val = fmax((float)tmp.x, (float)tmp.y);
}
max_val = blockDim.x <= 32 ? warpReduceMax(max_val) : blockReduceMax<float>(max_val);
if (threadIdx.x == 0){
s_max = max_val;
}
__syncthreads();
if (qual){
tmp = h2exp(__hsub2(tmp, __float2half2_rn(s_max)));
}
float sum_val = blockDim.x <= 32 ? warpReduceSum((float)(tmp.x + tmp.y)) : blockReduceSum<float>((float)(tmp.x + tmp.y));
if (threadIdx.x == 0){
s_mean = sum_val + 1e-6f;
s_mean = __fdividef(1.0f, s_mean);
}
__syncthreads();
if(qual){
qk = __hmul2(tmp, __float2half2_rn(s_mean));
qk_buf_half2Ptr[qk_offset] = qk;
}
}
}
//grid = (seq_len/word_per_thread, batch_size, head_num)
//block.x = max(32, (seq_len + 31)/32*32)
//for seq_len not larger than 32
template <typename T>
__global__
void softmax_kernel_v3_LE32(T* qk_buf_, const T* attr_mask, const int batch_size, const int head_num, const int seq_len, const T scalar)
{
bool qual = threadIdx.x < seq_len;
for (int seq_id = blockIdx.x ; seq_id < seq_len ; seq_id += gridDim.x){
int qk_offset;
__shared__ float s_mean, s_max;
float tmp = -1e20f;
if (qual){
qk_offset = ((blockIdx.y*head_num + blockIdx.z)*seq_len + seq_id) *seq_len + threadIdx.x;
int mask_offset = (blockIdx.y * seq_len + seq_id) * seq_len + threadIdx.x;
float qk = static_cast<float>(qk_buf_[qk_offset]);
float mask_val = static_cast<float>(__ldg(&attr_mask[mask_offset]));
mask_val = (1.0f - mask_val) * -10000.0f;
tmp = static_cast<float>(qk) * static_cast<float>(scalar) + mask_val;
}
float max_val = warpReduceMax<float>(tmp);
if (threadIdx.x == 0){
s_max = max_val;
}
__syncthreads();
tmp = qual ? __expf(tmp - s_max) : 0.0f;
float sum_val = warpReduceSum<float>(tmp);
if (threadIdx.x == 0){
s_mean = sum_val + 1e-6f;
s_mean = __fdividef(1.0f, s_mean);
}
__syncthreads();
if(qual)
qk_buf_[qk_offset] = (T)(tmp * s_mean);
}
}
//int_buf is a series of sub-matrices of m = seq_len, n = seq_len, CUBLASLT_ORDER_COL32
//grid = (seq_len, batch_size, head_num)
//block.x = max(32, (seq_len/4 + 31)/32*32)
//for int32_t I; int8 O;
template <typename T>
__global__
void softmax_COL32(int8_t* qk_buf_, const int32_t* int_buf, const T* attr_mask, const int batch_size,
const int head_num, const int seq_len, const float scalar1a, const float *scalar1b,
const float *scalar1c, const float *amax_ptr, const int head_num_x_seq_len, const int seq_len_x_seq_len)
{
const float amax = __ldg(amax_ptr);
const float scalar1 = scalar1a * __ldg(scalar1b) * __ldg(scalar1c);
int mask_id;
int threadIdx4 = threadIdx.x << 2;
char4* buf4Ptr = (char4 *)qk_buf_;
bool qual = threadIdx4 < seq_len;
for (int seq_id = blockIdx.x ; seq_id < seq_len ; seq_id += gridDim.x){
char4 tmp4;
float4 floatTmp4 = {0.0f, 0.0f, 0.0f, 0.0f};
int inIdx = (blockIdx.y * head_num + blockIdx.z) * (seq_len_x_seq_len) +
(threadIdx4 & 0xffffffe0) * seq_len +
(seq_id << 5) + (threadIdx4 & 31);
if (qual){
floatTmp4.x = static_cast<float>(__ldg(int_buf + inIdx)) * scalar1;
floatTmp4.y = static_cast<float>(__ldg(int_buf+inIdx+1)) * scalar1;
floatTmp4.z = static_cast<float>(__ldg(int_buf+inIdx+2)) * scalar1;
floatTmp4.w = static_cast<float>(__ldg(int_buf+inIdx+3)) * scalar1;
}
float mask_val, max_val;
max_val = -1e20f;
__shared__ float s_max, s_sum;
if (qual){
mask_id = threadIdx4 + blockIdx.y * seq_len_x_seq_len + seq_id * seq_len;
//for x
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask+mask_id))) * -10000.0f;
floatTmp4.x = floatTmp4.x + mask_val;
max_val = fmaxf(max_val, floatTmp4.x);
//for y
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask+mask_id+1))) * -10000.0f;
floatTmp4.y = floatTmp4.y + mask_val;
max_val = fmaxf(max_val, floatTmp4.y);
//for z
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask+mask_id+2))) * -10000.0f;
floatTmp4.z = floatTmp4.z + mask_val;
max_val = fmaxf(max_val, floatTmp4.z);
//for w
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask+mask_id+3))) * -10000.0f;
floatTmp4.w = floatTmp4.w + mask_val;
max_val = fmaxf(max_val, floatTmp4.w);
}
max_val = blockDim.x <= 32 ? warpReduceMax(max_val) : blockReduceMax<float>(max_val);
if (threadIdx.x == 0){
s_max = max_val;
}
__syncthreads();
float sum_val = 0.0f;
if (qual){
floatTmp4.x = __expf(floatTmp4.x - s_max);
sum_val += floatTmp4.x;
floatTmp4.y = __expf(floatTmp4.y - s_max);
sum_val += floatTmp4.y;
floatTmp4.z = __expf(floatTmp4.z - s_max);
sum_val += floatTmp4.z;
floatTmp4.w = __expf(floatTmp4.w - s_max);
sum_val += floatTmp4.w;
}
sum_val = blockDim.x <= 32 ? warpReduceSum(sum_val) : blockReduceSum<float>(sum_val);
if (threadIdx.x == 0){
s_sum = __fdividef(127.0f, (sum_val + 1e-6f));
s_sum = __fdividef(s_sum, amax);
}
__syncthreads();
if (qual){
tmp4.x = float_to_int8_rn(floatTmp4.x*s_sum);
tmp4.y = float_to_int8_rn(floatTmp4.y*s_sum);
tmp4.z = float_to_int8_rn(floatTmp4.z*s_sum);
tmp4.w = float_to_int8_rn(floatTmp4.w*s_sum);
buf4Ptr[inIdx >> 2] = tmp4;
}
}
}
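// Quantization note (added comment): after the reductions,
//   s_sum = 127 / ((sum_val + 1e-6) * amax)
// so the value written out is round(softmax_prob * 127 / amax), i.e. the softmax
// output quantized to int8 with the per-tensor scale amax/127 read from amax_ptr.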
//int_buf is a series of sub-matrices of m = seq_len, n = seq_len, CUBLASLT_ORDER_COL32
//grid = (seq_len, batch_size, head_num)
//block.x = (seq_len + 31)/32*32 (i.e. 32 for seq_len <= 32, matching the launch below)
//for int32_t I; int8 O;
//for seq_len <= 32
template <typename T>
__global__
void softmax_COL32_LE32(int8_t* qk_buf_, const int32_t* int_buf, const T* attr_mask, const int batch_size,
const int head_num, const int seq_len, const float scalar1a, const float *scalar1b,
const float *scalar1c, const float *amax_ptr, const int head_num_x_seq_len, const int seq_len_x_seq_len)
{
const float amax = __ldg(amax_ptr);
const float scalar1 = scalar1a * __ldg(scalar1b) * __ldg(scalar1c);
int mask_id;
int threadIdxx = threadIdx.x;
bool qual = threadIdxx < seq_len;
for (int seq_id = blockIdx.x ; seq_id < seq_len ; seq_id += gridDim.x){
int inIdx = (blockIdx.y * head_num + blockIdx.z) * (seq_len_x_seq_len) +
(threadIdxx & 0xffffffe0) * seq_len +
(seq_id << 5) + (threadIdxx & 31);
float floatTmp = qual ? static_cast<float>(__ldg(int_buf + inIdx)) * scalar1 : 0.0f;
float mask_val, max_val;
__shared__ float s_max, s_sum;
mask_id = qual ? threadIdxx + blockIdx.y * seq_len_x_seq_len + seq_id * seq_len : 0;
mask_val = qual ? (1.0f - static_cast<float>(__ldg(attr_mask+mask_id))) * -10000.0f : 0.0f;
floatTmp = qual ? floatTmp + mask_val : 0.0f;
max_val = qual ? floatTmp : -1e20f;
max_val = blockDim.x <= 32 ? warpReduceMax(max_val) : blockReduceMax<float>(max_val);
if (threadIdx.x == 0){
s_max = max_val;
}
__syncthreads();
floatTmp = qual ? __expf(floatTmp - s_max) : 0.0f;
float sum_val = blockDim.x <= 32 ? warpReduceSum(floatTmp) : blockReduceSum<float>(floatTmp);
if (threadIdx.x == 0){
s_sum = __fdividef(127.0f, (sum_val + 1e-6f));
s_sum = __fdividef(s_sum, amax);
}
__syncthreads();
if (qual){
qk_buf_[inIdx] = float_to_int8_rn(floatTmp*s_sum);
}
}
}
//int_buf is a series of sub-matrices of m = seq_len, n = seq_len, CUBLASLT_ORDER_COL32
//grid = (seq_len, batch_size, head_num)
//block.x = max(32, (seq_len/2 + 31)/32*32)
//for int32_t I; int8 O;
//for seq_len in (32, 64]
template <typename T>
__global__
void softmax_COL32_LE64(int8_t* qk_buf_, const int32_t* int_buf, const T* attr_mask, const int batch_size,
const int head_num, const int seq_len, const float scalar1a, const float *scalar1b,
const float *scalar1c, const float *amax_ptr, const int head_num_x_seq_len, const int seq_len_x_seq_len)
{
const float amax = __ldg(amax_ptr);
const float scalar1 = scalar1a * __ldg(scalar1b) * __ldg(scalar1c);
int mask_id;
int threadIdx2 = threadIdx.x << 1;
char2* buf2Ptr = (char2 *)qk_buf_;
bool qual = threadIdx2 < seq_len;
for (int seq_id = blockIdx.x ; seq_id < seq_len ; seq_id += gridDim.x){
char2 tmp2;
float2 floatTmp2 = {0.0f, 0.0f};
int inIdx = (blockIdx.y * head_num + blockIdx.z) * (seq_len_x_seq_len) +
(threadIdx2 & 0xffffffe0) * seq_len +
(seq_id << 5) + (threadIdx2 & 31);
if (qual){
floatTmp2.x = static_cast<float>(__ldg(int_buf + inIdx)) * scalar1;
floatTmp2.y = static_cast<float>(__ldg(int_buf + inIdx + 1)) * scalar1;
}
float mask_val, max_val;
max_val = -1e20f;
__shared__ float s_max, s_sum;
if (qual){
mask_id = threadIdx2 + blockIdx.y * seq_len_x_seq_len + seq_id * seq_len;
//for x
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask+mask_id))) * -10000.0f;
floatTmp2.x = floatTmp2.x + mask_val;
//for y
mask_val = (1.0f - static_cast<float>(__ldg(attr_mask+mask_id+1))) * -10000.0f;
floatTmp2.y = floatTmp2.y + mask_val;
max_val = fmaxf(floatTmp2.x, floatTmp2.y);
}
max_val = blockDim.x <= 32 ? warpReduceMax(max_val) : blockReduceMax<float>(max_val);
if (threadIdx.x == 0){
s_max = max_val;
}
__syncthreads();
float sum_val = 0.0f;
if (qual){
floatTmp2.x = __expf(floatTmp2.x - s_max);
sum_val += floatTmp2.x;
floatTmp2.y = __expf(floatTmp2.y - s_max);
sum_val += floatTmp2.y;
}
sum_val = blockDim.x <= 32 ? warpReduceSum(sum_val) : blockReduceSum<float>(sum_val);
if (threadIdx.x == 0){
s_sum = __fdividef(127.0f, (sum_val + 1e-6f));
s_sum = __fdividef(s_sum, amax);
}
__syncthreads();
if (qual){
tmp2.x = float_to_int8_rn(floatTmp2.x*s_sum);
tmp2.y = float_to_int8_rn(floatTmp2.y*s_sum);
buf2Ptr[inIdx >> 1] = tmp2;
}
}
}
template<typename T>
__global__
void transpose(T* src, T* dst, const int batch_size, const int seq_len, const int head_num, const int size_per_head)
{
int batch_id = blockIdx.x / (head_num * seq_len);
int seq_id = blockIdx.x % seq_len;
int head_id = (blockIdx.x % (head_num * seq_len))/ seq_len;
dst[batch_id * (head_num * seq_len * size_per_head) + seq_id * head_num * size_per_head
+ head_id * size_per_head + threadIdx.x] = src[blockIdx.x * size_per_head + threadIdx.x];
}
template<>
__global__
void transpose(half* src, half* dst,
const int batch_size, const int seq_len, const int head_num, const int size_per_head)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int batch_id = tid / (head_num * seq_len * size_per_head);
int head_id = (tid % (head_num * seq_len * size_per_head)) / (seq_len * size_per_head);
int seq_id = (tid % (seq_len * size_per_head)) / size_per_head;
int id = tid % size_per_head;
int target_id = target_index(batch_id, head_id, seq_id, id, batch_size, head_num, seq_len, size_per_head);
half2* src_ptr = (half2*)src;
half2* dst_ptr = (half2*)dst;
dst_ptr[target_id] = src_ptr[tid];
}
template<typename T>
__global__
void transpose_rebuild_padding(T* src, T* dst, const int batch_size, const int seq_len, const int head_num, const int size_per_head,
const int* mask_offset)
{
// TODO: optimize this kernel?
// do remove_sequence_length_padding
  const int tid = threadIdx.x; // in [0, head_num * size_per_head)
  const int bid = blockIdx.x; // in [0, valid_word_num), i.e. one block per compact word
const int src_batch_id = (bid + mask_offset[bid]) / seq_len;
const int src_seq_id = (bid + mask_offset[bid]) % seq_len;
const int dst_seq_id = bid;
const int head_id = tid / size_per_head;
const int hidden_id = tid % size_per_head;
dst[dst_seq_id * head_num * size_per_head + tid] = src[ src_batch_id * head_num * seq_len * size_per_head +
head_id * seq_len * size_per_head + src_seq_id * size_per_head + hidden_id];
}
template<typename T>
__global__ void rebuild_sequence_length_padding(const T* src, T* tgt,
const int* mask_offset,
const int n)
{
const int tid = threadIdx.x;
const int bid = blockIdx.x;
const int tgt_seq_id = bid + mask_offset[bid];
const int src_seq_id = bid;
for(int i = tid; i < n; i += blockDim.x)
{
tgt[tgt_seq_id * n + i] = src[src_seq_id * n + i];
}
}
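// A minimal launch sketch (illustrative only; the launcher for this kernel is not
// part of this excerpt, and `valid_word_num`, `n`, `stream` are assumed to be
// provided by the caller):
//   rebuild_sequence_length_padding<float><<<valid_word_num, 256, 0, stream>>>(
//       compact_src, padded_tgt, mask_offset, n);
// One block per compact word; each block strides over the n hidden units.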
template<OperationType OpType_>
void OpenMultiHeadAttention<OpType_>::multiHeadAttr_nofuse_kernelLauncher(
cudaStream_t stream,
cublasHandle_t cublas_handle,
cublasLtHandle_t cublaslt_handle,
DataType_* Q,
const DataType_* bias_Q,
DataType_* K,
const DataType_* bias_K,
DataType_* V,
const DataType_* bias_V,
const DataType_* attr_mask,
DataType_* dst,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const int int8_mode_,
const DataType_ scalar)
{
const int k = head_num * size_per_head;
dim3 grid;
dim3 block;
if (int8_mode_ != 0){
//var for int8
const float*q_buf_addBias_amax_ptr, *k_buf_addBias_amax_ptr, *v_buf_addBias_amax_ptr, *qk_afterSM_amax_ptr, *qkv_amax_ptr, *in_amax_ptr;
q_buf_addBias_amax_ptr = param_.amaxList+4;
k_buf_addBias_amax_ptr = param_.amaxList + 8;
v_buf_addBias_amax_ptr = param_.amaxList + 12;
qk_afterSM_amax_ptr = param_.amaxList + 16;
qkv_amax_ptr = param_.amaxList + 20;
in_amax_ptr = param_.amaxList;
assert(seq_len % COL32_ == 0 && size_per_head%COL32_ == 0);
if(param_.sequence_id_offset == nullptr || param_.valid_word_num == batch_size * seq_len){
add_QK_bias_transform<<<dim3(batch_size*seq_len*2), dim3((head_num * size_per_head)/4), 0, stream>>>((int8_t*)q_buf_, (int8_t*)k_buf_, (const int32_t*) Q, bias_Q, (const int32_t*) K,
bias_K, batch_size * seq_len, batch_size, seq_len, head_num, size_per_head,
seq_len*size_per_head, query_weight_amax_list, in_amax_ptr+2, key_weight_amax_list,
in_amax_ptr+2, q_buf_addBias_amax_ptr+3, k_buf_addBias_amax_ptr+3);
add_V_bias_transform<<<dim3(size_per_head/32, seq_len/32, batch_size*head_num), dim3(8, 32), 0, stream>>>((int8_t*)v_buf_, (const int32_t *)V, bias_V, batch_size, seq_len,
head_num, size_per_head, seq_len*size_per_head, value_weight_amax_list,
in_amax_ptr+2, v_buf_addBias_amax_ptr+3);
}
else{
cudaMemset(sequence_id_map_, -1, batch_size * seq_len * sizeof(int));
mappingRemovePaddingData<<<dim3((param_.valid_word_num + 63)/64), dim3(64)>>>(sequence_id_map_, param_.sequence_id_offset, param_.valid_word_num);
add_QK_bias_transform_rebuild_padding<<<dim3(param_.valid_word_num*2), dim3((head_num * size_per_head)/4), 0, stream>>>((int8_t*)q_buf_, (int8_t*)k_buf_, (const int32_t*) Q, bias_Q,
(const int32_t*) K, bias_K, param_.sequence_id_offset, param_.valid_word_num,
batch_size * seq_len, batch_size, seq_len, head_num, size_per_head, seq_len*size_per_head,
query_weight_amax_list, in_amax_ptr+2, key_weight_amax_list, in_amax_ptr+2,
q_buf_addBias_amax_ptr+3, k_buf_addBias_amax_ptr+3);
add_V_bias_transform_rebuild_padding<<<dim3(size_per_head/32, seq_len/32, batch_size*head_num), dim3(8, 32), 0, stream>>>((int8_t*)v_buf_, (const int32_t *)V, bias_V, sequence_id_map_,
param_.valid_word_num, batch_size, seq_len, head_num,
size_per_head, seq_len*size_per_head, value_weight_amax_list,
in_amax_ptr+2, v_buf_addBias_amax_ptr+3);
}
int batchCount = batch_size * head_num;
cublasLtMM_withAlgo(qk_int_buf_, batchCount, seq_len, seq_len, size_per_head,
size_per_head*seq_len, size_per_head*seq_len, seq_len*seq_len,
(int8_t*)q_buf_, (int8_t*)k_buf_, cublaslt_handle, stream, cublasLtAlgoMap);
grid.x = seq_len;
grid.y = batch_size;
grid.z = head_num;
if (seq_len <= 32){
if (batch_size * head_num > 960)
grid.x = ceil(float(seq_len)/32.0f);
block.x = (seq_len + 31)/32*32;
softmax_COL32_LE32<<<grid, block, 0, stream>>>((int8_t*)qk_buf_, qk_int_buf_, attr_mask, batch_size, head_num,
seq_len, float(scalar), q_buf_addBias_amax_ptr + 1, k_buf_addBias_amax_ptr + 1,
qk_afterSM_amax_ptr, seq_len*head_num, seq_len*seq_len);
}
else if (seq_len <= 64){
assert(seq_len % 2 == 0);
block.x = (seq_len/2 + 31)/32*32;
if (batch_size * head_num > 960)
grid.x = ceil(float(seq_len)/32.0f);
softmax_COL32_LE64<<<grid, block, 0, stream>>>((int8_t*)qk_buf_, qk_int_buf_, attr_mask, batch_size, head_num,
seq_len, float(scalar), q_buf_addBias_amax_ptr + 1, k_buf_addBias_amax_ptr + 1,
qk_afterSM_amax_ptr, seq_len*head_num, seq_len*seq_len);
}
else
{
assert(seq_len % 4 == 0);
block.x = (seq_len/4 + 31)/32*32;
softmax_COL32<<<grid, block, 0, stream>>>((int8_t*)qk_buf_, qk_int_buf_, attr_mask, batch_size, head_num,
seq_len, float(scalar), q_buf_addBias_amax_ptr + 1, k_buf_addBias_amax_ptr + 1,
qk_afterSM_amax_ptr, seq_len*head_num, seq_len*seq_len);
}
cublasLtMM_withAlgo(transpose_dst_int_buf_, batchCount, seq_len, size_per_head, seq_len,
seq_len*seq_len, size_per_head*seq_len, size_per_head*seq_len, (int8_t*)qk_buf_,
(int8_t*)v_buf_, cublaslt_handle, stream, cublasLtAlgoMap);
if(param_.sequence_id_offset == nullptr || param_.valid_word_num == batch_size * seq_len){
transpose_COL32_kernelLauncher((int8_t*)dst, (const int*)transpose_dst_int_buf_, batch_size, seq_len, head_num,
size_per_head, v_buf_addBias_amax_ptr+1, qk_afterSM_amax_ptr+1, qkv_amax_ptr+3, stream);
}
else{
transpose_COL32_rebuild_padding_kernelLauncher((int8_t*)dst, (const int*)transpose_dst_int_buf_, sequence_id_map_,
param_.valid_word_num, batch_size, seq_len, head_num, size_per_head,
v_buf_addBias_amax_ptr+1, qk_afterSM_amax_ptr+1, qkv_amax_ptr+3, stream);
}
}
//FP32/FP16
else{
if(OpType_ == OperationType::FP32)
{
if(param_.sequence_id_offset == nullptr || param_.valid_word_num == batch_size * seq_len)
{
const int m = batch_size * seq_len;
const int word_per_block = 1;
assert(k <= 1024);
assert(m / word_per_block * 3 <= 65536);
dim3 grid(m / word_per_block * 3);
dim3 block(k);
add_QKV_bias<DataType_><<<grid, block, 0, stream>>>(Q, bias_Q, K, bias_K, V, bias_V, q_buf_, k_buf_, v_buf_,
batch_size, seq_len, head_num, size_per_head, word_per_block);
}
else
{
add_QKV_bias_rebuild_padding<DataType_><<<param_.valid_word_num, k, 0, stream>>>(Q, bias_Q, K, bias_K,
V, bias_V, q_buf_, k_buf_, v_buf_,
batch_size, seq_len, head_num, size_per_head, param_.sequence_id_offset);
}
}
else
{
if(param_.sequence_id_offset == nullptr || param_.valid_word_num == batch_size * seq_len)
{
const int word_per_block = 1;
grid.x = batch_size * seq_len / word_per_block;
block.x = head_num * size_per_head * word_per_block / 2;
assert(block.x <= 1024);
add_QKV_bias<DataType_><<<grid, block, 0, stream>>>(Q, bias_Q, K, bias_K, V, bias_V, q_buf_, k_buf_,
v_buf_, batch_size, seq_len, head_num, size_per_head / 2, word_per_block);
}
else
{
add_QKV_bias_rebuild_padding<half2><<<param_.valid_word_num, k / 2, 0, stream>>>((half2*)Q, (const half2*)bias_Q,
(half2*)K, (const half2*)bias_K, (half2*)V, (const half2*)bias_V,
(half2*)q_buf_, (half2*)k_buf_, (half2*)v_buf_,
batch_size, seq_len, head_num, size_per_head / 2, param_.sequence_id_offset);
}
}
DataType_ alpha = (DataType_)1.0f, beta = (DataType_)0.0f;
check_cuda_error(cublasGemmStridedBatchedEx(cublas_handle,
CUBLAS_OP_T, CUBLAS_OP_N,
seq_len, seq_len, size_per_head,
&alpha,
k_buf_, AType_, size_per_head, seq_len * size_per_head,
q_buf_, BType_, size_per_head, seq_len * size_per_head,
&beta,
qk_buf_, CType_, seq_len, seq_len * seq_len,
batch_size * head_num,
computeType_,
static_cast<cublasGemmAlgo_t>(cublasAlgo_[1])));
//deal with odd seq_len
if (seq_len % 2 != 0){
if(seq_len <= 32)
block.x = 32;
else if(seq_len > 32 && seq_len <= 64)
block.x = 64;
else if(seq_len > 64 && seq_len <= 128)
block.x = 128;
else if(seq_len > 128 && seq_len <= 256)
block.x = 256;
else if(seq_len > 256 && seq_len <= 512)
block.x = 512;
else
block.x = 1024;
if(batch_size * head_num <= 120)
{
grid.x = batch_size * head_num * seq_len;
softmax_kernel_v2<DataType_><<<grid, block, 0, stream>>>(qk_buf_, attr_mask, batch_size, head_num, seq_len, scalar);
}
else
{
grid.x = batch_size * head_num;
softmax_kernel<DataType_><<<grid, block, 0, stream>>>(qk_buf_, attr_mask, batch_size, head_num, seq_len, scalar);
}
}
//deal with even seq_len
else{
grid.x = seq_len;
if (batch_size * head_num > 360)
grid.x = ceil(float(seq_len)/32.0f);
grid.y = batch_size;
grid.z = head_num;
if (seq_len <= 32){
block.x = 32;
softmax_kernel_v3_LE32<DataType_><<<grid, block, 0, stream>>>(qk_buf_, attr_mask, batch_size, head_num, seq_len, scalar);
}
else{
if (OpType_ == OperationType::FP16){
block.x = (seq_len/2 + 31)/32*32;
softmax_kernel_v3<<<grid, block, 0, stream>>>(qk_buf_, attr_mask, batch_size, head_num, seq_len, scalar);
}
else{
block.x = (seq_len + 31)/32*32;
softmax_kernel_v3<DataType_><<<grid, block, 0, stream>>>(qk_buf_, attr_mask, batch_size, head_num, seq_len, scalar);
}
}
grid.x = grid.y = grid.z = 1;
}
check_cuda_error(cublasGemmStridedBatchedEx(cublas_handle,
CUBLAS_OP_N, CUBLAS_OP_N,
size_per_head, seq_len, seq_len,
&alpha,
v_buf_, AType_, size_per_head, seq_len * size_per_head,
qk_buf_, BType_, seq_len, seq_len * seq_len,
&beta,
transpose_dst_, CType_, size_per_head, seq_len * size_per_head,
batch_size * head_num,
computeType_,
static_cast<cublasGemmAlgo_t>(cublasAlgo_[2])));
/* for half2 only */
if(OpType_ == OperationType::FP16)
{
if(param_.sequence_id_offset == nullptr || param_.valid_word_num == batch_size * seq_len)
{
const int seq_per_block = 4;
grid.x = batch_size * head_num * seq_len / seq_per_block;
block.x = seq_per_block * size_per_head / 2;
assert(grid.x * seq_per_block == batch_size * head_num * seq_len);
transpose<DataType_><<<grid, block, 0, stream>>>(transpose_dst_, dst,
batch_size, seq_len, head_num, size_per_head / 2);
}
else
{
transpose_rebuild_padding<half2><<<param_.valid_word_num, k / 2, 0, stream>>>(
(half2*)transpose_dst_, (half2*)dst,
batch_size, seq_len, head_num, size_per_head / 2, param_.sequence_id_offset);
}
}
else
{
if(param_.sequence_id_offset == nullptr || param_.valid_word_num == batch_size * seq_len)
{
const int seq_per_block = 1;
grid.x = batch_size * head_num * seq_len / seq_per_block;
block.x = seq_per_block * size_per_head;
transpose<DataType_><<<grid, block, 0, stream>>>(transpose_dst_, dst,
batch_size, seq_len, head_num, size_per_head);
}
else
{
transpose_rebuild_padding<DataType_><<<param_.valid_word_num, k, 0, stream>>>(transpose_dst_, dst,
batch_size, seq_len, head_num, size_per_head, param_.sequence_id_offset);
}
}
}
}
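// Pipeline summary for the int8 path above (descriptive comment, not original code):
//   1. add_QK_bias_transform / add_V_bias_transform (and the *_rebuild_padding
//      variants) fuse bias add, dequantization with the per-axis weight amax,
//      requantization to int8 and the COL32/COL4 relayout
//   2. cublasLtMM_withAlgo: batched Q*K^T in int8
//   3. softmax_COL32 / _LE32 / _LE64: masked softmax with int8 output
//   4. cublasLtMM_withAlgo: batched softmax(QK)*V
//   5. transpose_COL32_* : transpose back and (optionally) remove padding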
template void OpenMultiHeadAttention<OperationType::FP32>::multiHeadAttr_nofuse_kernelLauncher(
cudaStream_t stream,
cublasHandle_t handle,
cublasLtHandle_t cublaslt_handle,
float* Q,
const float* bias_Q,
float* K,
const float* bias_K,
float* V,
const float* bias_V,
const float* attr_mask,
float* dst,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const int int8_mode_,
const float scalar);
template void OpenMultiHeadAttention<OperationType::FP16>::multiHeadAttr_nofuse_kernelLauncher(
cudaStream_t stream,
cublasHandle_t handle,
cublasLtHandle_t cublaslt_handle,
half* Q,
const half* bias_Q,
half* K,
const half* bias_K,
half* V,
const half* bias_V,
const half* attr_mask,
half* dst,
const int batch_size,
const int seq_len,
const int head_num,
const int size_per_head,
const int int8_mode_,
const half scalar);
}//namespace cuda
}//namespace fastertransformer
|
ed00eaff731cad2379e42dc700715b0530203336.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cudatbx/scattering/direct_summation.cuh>
namespace cudatbx {
namespace scattering {
/* ==========================================================================
*/
cudatbx::scattering::direct_summation::direct_summation() {
// set host and device pointers to NULL
h_xyz = NULL;
h_solvent = NULL;
h_h = NULL;
h_rt = NULL;
h_weights = NULL;
h_scattering_type = NULL;
h_a = NULL;
h_b = NULL;
h_c = NULL;
d_xyz = NULL;
d_solvent = NULL;
d_h = NULL;
d_rt = NULL;
d_weights = NULL;
d_scattering_type = NULL;
amplitudes_allocated = false;
h_real = NULL;
h_imag = NULL;
d_real = NULL;
d_imag = NULL;
workspace_allocated = false;
d_workspace = NULL;
}
cudatbx::scattering::direct_summation::~direct_summation() {
clear_arrays();
clear_amplitudes();
clear_workspace();
}
// --------------------------------------------------------------------------
void cudatbx::scattering::direct_summation::set_xyz
(const scitbx::af::const_ref<scitbx::vec3<double> >& xyz) {
// allocate memory if necessary
if (n_xyz != xyz.size()) {
clear_xyz();
n_xyz = xyz.size();
padded_n_xyz = cudatbx::calculate_padded_size(n_xyz,padding);
size_xyz = 3 * padded_n_xyz;
h_xyz = new fType[size_xyz];
cudaSafeCall( hipMalloc((void**)&d_xyz,size_xyz*sizeof(fType)) );
}
// convert values
for (int i=0; i<n_xyz; i++) {
for (int j=0; j<3; j++) {
h_xyz[j*padded_n_xyz + i] = fType(xyz[i][j]);
}
}
// transfer to GPU
cudaSafeCall( hipMemcpy(d_xyz, h_xyz, size_xyz*sizeof(fType),
hipMemcpyHostToDevice) );
}
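  // Layout note (added comment): set_xyz above stores the coordinates
  // structure-of-arrays, h_xyz = [ x_0..x_{n-1} <pad> | y_0..y_{n-1} <pad> |
  // z_0..z_{n-1} <pad> ], each component padded to padded_n_xyz elements so that
  // consecutive GPU threads read consecutive addresses (coalesced access).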
void cudatbx::scattering::direct_summation::clear_xyz() {
delete[] h_xyz;
cudaSafeCall( hipFree(d_xyz) );
h_xyz = NULL;
d_xyz = NULL;
}
// --------------------------------------------------------------------------
void cudatbx::scattering::direct_summation::set_solvent_weights
(const scitbx::af::const_ref<double>& solvent_weights) {
// allocate memory if necessary
SCITBX_ASSERT (n_xyz == solvent_weights.size());
if (n_solvent != solvent_weights.size()) {
clear_solvent_weights();
n_solvent = solvent_weights.size();
h_solvent = new fType[padded_n_xyz];
cudaSafeCall( hipMalloc((void**)&d_solvent,padded_n_xyz*sizeof(fType)) );
}
// convert values
for (int i=0; i<n_xyz; i++) {
h_solvent[i] = fType(solvent_weights[i]);
}
// transfer to GPU
cudaSafeCall( hipMemcpy(d_solvent, h_solvent,
padded_n_xyz*sizeof(fType),
hipMemcpyHostToDevice) );
}
void cudatbx::scattering::direct_summation::clear_solvent_weights() {
delete[] h_solvent;
cudaSafeCall( hipFree(d_solvent) );
h_solvent = NULL;
d_solvent = NULL;
}
// --------------------------------------------------------------------------
void cudatbx::scattering::direct_summation::set_hkl
(const scitbx::af::const_ref<scitbx::vec3<double> >& h) {
// allocate memory if necessary
if (n_h != h.size()) {
clear_hkl();
n_h = h.size();
padded_n_h = cudatbx::calculate_padded_size(n_h,padding);
size_h = 3 * padded_n_h;
h_h = new fType[size_h];
cudaSafeCall( hipMalloc((void**)&d_h,size_h*sizeof(fType)) );
}
// convert values
for (int i=0; i<n_h; i++) {
for (int j=0; j<3; j++) {
h_h[j*padded_n_h + i] = fType(h[i][j]);
}
}
// transfer to GPU
cudaSafeCall( hipMemcpy(d_h, h_h, size_h*sizeof(fType),
hipMemcpyHostToDevice) );
}
void cudatbx::scattering::direct_summation::clear_hkl() {
delete[] h_h;
cudaSafeCall( hipFree(d_h) );
h_h = NULL;
d_h = NULL;
}
// --------------------------------------------------------------------------
void cudatbx::scattering::direct_summation::set_q
(const scitbx::af::const_ref<double>& q) {
// q data, use h variables
if (n_h != q.size()) {
clear_q();
n_h = q.size();
padded_n_h = cudatbx::calculate_padded_size(n_h,padding);
size_h = padded_n_h;
h_h = new fType[size_h];
cudaSafeCall( hipMalloc((void**)&d_h,size_h*sizeof(fType)) );
}
// convert values
for (int i=0; i<n_h; i++) {
h_h[i] = fType(q[i]);
}
// transfer to GPU
cudaSafeCall( hipMemcpy(d_h, h_h, size_h*sizeof(fType),
hipMemcpyHostToDevice) );
}
void cudatbx::scattering::direct_summation::clear_q() {
clear_hkl();
}
// --------------------------------------------------------------------------
void cudatbx::scattering::direct_summation::set_lattice
(const scitbx::af::const_ref<double>& lattice_weights,
const scitbx::af::const_ref<double>& lattice) {
// lattice points, use rotation/translation
if (n_rt != lattice_weights.size()) {
clear_lattice();
n_rt = lattice_weights.size();
size_rt = cudatbx::calculate_padded_size(n_rt,padding);
h_weights = new fType[size_rt];
h_rt = new fType[3*size_rt];
cudaSafeCall( hipMalloc((void**)&d_weights,size_rt*sizeof(fType)) );
cudaSafeCall( hipMalloc((void**)&d_rt,3*size_rt*sizeof(fType)) );
}
// convert values
for (int i=0; i<n_rt; i++) {
h_weights[i] = fType(lattice_weights[i]/n_rt);
for (int j=0; j<3; j++) {
h_rt[j*size_rt + i] = fType(lattice[j*n_rt + i]);
}
}
// transfer to GPU
cudaSafeCall( hipMemcpy(d_weights, h_weights, size_rt*sizeof(fType),
hipMemcpyHostToDevice) );
cudaSafeCall( hipMemcpy(d_rt, h_rt, 3*size_rt*sizeof(fType),
hipMemcpyHostToDevice) );
}
void cudatbx::scattering::direct_summation::clear_weights() {
delete[] h_weights;
cudaSafeCall( hipFree(d_weights) );
h_weights = NULL;
d_weights = NULL;
}
void cudatbx::scattering::direct_summation::clear_lattice() {
clear_weights();
clear_rotations_translations();
}
// --------------------------------------------------------------------------
void cudatbx::scattering::direct_summation::set_rotations_translations
(const scitbx::af::const_ref<double>& rotations,
const scitbx::af::const_ref<scitbx::vec3<double> >& translations) {
// each rotation/translation pair is combined and padded to take up
// 64 bytes so that a coalesced read will read two pairs
if (n_rt != translations.size()) {
clear_rotations_translations();
n_rt = translations.size();
size_rt = padded_size * n_rt;
h_rt = new fType[size_rt];
cudaSafeCall( hipMalloc((void**)&d_rt,size_rt*sizeof(fType)) );
}
// convert values
for (int i=0; i<n_rt; i++) {
for (int j=0; j<9; j++) {
h_rt[padded_size*i + j] = fType(rotations[9*i + j]);
}
for (int j=0; j<3; j++) {
h_rt[padded_size*i + j + 9] = fType(translations[i][j]);
}
}
// transfer to GPU
cudaSafeCall( hipMemcpy(d_rt, h_rt, size_rt*sizeof(fType),
hipMemcpyHostToDevice) );
}
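  // Packing note (added comment): each rotation/translation pair occupies
  // padded_size consecutive floats (9 rotation values followed by 3 translation
  // values; padded_size is defined elsewhere in this class and, per the 64-byte
  // comment above, is assumed here to be 16 floats).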
void cudatbx::scattering::direct_summation::clear_rotations_translations() {
delete[] h_rt;
cudaSafeCall( hipFree(d_rt) );
h_rt = NULL;
d_rt = NULL;
}
// --------------------------------------------------------------------------
void cudatbx::scattering::direct_summation::set_scattering_types
(const scitbx::af::const_ref<std::string>& scatterers,
const cctbx::xray::scattering_type_registry& registry) {
// allocate memory if necessary
SCITBX_ASSERT (n_xyz == scatterers.size());
if (n_scatterers != scatterers.size()) {
clear_scattering_types();
n_scatterers = scatterers.size();
h_scattering_type = new int[padded_n_xyz];
cudaSafeCall( hipMalloc((void**)&d_scattering_type,
padded_n_xyz*sizeof(int)) );
}
// convert values
for (int i=0; i<n_xyz; i++) {
h_scattering_type[i] = registry.unique_index(scatterers[i]);
}
// transfer to GPU
cudaSafeCall( hipMemcpy(d_scattering_type,h_scattering_type,
padded_n_xyz*sizeof(int),hipMemcpyHostToDevice) );
}
void cudatbx::scattering::direct_summation::clear_scattering_types() {
delete[] h_scattering_type;
cudaSafeCall( hipFree(d_scattering_type) );
h_scattering_type = NULL;
d_scattering_type = NULL;
}
// --------------------------------------------------------------------------
void cudatbx::scattering::direct_summation::set_scattering_type_registry
(const cctbx::xray::scattering_type_registry& registry,
const bool& complex_form_factor) {
// convert form factors
// add ordinary oxygen form factor at end for boundary layer solvent
scitbx::af::shared<boost::optional
<cctbx::eltbx::xray_scattering::gaussian> >
unique_gaussians = registry.unique_gaussians;
n_types = unique_gaussians.size() + 1;
n_terms = unique_gaussians[0].get().n_terms();
f_size = n_types * n_terms;
delete[] h_a;
delete[] h_b;
delete[] h_c;
h_a = new fType[f_size];
h_b = new fType[f_size];
h_c = new fType[n_types];
for (int i=0; i<f_size; i++) {
h_a[i] = fType(0.0);
h_b[i] = fType(0.0);
}
for (int i=0; i<n_types-1; i++) {
for (int j=0; j<n_terms; j++) {
h_a[i*n_terms + j] = fType(unique_gaussians[i].get().array_of_a()[j]);
h_b[i*n_terms + j] = fType(unique_gaussians[i].get().array_of_b()[j]);
}
if (unique_gaussians[i].get().use_c()) {
h_c[i] = fType(unique_gaussians[i].get().c());
}
else {
h_c[i] = fType(0.0);
}
}
// add form factor for boundary layer solvent
cctbx::eltbx::xray_scattering::gaussian hoh =
cctbx::eltbx::xray_scattering::wk1995("O",true).fetch();
for (int i=0; i<hoh.array_of_a().size(); i++){
h_a[(n_types-1)*n_terms + i] = fType(hoh.array_of_a()[i]);
h_b[(n_types-1)*n_terms + i] = fType(hoh.array_of_b()[i]);
}
if (hoh.use_c()) {
h_c[n_types-1] = fType(hoh.c());
}
else {
h_c[n_types-1] = fType(0.0);
}
// transfer to GPU
cudaSafeCall( hipMemcpyToSymbol(dc_a, h_a, f_size*sizeof(fType)) );
cudaSafeCall( hipMemcpyToSymbol(dc_b, h_b, f_size*sizeof(fType)) );
cudaSafeCall( hipMemcpyToSymbol(dc_c, h_c, n_types*sizeof(fType)) );
cudaSafeCall( hipMemcpyToSymbol(dc_n_types, &n_types, sizeof(int)) );
cudaSafeCall( hipMemcpyToSymbol(dc_n_terms, &n_terms, sizeof(int)) );
cudaSafeCall( hipMemcpyToSymbol(dc_complex_form_factor,
&complex_form_factor, sizeof(bool)) );
}
void cudatbx::scattering::direct_summation::clear_scattering_type_registry() {
delete[] h_a;
delete[] h_b;
delete[] h_c;
h_a = NULL;
h_b = NULL;
h_c = NULL;
}
// --------------------------------------------------------------------------
void cudatbx::scattering::direct_summation::clear_arrays() {
    // free host and device arrays and set all pointers to NULL
clear_xyz();
clear_solvent_weights();
clear_hkl();
clear_rotations_translations();
clear_weights();
clear_scattering_types();
clear_scattering_type_registry();
}
// --------------------------------------------------------------------------
void cudatbx::scattering::direct_summation::allocate_amplitudes() {
if (amplitudes_allocated) {
clear_amplitudes();
}
h_real = new fType[n_h];
h_imag = new fType[n_h];
cudaSafeCall( hipMalloc((void**)&d_real,n_h*sizeof(fType)) );
cudaSafeCall( hipMalloc((void**)&d_imag,n_h*sizeof(fType)) );
amplitudes_allocated = true;
}
void cudatbx::scattering::direct_summation::reset_amplitudes() {
fType zero = fType(0.0);
for (int i=0; i<n_h; i++) {
h_real[i] = zero;
h_imag[i] = zero;
}
cudaSafeCall( hipMemcpy(d_real,h_real,n_h*sizeof(fType),
hipMemcpyHostToDevice) );
cudaSafeCall( hipMemcpy(d_imag,h_imag,n_h*sizeof(fType),
hipMemcpyHostToDevice) );
}
void cudatbx::scattering::direct_summation::clear_amplitudes() {
delete[] h_real;
delete[] h_imag;
cudaSafeCall( hipFree(d_real) );
cudaSafeCall( hipFree(d_imag) );
h_real = NULL;
h_imag = NULL;
d_real = NULL;
d_imag = NULL;
amplitudes_allocated = false;
}
// --------------------------------------------------------------------------
void cudatbx::scattering::direct_summation::allocate_workspace
(const int& length) {
if (workspace_allocated) {
clear_workspace();
}
cudaSafeCall( hipMalloc((void**)&d_workspace,length*sizeof(fType)) );
workspace_allocated = true;
}
void cudatbx::scattering::direct_summation::clear_workspace() {
cudaSafeCall( hipFree(d_workspace) );
d_workspace = NULL;
workspace_allocated = false;
}
/* --------------------------------------------------------------------------
   reorganizes data and calls the CUDA kernel
   arrays are padded to a multiple of 128 bytes (32 * sizeof(float or int))
*/
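/* Worked example of the padding rule above (a sketch; assumes 4-byte float/int
   elements, so 32 elements correspond to 128 bytes): an array of n = 1000
   values would be padded up to the next multiple of 32, i.e. 1024 elements;
   only the first n entries carry data. */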
void cudatbx::scattering::direct_summation::run_kernel() {
int blocks_per_grid = cudatbx::calculate_blocks_per_grid(n_h,threads_per_block);
hipLaunchKernelGGL(( structure_factor_kernel<fType>), dim3(blocks_per_grid),dim3(threads_per_block), 0, 0,
d_scattering_type, d_xyz, d_solvent, n_xyz, padded_n_xyz,
d_h, n_h, padded_n_h,
d_rt, n_rt,
d_real, d_imag);
}
void cudatbx::scattering::direct_summation::add
(const scitbx::af::const_ref<std::string>& scatterers,
const scitbx::af::const_ref<scitbx::vec3<double> >& xyz,
const scitbx::af::const_ref<double>& solvent_weights,
const scitbx::af::const_ref<scitbx::vec3<double> >& h,
const scitbx::af::const_ref<double>& rotations,
const scitbx::af::const_ref<scitbx::vec3<double> >& translations,
const cctbx::xray::scattering_type_registry& registry,
const bool& complex_form_factor) {
    // reorganize input data, allocate arrays, and transfer to the GPU; the call order matters
set_xyz(xyz);
set_solvent_weights(solvent_weights);
set_hkl(h);
set_rotations_translations(rotations,translations);
set_scattering_types(scatterers,registry);
set_scattering_type_registry(registry,complex_form_factor);
// allocate arrays for results if necessary
if (!amplitudes_allocated) {
allocate_amplitudes();
reset_amplitudes();
}
// run calculation
run_kernel();
// deallocate arrays
clear_arrays();
}
/* --------------------------------------------------------------------------
   reorganizes data and calls the CUDA kernels
   arrays are padded to a multiple of 128 bytes (32 * sizeof(float or int))
"Rapid and accurate calculation of small-angle scattering profiles using
the golden ratio"
Watson, MC, Curtis, JE. J. Appl. Cryst. (2013). 46, 1171-1177
   the solvent variables are reused for the lattice weights; this code is not
   optimal and could be refactored into a subclass or separate functions
*/
void cudatbx::scattering::direct_summation::prepare_saxs
(const scitbx::af::const_ref<std::string>& scatterers,
const scitbx::af::const_ref<scitbx::vec3<double> >& xyz,
const scitbx::af::const_ref<double>& solvent_weights,
const scitbx::af::const_ref<double>& q,
const scitbx::af::const_ref<double>& lattice_weights,
const scitbx::af::const_ref<double>& lattice,
const cctbx::xray::scattering_type_registry& registry,
const bool& complex_form_factor) {
    // reorganize input data, allocate arrays, and transfer to the GPU; the call order matters
set_xyz(xyz);
set_solvent_weights(solvent_weights);
set_q(q);
set_lattice(lattice_weights,lattice);
set_scattering_types(scatterers,registry);
set_scattering_type_registry(registry,complex_form_factor);
// allocate arrays for results if necessary
if (!amplitudes_allocated) {
allocate_amplitudes();
}
}
void cudatbx::scattering::direct_summation::run_saxs_kernel() {
// allocate working space if necessary
if (!workspace_allocated) {
workspace_size = int(::floor(n_h*n_rt/padding + 1.0)) * padding;
allocate_workspace(3*workspace_size);
}
int blocks_per_grid = cudatbx::calculate_blocks_per_grid
(n_rt,threads_per_block);
hipLaunchKernelGGL(( expand_q_lattice_kernel<fType>), dim3(blocks_per_grid),dim3(threads_per_block), 0, 0,
d_h, n_h,
d_rt, n_rt, size_rt,
d_workspace, workspace_size);
blocks_per_grid = cudatbx::calculate_blocks_per_grid
(n_h*n_rt,threads_per_block);
hipLaunchKernelGGL(( saxs_kernel<fType>), dim3(blocks_per_grid),dim3(threads_per_block), 0, 0,
d_scattering_type, d_xyz, d_solvent, n_xyz, padded_n_xyz,
n_h, n_rt,
d_workspace, workspace_size);
}
void cudatbx::scattering::direct_summation::run_solvent_saxs_kernel() {
// allocate working space if necessary
if (!workspace_allocated) {
workspace_size = int(::floor(n_h*n_rt/padding + 1.0)) * padding;
allocate_workspace(7*workspace_size);
}
int blocks_per_grid = cudatbx::calculate_blocks_per_grid
(n_rt,threads_per_block);
hipLaunchKernelGGL(( expand_q_lattice_kernel<fType>), dim3(blocks_per_grid),dim3(threads_per_block), 0, 0,
d_h, n_h,
d_rt, n_rt, size_rt,
d_workspace, workspace_size);
blocks_per_grid = cudatbx::calculate_blocks_per_grid
(n_h*n_rt,threads_per_block);
hipLaunchKernelGGL(( solvent_saxs_kernel<fType>), dim3(blocks_per_grid),dim3(threads_per_block), 0, 0,
d_scattering_type, d_xyz, d_solvent, n_xyz, padded_n_xyz,
n_h, n_rt,
d_workspace, workspace_size);
}
void cudatbx::scattering::direct_summation::run_collect_solvent_saxs_kernel
(const double& c1, const double& c2) {
assert(workspace_allocated);
int blocks_per_grid = cudatbx::calculate_blocks_per_grid
(n_h*n_rt,threads_per_block);
hipLaunchKernelGGL(( collect_solvent_saxs_kernel<fType>), dim3(blocks_per_grid),dim3(threads_per_block), 0, 0,
n_h, n_rt,fType(c1),fType(c2),d_workspace, workspace_size);
}
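  /* sum_over_lattice reduces the per-lattice-point values in d_workspace to one
     summed value per q: for each of the n_h q values it launches a weighted-sum
     kernel over the n_rt lattice points, with d_weights as the weights and
     threads_per_block*sizeof(fType) of shared memory (presumably for the
     block-level reduction), writing the result into d_real[i]. */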
void cudatbx::scattering::direct_summation::sum_over_lattice() {
int blocks_per_grid = cudatbx::calculate_blocks_per_grid
(n_rt,threads_per_block);
for (int i=0; i<n_h; i++) {
      hipLaunchKernelGGL(( cudatbx::math::weighted_sum_kernel<fType>),
        dim3(blocks_per_grid), dim3(threads_per_block),
        threads_per_block*sizeof(fType), 0,
        &d_workspace[i*n_rt], d_weights, n_rt, &d_real[i]);
}
}
/* --------------------------------------------------------------------------
return total sum
*/
scitbx::af::shared<std::complex<double> >
cudatbx::scattering::direct_summation::get_sum() {
scitbx::af::shared<std::complex<double> > sf(n_h);
assert(amplitudes_allocated);
cudaSafeCall( hipMemcpy(h_real,d_real,n_h*sizeof(fType),
hipMemcpyDeviceToHost) );
cudaSafeCall( hipMemcpy(h_imag,d_imag,n_h*sizeof(fType),
hipMemcpyDeviceToHost) );
for (int i=0; i<n_h; i++) {
sf[i] = std::complex<double>(double(h_real[i]),double(h_imag[i]));
}
return sf;
}
/* ==========================================================================
*/
}
}
| ed00eaff731cad2379e42dc700715b0530203336.cu | #include <cudatbx/scattering/direct_summation.cuh>
namespace cudatbx {
namespace scattering {
/* ==========================================================================
*/
cudatbx::scattering::direct_summation::direct_summation() {
// set host and device pointers to NULL
h_xyz = NULL;
h_solvent = NULL;
h_h = NULL;
h_rt = NULL;
h_weights = NULL;
h_scattering_type = NULL;
h_a = NULL;
h_b = NULL;
h_c = NULL;
d_xyz = NULL;
d_solvent = NULL;
d_h = NULL;
d_rt = NULL;
d_weights = NULL;
d_scattering_type = NULL;
amplitudes_allocated = false;
h_real = NULL;
h_imag = NULL;
d_real = NULL;
d_imag = NULL;
workspace_allocated = false;
d_workspace = NULL;
}
cudatbx::scattering::direct_summation::~direct_summation() {
clear_arrays();
clear_amplitudes();
clear_workspace();
}
// --------------------------------------------------------------------------
void cudatbx::scattering::direct_summation::set_xyz
(const scitbx::af::const_ref<scitbx::vec3<double> >& xyz) {
// allocate memory if necessary
if (n_xyz != xyz.size()) {
clear_xyz();
n_xyz = xyz.size();
padded_n_xyz = cudatbx::calculate_padded_size(n_xyz,padding);
size_xyz = 3 * padded_n_xyz;
h_xyz = new fType[size_xyz];
cudaSafeCall( cudaMalloc((void**)&d_xyz,size_xyz*sizeof(fType)) );
}
// convert values
for (int i=0; i<n_xyz; i++) {
for (int j=0; j<3; j++) {
h_xyz[j*padded_n_xyz + i] = fType(xyz[i][j]);
}
}
// transfer to GPU
cudaSafeCall( cudaMemcpy(d_xyz, h_xyz, size_xyz*sizeof(fType),
cudaMemcpyHostToDevice) );
}
void cudatbx::scattering::direct_summation::clear_xyz() {
delete[] h_xyz;
cudaSafeCall( cudaFree(d_xyz) );
h_xyz = NULL;
d_xyz = NULL;
}
// --------------------------------------------------------------------------
void cudatbx::scattering::direct_summation::set_solvent_weights
(const scitbx::af::const_ref<double>& solvent_weights) {
// allocate memory if necessary
SCITBX_ASSERT (n_xyz == solvent_weights.size());
if (n_solvent != solvent_weights.size()) {
clear_solvent_weights();
n_solvent = solvent_weights.size();
h_solvent = new fType[padded_n_xyz];
cudaSafeCall( cudaMalloc((void**)&d_solvent,padded_n_xyz*sizeof(fType)) );
}
// convert values
for (int i=0; i<n_xyz; i++) {
h_solvent[i] = fType(solvent_weights[i]);
}
// transfer to GPU
cudaSafeCall( cudaMemcpy(d_solvent, h_solvent,
padded_n_xyz*sizeof(fType),
cudaMemcpyHostToDevice) );
}
void cudatbx::scattering::direct_summation::clear_solvent_weights() {
delete[] h_solvent;
cudaSafeCall( cudaFree(d_solvent) );
h_solvent = NULL;
d_solvent = NULL;
}
// --------------------------------------------------------------------------
void cudatbx::scattering::direct_summation::set_hkl
(const scitbx::af::const_ref<scitbx::vec3<double> >& h) {
// allocate memory if necessary
if (n_h != h.size()) {
clear_hkl();
n_h = h.size();
padded_n_h = cudatbx::calculate_padded_size(n_h,padding);
size_h = 3 * padded_n_h;
h_h = new fType[size_h];
cudaSafeCall( cudaMalloc((void**)&d_h,size_h*sizeof(fType)) );
}
// convert values
for (int i=0; i<n_h; i++) {
for (int j=0; j<3; j++) {
h_h[j*padded_n_h + i] = fType(h[i][j]);
}
}
// transfer to GPU
cudaSafeCall( cudaMemcpy(d_h, h_h, size_h*sizeof(fType),
cudaMemcpyHostToDevice) );
}
void cudatbx::scattering::direct_summation::clear_hkl() {
delete[] h_h;
cudaSafeCall( cudaFree(d_h) );
h_h = NULL;
d_h = NULL;
}
// --------------------------------------------------------------------------
void cudatbx::scattering::direct_summation::set_q
(const scitbx::af::const_ref<double>& q) {
// q data, use h variables
if (n_h != q.size()) {
clear_q();
n_h = q.size();
padded_n_h = cudatbx::calculate_padded_size(n_h,padding);
size_h = padded_n_h;
h_h = new fType[size_h];
cudaSafeCall( cudaMalloc((void**)&d_h,size_h*sizeof(fType)) );
}
// convert values
for (int i=0; i<n_h; i++) {
h_h[i] = fType(q[i]);
}
// transfer to GPU
cudaSafeCall( cudaMemcpy(d_h, h_h, size_h*sizeof(fType),
cudaMemcpyHostToDevice) );
}
void cudatbx::scattering::direct_summation::clear_q() {
clear_hkl();
}
// --------------------------------------------------------------------------
void cudatbx::scattering::direct_summation::set_lattice
(const scitbx::af::const_ref<double>& lattice_weights,
const scitbx::af::const_ref<double>& lattice) {
// lattice points, use rotation/translation
if (n_rt != lattice_weights.size()) {
clear_lattice();
n_rt = lattice_weights.size();
size_rt = cudatbx::calculate_padded_size(n_rt,padding);
h_weights = new fType[size_rt];
h_rt = new fType[3*size_rt];
cudaSafeCall( cudaMalloc((void**)&d_weights,size_rt*sizeof(fType)) );
cudaSafeCall( cudaMalloc((void**)&d_rt,3*size_rt*sizeof(fType)) );
}
// convert values
for (int i=0; i<n_rt; i++) {
h_weights[i] = fType(lattice_weights[i]/n_rt);
for (int j=0; j<3; j++) {
h_rt[j*size_rt + i] = fType(lattice[j*n_rt + i]);
}
}
// transfer to GPU
cudaSafeCall( cudaMemcpy(d_weights, h_weights, size_rt*sizeof(fType),
cudaMemcpyHostToDevice) );
cudaSafeCall( cudaMemcpy(d_rt, h_rt, 3*size_rt*sizeof(fType),
cudaMemcpyHostToDevice) );
}
void cudatbx::scattering::direct_summation::clear_weights() {
delete[] h_weights;
cudaSafeCall( cudaFree(d_weights) );
h_weights = NULL;
d_weights = NULL;
}
void cudatbx::scattering::direct_summation::clear_lattice() {
clear_weights();
clear_rotations_translations();
}
// --------------------------------------------------------------------------
void cudatbx::scattering::direct_summation::set_rotations_translations
(const scitbx::af::const_ref<double>& rotations,
const scitbx::af::const_ref<scitbx::vec3<double> >& translations) {
// each rotation/translation pair is combined and padded to take up
// 64 bytes so that a coalesced read will read two pairs
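    // Layout sketch (illustrative; assumes fType is float and padded_size is 16,
    // i.e. 16 * 4 bytes = 64 bytes per pair):
    //   [ R00..R22 (9 floats) | T0 T1 T2 (3 floats) | 4 unused floats ]
    // so pair i starts at h_rt[padded_size*i], with rotations at offsets 0-8 and
    // translations at offsets 9-11, matching the conversion loops below.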
if (n_rt != translations.size()) {
clear_rotations_translations();
n_rt = translations.size();
size_rt = padded_size * n_rt;
h_rt = new fType[size_rt];
cudaSafeCall( cudaMalloc((void**)&d_rt,size_rt*sizeof(fType)) );
}
// convert values
for (int i=0; i<n_rt; i++) {
for (int j=0; j<9; j++) {
h_rt[padded_size*i + j] = fType(rotations[9*i + j]);
}
for (int j=0; j<3; j++) {
h_rt[padded_size*i + j + 9] = fType(translations[i][j]);
}
}
// transfer to GPU
cudaSafeCall( cudaMemcpy(d_rt, h_rt, size_rt*sizeof(fType),
cudaMemcpyHostToDevice) );
}
void cudatbx::scattering::direct_summation::clear_rotations_translations() {
delete[] h_rt;
cudaSafeCall( cudaFree(d_rt) );
h_rt = NULL;
d_rt = NULL;
}
// --------------------------------------------------------------------------
void cudatbx::scattering::direct_summation::set_scattering_types
(const scitbx::af::const_ref<std::string>& scatterers,
const cctbx::xray::scattering_type_registry& registry) {
// allocate memory if necessary
SCITBX_ASSERT (n_xyz == scatterers.size());
if (n_scatterers != scatterers.size()) {
clear_scattering_types();
n_scatterers = scatterers.size();
h_scattering_type = new int[padded_n_xyz];
cudaSafeCall( cudaMalloc((void**)&d_scattering_type,
padded_n_xyz*sizeof(int)) );
}
// convert values
for (int i=0; i<n_xyz; i++) {
h_scattering_type[i] = registry.unique_index(scatterers[i]);
}
// transfer to GPU
cudaSafeCall( cudaMemcpy(d_scattering_type,h_scattering_type,
padded_n_xyz*sizeof(int),cudaMemcpyHostToDevice) );
}
void cudatbx::scattering::direct_summation::clear_scattering_types() {
delete[] h_scattering_type;
cudaSafeCall( cudaFree(d_scattering_type) );
h_scattering_type = NULL;
d_scattering_type = NULL;
}
// --------------------------------------------------------------------------
void cudatbx::scattering::direct_summation::set_scattering_type_registry
(const cctbx::xray::scattering_type_registry& registry,
const bool& complex_form_factor) {
// convert form factors
// add ordinary oxygen form factor at end for boundary layer solvent
scitbx::af::shared<boost::optional
<cctbx::eltbx::xray_scattering::gaussian> >
unique_gaussians = registry.unique_gaussians;
n_types = unique_gaussians.size() + 1;
n_terms = unique_gaussians[0].get().n_terms();
f_size = n_types * n_terms;
delete[] h_a;
delete[] h_b;
delete[] h_c;
h_a = new fType[f_size];
h_b = new fType[f_size];
h_c = new fType[n_types];
for (int i=0; i<f_size; i++) {
h_a[i] = fType(0.0);
h_b[i] = fType(0.0);
}
for (int i=0; i<n_types-1; i++) {
for (int j=0; j<n_terms; j++) {
h_a[i*n_terms + j] = fType(unique_gaussians[i].get().array_of_a()[j]);
h_b[i*n_terms + j] = fType(unique_gaussians[i].get().array_of_b()[j]);
}
if (unique_gaussians[i].get().use_c()) {
h_c[i] = fType(unique_gaussians[i].get().c());
}
else {
h_c[i] = fType(0.0);
}
}
// add form factor for boundary layer solvent
cctbx::eltbx::xray_scattering::gaussian hoh =
cctbx::eltbx::xray_scattering::wk1995("O",true).fetch();
for (int i=0; i<hoh.array_of_a().size(); i++){
h_a[(n_types-1)*n_terms + i] = fType(hoh.array_of_a()[i]);
h_b[(n_types-1)*n_terms + i] = fType(hoh.array_of_b()[i]);
}
if (hoh.use_c()) {
h_c[n_types-1] = fType(hoh.c());
}
else {
h_c[n_types-1] = fType(0.0);
}
// transfer to GPU
cudaSafeCall( cudaMemcpyToSymbol(dc_a, h_a, f_size*sizeof(fType)) );
cudaSafeCall( cudaMemcpyToSymbol(dc_b, h_b, f_size*sizeof(fType)) );
cudaSafeCall( cudaMemcpyToSymbol(dc_c, h_c, n_types*sizeof(fType)) );
cudaSafeCall( cudaMemcpyToSymbol(dc_n_types, &n_types, sizeof(int)) );
cudaSafeCall( cudaMemcpyToSymbol(dc_n_terms, &n_terms, sizeof(int)) );
cudaSafeCall( cudaMemcpyToSymbol(dc_complex_form_factor,
&complex_form_factor, sizeof(bool)) );
}
void cudatbx::scattering::direct_summation::clear_scattering_type_registry() {
delete[] h_a;
delete[] h_b;
delete[] h_c;
h_a = NULL;
h_b = NULL;
h_c = NULL;
}
// --------------------------------------------------------------------------
void cudatbx::scattering::direct_summation::clear_arrays() {
    // free host and device arrays and set all pointers to NULL
clear_xyz();
clear_solvent_weights();
clear_hkl();
clear_rotations_translations();
clear_weights();
clear_scattering_types();
clear_scattering_type_registry();
}
// --------------------------------------------------------------------------
void cudatbx::scattering::direct_summation::allocate_amplitudes() {
if (amplitudes_allocated) {
clear_amplitudes();
}
h_real = new fType[n_h];
h_imag = new fType[n_h];
cudaSafeCall( cudaMalloc((void**)&d_real,n_h*sizeof(fType)) );
cudaSafeCall( cudaMalloc((void**)&d_imag,n_h*sizeof(fType)) );
amplitudes_allocated = true;
}
void cudatbx::scattering::direct_summation::reset_amplitudes() {
fType zero = fType(0.0);
for (int i=0; i<n_h; i++) {
h_real[i] = zero;
h_imag[i] = zero;
}
cudaSafeCall( cudaMemcpy(d_real,h_real,n_h*sizeof(fType),
cudaMemcpyHostToDevice) );
cudaSafeCall( cudaMemcpy(d_imag,h_imag,n_h*sizeof(fType),
cudaMemcpyHostToDevice) );
}
void cudatbx::scattering::direct_summation::clear_amplitudes() {
delete[] h_real;
delete[] h_imag;
cudaSafeCall( cudaFree(d_real) );
cudaSafeCall( cudaFree(d_imag) );
h_real = NULL;
h_imag = NULL;
d_real = NULL;
d_imag = NULL;
amplitudes_allocated = false;
}
// --------------------------------------------------------------------------
void cudatbx::scattering::direct_summation::allocate_workspace
(const int& length) {
if (workspace_allocated) {
clear_workspace();
}
cudaSafeCall( cudaMalloc((void**)&d_workspace,length*sizeof(fType)) );
workspace_allocated = true;
}
void cudatbx::scattering::direct_summation::clear_workspace() {
cudaSafeCall( cudaFree(d_workspace) );
d_workspace = NULL;
workspace_allocated = false;
}
/* --------------------------------------------------------------------------
   reorganizes data and calls the CUDA kernel
   arrays are padded to a multiple of 128 bytes (32 * sizeof(float or int))
*/
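/* Worked example of the padding rule above (a sketch; assumes 4-byte float/int
   elements, so 32 elements correspond to 128 bytes): an array of n = 1000
   values would be padded up to the next multiple of 32, i.e. 1024 elements;
   only the first n entries carry data. */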
void cudatbx::scattering::direct_summation::run_kernel() {
int blocks_per_grid = cudatbx::calculate_blocks_per_grid(n_h,threads_per_block);
structure_factor_kernel<fType><<<blocks_per_grid,threads_per_block>>>
(d_scattering_type, d_xyz, d_solvent, n_xyz, padded_n_xyz,
d_h, n_h, padded_n_h,
d_rt, n_rt,
d_real, d_imag);
}
void cudatbx::scattering::direct_summation::add
(const scitbx::af::const_ref<std::string>& scatterers,
const scitbx::af::const_ref<scitbx::vec3<double> >& xyz,
const scitbx::af::const_ref<double>& solvent_weights,
const scitbx::af::const_ref<scitbx::vec3<double> >& h,
const scitbx::af::const_ref<double>& rotations,
const scitbx::af::const_ref<scitbx::vec3<double> >& translations,
const cctbx::xray::scattering_type_registry& registry,
const bool& complex_form_factor) {
    // reorganize input data, allocate arrays, and transfer to the GPU; the call order matters
set_xyz(xyz);
set_solvent_weights(solvent_weights);
set_hkl(h);
set_rotations_translations(rotations,translations);
set_scattering_types(scatterers,registry);
set_scattering_type_registry(registry,complex_form_factor);
// allocate arrays for results if necessary
if (!amplitudes_allocated) {
allocate_amplitudes();
reset_amplitudes();
}
// run calculation
run_kernel();
// deallocate arrays
clear_arrays();
}
/* --------------------------------------------------------------------------
   reorganizes data and calls the CUDA kernels
   arrays are padded to a multiple of 128 bytes (32 * sizeof(float or int))
"Rapid and accurate calculation of small-angle scattering profiles using
the golden ratio"
Watson, MC, Curtis, JE. J. Appl. Cryst. (2013). 46, 1171-1177
   the solvent variables are reused for the lattice weights; this code is not
   optimal and could be refactored into a subclass or separate functions
*/
void cudatbx::scattering::direct_summation::prepare_saxs
(const scitbx::af::const_ref<std::string>& scatterers,
const scitbx::af::const_ref<scitbx::vec3<double> >& xyz,
const scitbx::af::const_ref<double>& solvent_weights,
const scitbx::af::const_ref<double>& q,
const scitbx::af::const_ref<double>& lattice_weights,
const scitbx::af::const_ref<double>& lattice,
const cctbx::xray::scattering_type_registry& registry,
const bool& complex_form_factor) {
    // reorganize input data, allocate arrays, and transfer to the GPU; the call order matters
set_xyz(xyz);
set_solvent_weights(solvent_weights);
set_q(q);
set_lattice(lattice_weights,lattice);
set_scattering_types(scatterers,registry);
set_scattering_type_registry(registry,complex_form_factor);
// allocate arrays for results if necessary
if (!amplitudes_allocated) {
allocate_amplitudes();
}
}
void cudatbx::scattering::direct_summation::run_saxs_kernel() {
// allocate working space if necessary
if (!workspace_allocated) {
workspace_size = int(std::floor(n_h*n_rt/padding + 1.0)) * padding;
allocate_workspace(3*workspace_size);
}
int blocks_per_grid = cudatbx::calculate_blocks_per_grid
(n_rt,threads_per_block);
expand_q_lattice_kernel<fType><<<blocks_per_grid,threads_per_block>>>
(d_h, n_h,
d_rt, n_rt, size_rt,
d_workspace, workspace_size);
blocks_per_grid = cudatbx::calculate_blocks_per_grid
(n_h*n_rt,threads_per_block);
saxs_kernel<fType><<<blocks_per_grid,threads_per_block>>>
(d_scattering_type, d_xyz, d_solvent, n_xyz, padded_n_xyz,
n_h, n_rt,
d_workspace, workspace_size);
}
void cudatbx::scattering::direct_summation::run_solvent_saxs_kernel() {
// allocate working space if necessary
if (!workspace_allocated) {
workspace_size = int(std::floor(n_h*n_rt/padding + 1.0)) * padding;
allocate_workspace(7*workspace_size);
}
int blocks_per_grid = cudatbx::calculate_blocks_per_grid
(n_rt,threads_per_block);
expand_q_lattice_kernel<fType><<<blocks_per_grid,threads_per_block>>>
(d_h, n_h,
d_rt, n_rt, size_rt,
d_workspace, workspace_size);
blocks_per_grid = cudatbx::calculate_blocks_per_grid
(n_h*n_rt,threads_per_block);
solvent_saxs_kernel<fType><<<blocks_per_grid,threads_per_block>>>
(d_scattering_type, d_xyz, d_solvent, n_xyz, padded_n_xyz,
n_h, n_rt,
d_workspace, workspace_size);
}
void cudatbx::scattering::direct_summation::run_collect_solvent_saxs_kernel
(const double& c1, const double& c2) {
assert(workspace_allocated);
int blocks_per_grid = cudatbx::calculate_blocks_per_grid
(n_h*n_rt,threads_per_block);
collect_solvent_saxs_kernel<fType><<<blocks_per_grid,threads_per_block>>>
(n_h, n_rt,fType(c1),fType(c2),d_workspace, workspace_size);
}
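  /* sum_over_lattice reduces the per-lattice-point values in d_workspace to one
     summed value per q: for each of the n_h q values it launches a weighted-sum
     kernel over the n_rt lattice points, with d_weights as the weights and
     threads_per_block*sizeof(fType) of shared memory (presumably for the
     block-level reduction), writing the result into d_real[i]. */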
void cudatbx::scattering::direct_summation::sum_over_lattice() {
int blocks_per_grid = cudatbx::calculate_blocks_per_grid
(n_rt,threads_per_block);
for (int i=0; i<n_h; i++) {
cudatbx::math::weighted_sum_kernel<fType>
<<<blocks_per_grid,threads_per_block,threads_per_block*sizeof(fType)>>>
(&d_workspace[i*n_rt],d_weights,n_rt,&d_real[i]);
}
}
/* --------------------------------------------------------------------------
return total sum
*/
scitbx::af::shared<std::complex<double> >
cudatbx::scattering::direct_summation::get_sum() {
scitbx::af::shared<std::complex<double> > sf(n_h);
assert(amplitudes_allocated);
cudaSafeCall( cudaMemcpy(h_real,d_real,n_h*sizeof(fType),
cudaMemcpyDeviceToHost) );
cudaSafeCall( cudaMemcpy(h_imag,d_imag,n_h*sizeof(fType),
cudaMemcpyDeviceToHost) );
for (int i=0; i<n_h; i++) {
sf[i] = std::complex<double>(double(h_real[i]),double(h_imag[i]));
}
return sf;
}
/* ==========================================================================
*/
}
}
|
7a777bdfac457439c1e62748a1bbb0908bbe0377.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
extern "C" {
#include <lofs-read.h>
#include <lofs-dirstruct.h>
#include <lofs-hdf2nc.h>
#include <lofs-limits.h>
#include <lofs-macros.h>
}
#include "../include/datastructs.h"
#include "../calc/calcmomentum.cu"
#ifndef MOMENTUM_CU
#define MOMENTUM_CU
/*
* Copyright (C) 2017-2020 Kelton Halbert, Space Science and Engineering Center (SSEC), University of Wisconsin - Madison
* Written by Kelton Halbert at the University of Wisconsin - Madison,
* Cooperative Institute for Meteorological Satellite Studies (CIMSS),
* Space Science and Engineering Center (SSEC). Provided under the Apache 2.0 License.
* Email: [email protected]
*/
__global__ void cuCalcBuoy(grid *gd, mesh *msh, sounding *snd, float *thrhopert, float *buoy) {
// get our 3D index based on our blocks/threads
int i = (blockIdx.x*blockDim.x) + threadIdx.x;
int j = (blockIdx.y*blockDim.y) + threadIdx.y;
int k = (blockIdx.z*blockDim.z) + threadIdx.z;
int NX = gd->NX;
int NY = gd->NY;
int NZ = gd->NZ;
if ((i < NX+1) && (j < NY+1) && (k > 0) && (k < NZ+1)) {
calc_buoyancy(thrhopert, snd->th0, snd->qv0, buoy, i, j, k, NX, NY);
}
}
__global__ void cuCalcPgradU(grid *gd, mesh *msh, sounding *snd, float *pipert, float *thrhopert, float *pgradu) {
// get our 3D index based on our blocks/threads
int i = (blockIdx.x*blockDim.x) + threadIdx.x;
int j = (blockIdx.y*blockDim.y) + threadIdx.y;
int k = (blockIdx.z*blockDim.z) + threadIdx.z;
int NX = gd->NX;
int NY = gd->NY;
int NZ = gd->NZ;
float dx;
if ((i < NX) && (j < NY+1) && (i > 0) && (k < NZ+1)) {
dx = xh(i) - xh(i-1);
calc_pgrad_u(pipert, thrhopert, snd->qv0, snd->th0, pgradu, dx, i, j, k, NX, NY);
}
}
__global__ void cuCalcPgradV(grid *gd, mesh *msh, sounding *snd, float *pipert, float *thrhopert, float *pgradv) {
// get our 3D index based on our blocks/threads
int i = (blockIdx.x*blockDim.x) + threadIdx.x;
int j = (blockIdx.y*blockDim.y) + threadIdx.y;
int k = (blockIdx.z*blockDim.z) + threadIdx.z;
int NX = gd->NX;
int NY = gd->NY;
int NZ = gd->NZ;
float dy;
if ((i < NX+1) && (j < NY) && (j > 0) && (k < NZ+1)) {
dy = yh(j) - yh(j-1);
calc_pgrad_v(pipert, thrhopert, snd->qv0, snd->th0, pgradv, dy, i, j, k, NX, NY);
}
}
__global__ void cuCalcPgradW(grid *gd, mesh *msh, sounding *snd, float *pipert, float *thrhopert, float *pgradw) {
// get our 3D index based on our blocks/threads
int i = (blockIdx.x*blockDim.x) + threadIdx.x;
int j = (blockIdx.y*blockDim.y) + threadIdx.y;
int k = (blockIdx.z*blockDim.z) + threadIdx.z;
int NX = gd->NX;
int NY = gd->NY;
int NZ = gd->NZ;
float dz;
if ((i < NX+1) && (j < NY+1) && (k > 0) && (k < NZ+1)) {
dz = zh(k) - zh(k-1);
calc_pgrad_w(pipert, thrhopert, snd->qv0, snd->th0, pgradw, dz, i, j, k, NX, NY);
}
}
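// Minimal host-side launch sketch (hypothetical; the block dimensions and the
// device pointers gd_dev/msh_dev/snd_dev/thrhopert_dev/buoy_dev are assumptions,
// not defined in this file):
//   dim3 threads(8, 8, 8);
//   dim3 blocks((NX + 1 + threads.x - 1) / threads.x,
//               (NY + 1 + threads.y - 1) / threads.y,
//               (NZ + 1 + threads.z - 1) / threads.z);
//   hipLaunchKernelGGL(cuCalcBuoy, blocks, threads, 0, 0,
//                      gd_dev, msh_dev, snd_dev, thrhopert_dev, buoy_dev);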
#endif
| 7a777bdfac457439c1e62748a1bbb0908bbe0377.cu | #include <iostream>
#include <stdio.h>
extern "C" {
#include <lofs-read.h>
#include <lofs-dirstruct.h>
#include <lofs-hdf2nc.h>
#include <lofs-limits.h>
#include <lofs-macros.h>
}
#include "../include/datastructs.h"
#include "../calc/calcmomentum.cu"
#ifndef MOMENTUM_CU
#define MOMENTUM_CU
/*
* Copyright (C) 2017-2020 Kelton Halbert, Space Science and Engineering Center (SSEC), University of Wisconsin - Madison
* Written by Kelton Halbert at the University of Wisconsin - Madison,
* Cooperative Institute for Meteorological Satellite Studies (CIMSS),
* Space Science and Engineering Center (SSEC). Provided under the Apache 2.0 License.
* Email: [email protected]
*/
__global__ void cuCalcBuoy(grid *gd, mesh *msh, sounding *snd, float *thrhopert, float *buoy) {
// get our 3D index based on our blocks/threads
int i = (blockIdx.x*blockDim.x) + threadIdx.x;
int j = (blockIdx.y*blockDim.y) + threadIdx.y;
int k = (blockIdx.z*blockDim.z) + threadIdx.z;
int NX = gd->NX;
int NY = gd->NY;
int NZ = gd->NZ;
if ((i < NX+1) && (j < NY+1) && (k > 0) && (k < NZ+1)) {
calc_buoyancy(thrhopert, snd->th0, snd->qv0, buoy, i, j, k, NX, NY);
}
}
__global__ void cuCalcPgradU(grid *gd, mesh *msh, sounding *snd, float *pipert, float *thrhopert, float *pgradu) {
// get our 3D index based on our blocks/threads
int i = (blockIdx.x*blockDim.x) + threadIdx.x;
int j = (blockIdx.y*blockDim.y) + threadIdx.y;
int k = (blockIdx.z*blockDim.z) + threadIdx.z;
int NX = gd->NX;
int NY = gd->NY;
int NZ = gd->NZ;
float dx;
if ((i < NX) && (j < NY+1) && (i > 0) && (k < NZ+1)) {
dx = xh(i) - xh(i-1);
calc_pgrad_u(pipert, thrhopert, snd->qv0, snd->th0, pgradu, dx, i, j, k, NX, NY);
}
}
__global__ void cuCalcPgradV(grid *gd, mesh *msh, sounding *snd, float *pipert, float *thrhopert, float *pgradv) {
// get our 3D index based on our blocks/threads
int i = (blockIdx.x*blockDim.x) + threadIdx.x;
int j = (blockIdx.y*blockDim.y) + threadIdx.y;
int k = (blockIdx.z*blockDim.z) + threadIdx.z;
int NX = gd->NX;
int NY = gd->NY;
int NZ = gd->NZ;
float dy;
if ((i < NX+1) && (j < NY) && (j > 0) && (k < NZ+1)) {
dy = yh(j) - yh(j-1);
calc_pgrad_v(pipert, thrhopert, snd->qv0, snd->th0, pgradv, dy, i, j, k, NX, NY);
}
}
__global__ void cuCalcPgradW(grid *gd, mesh *msh, sounding *snd, float *pipert, float *thrhopert, float *pgradw) {
// get our 3D index based on our blocks/threads
int i = (blockIdx.x*blockDim.x) + threadIdx.x;
int j = (blockIdx.y*blockDim.y) + threadIdx.y;
int k = (blockIdx.z*blockDim.z) + threadIdx.z;
int NX = gd->NX;
int NY = gd->NY;
int NZ = gd->NZ;
float dz;
if ((i < NX+1) && (j < NY+1) && (k > 0) && (k < NZ+1)) {
dz = zh(k) - zh(k-1);
calc_pgrad_w(pipert, thrhopert, snd->qv0, snd->th0, pgradw, dz, i, j, k, NX, NY);
}
}
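// Minimal host-side launch sketch (hypothetical; the block dimensions and the
// device pointers gd_dev/msh_dev/snd_dev/thrhopert_dev/buoy_dev are assumptions,
// not defined in this file):
//   dim3 threads(8, 8, 8);
//   dim3 blocks((NX + 1 + threads.x - 1) / threads.x,
//               (NY + 1 + threads.y - 1) / threads.y,
//               (NZ + 1 + threads.z - 1) / threads.z);
//   cuCalcBuoy<<<blocks, threads>>>(gd_dev, msh_dev, snd_dev, thrhopert_dev, buoy_dev);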
#endif
|
c7d793351f8c4c0130a6f8050948f771a979de0e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Damier3DMath_RGBA.h"
#include "Indice2D.h"
#include "IndiceTools_GPU.h"
#include "DomainMath3D_GPU.h"
#include "cudaTools.h"
#include "Device.h"
using namespace gpu;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void mandelbrot3DCuda(float3* ptrDevVerticesCoord, uchar4* ptrDevVerticesColor, int w, int h, DomainMath3D domaineMath, int n, float t);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/**
 * w: number of points in x
 * h: number of points in y
*/
__global__ void mandelbrot3DCuda(float3* ptrDevVerticesCoord, uchar4* ptrDevVerticesColor, int w, int h, DomainMath3D domaineMath, int n, float t)
{
Mandelbrot3DMath_RGBA damierMath(n);
const int TID = Indice2D::tid();
const int NB_THREAD = Indice2D::nbThread();
const int WH = w * h;
uchar4 color;
float3 sommet;
float x;
float y;
float DX;
float DY;
int vertexI;
int vertexJ;
domaineMath.delta(w,h,&DX,&DY);
int s = TID;
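    // Grid-stride loop: thread TID processes vertices TID, TID + NB_THREAD,
    // TID + 2*NB_THREAD, ... so any launch configuration covers all w*h vertices.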
while (s < WH)
{
IndiceTools::toIJ(s, w, &vertexI, &vertexJ); // update (vertexI, vertexJ)
domaineMath.toXY(DX,DY,vertexI,vertexJ,&x,&y);
damierMath.sommetXY(&sommet,x,y,t); // update sommet
damierMath.colorZ(&color, sommet.z); // update color
ptrDevVerticesColor[s] = color;
ptrDevVerticesCoord[s] = sommet;
s += NB_THREAD;
}
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| c7d793351f8c4c0130a6f8050948f771a979de0e.cu | #include "Damier3DMath_RGBA.h"
#include "Indice2D.h"
#include "IndiceTools_GPU.h"
#include "DomainMath3D_GPU.h"
#include "cudaTools.h"
#include "Device.h"
using namespace gpu;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
__global__ void mandelbrot3DCuda(float3* ptrDevVerticesCoord, uchar4* ptrDevVerticesColor, int w, int h, DomainMath3D domaineMath, int n, float t);
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/**
 * w: number of points in x
 * h: number of points in y
*/
__global__ void mandelbrot3DCuda(float3* ptrDevVerticesCoord, uchar4* ptrDevVerticesColor, int w, int h, DomainMath3D domaineMath, int n, float t)
{
Mandelbrot3DMath_RGBA damierMath(n);
const int TID = Indice2D::tid();
const int NB_THREAD = Indice2D::nbThread();
const int WH = w * h;
uchar4 color;
float3 sommet;
float x;
float y;
float DX;
float DY;
int vertexI;
int vertexJ;
domaineMath.delta(w,h,&DX,&DY);
int s = TID;
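    // Grid-stride loop: thread TID processes vertices TID, TID + NB_THREAD,
    // TID + 2*NB_THREAD, ... so any launch configuration covers all w*h vertices.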
while (s < WH)
{
IndiceTools::toIJ(s, w, &vertexI, &vertexJ); // update (vertexI, vertexJ)
domaineMath.toXY(DX,DY,vertexI,vertexJ,&x,&y);
damierMath.sommetXY(&sommet,x,y,t); // update sommet
damierMath.colorZ(&color, sommet.z); // update color
ptrDevVerticesColor[s] = color;
ptrDevVerticesCoord[s] = sommet;
s += NB_THREAD;
}
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
521448d54d9a84b292b53fe5e9224c5a79213efe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_yvel_minus_4_bot;
int xdim0_update_halo_kernel2_yvel_minus_4_bot_h = -1;
__constant__ int ydim0_update_halo_kernel2_yvel_minus_4_bot;
int ydim0_update_halo_kernel2_yvel_minus_4_bot_h = -1;
__constant__ int xdim1_update_halo_kernel2_yvel_minus_4_bot;
int xdim1_update_halo_kernel2_yvel_minus_4_bot_h = -1;
__constant__ int ydim1_update_halo_kernel2_yvel_minus_4_bot;
int ydim1_update_halo_kernel2_yvel_minus_4_bot_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_yvel_minus_4_bot * (y) + \
xdim0_update_halo_kernel2_yvel_minus_4_bot * \
ydim0_update_halo_kernel2_yvel_minus_4_bot * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_yvel_minus_4_bot * (y) + \
xdim1_update_halo_kernel2_yvel_minus_4_bot * \
ydim1_update_halo_kernel2_yvel_minus_4_bot * (z))
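// Indexing sketch: OPS_ACC0(x, y, z) flattens a relative 3D offset into the 1D
// array as x + xdim0*y + xdim0*ydim0*z (from the current thread's base pointer),
// so OPS_ACC0(0, 4, 0) in the kernel below reads the value four cells up in y.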
// user function
__device__
inline void
update_halo_kernel2_yvel_minus_4_bot(double *yvel0, double *yvel1,
const int *fields) {
if (fields[FIELD_YVEL0] == 1)
yvel0[OPS_ACC0(0, 0, 0)] = -yvel0[OPS_ACC0(0, 4, 0)];
if (fields[FIELD_YVEL1] == 1)
yvel1[OPS_ACC1(0, 0, 0)] = -yvel1[OPS_ACC1(0, 4, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_yvel_minus_4_bot(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_yvel_minus_4_bot +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_yvel_minus_4_bot *
ydim0_update_halo_kernel2_yvel_minus_4_bot;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_yvel_minus_4_bot +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_yvel_minus_4_bot *
ydim1_update_halo_kernel2_yvel_minus_4_bot;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_yvel_minus_4_bot(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel2_yvel_minus_4_bot(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1,
ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 81))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(81, "update_halo_kernel2_yvel_minus_4_bot");
OPS_kernels[81].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_yvel_minus_4_bot_h ||
ydim0 != ydim0_update_halo_kernel2_yvel_minus_4_bot_h ||
xdim1 != xdim1_update_halo_kernel2_yvel_minus_4_bot_h ||
ydim1 != ydim1_update_halo_kernel2_yvel_minus_4_bot_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel2_yvel_minus_4_bot, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_yvel_minus_4_bot_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel2_yvel_minus_4_bot, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_yvel_minus_4_bot_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel2_yvel_minus_4_bot, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_yvel_minus_4_bot_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel2_yvel_minus_4_bot, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_yvel_minus_4_bot_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[81].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel2_yvel_minus_4_bot), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[81].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[81].mpi_time += t2 - t1;
OPS_kernels[81].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[81].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
| 521448d54d9a84b292b53fe5e9224c5a79213efe.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_yvel_minus_4_bot;
int xdim0_update_halo_kernel2_yvel_minus_4_bot_h = -1;
__constant__ int ydim0_update_halo_kernel2_yvel_minus_4_bot;
int ydim0_update_halo_kernel2_yvel_minus_4_bot_h = -1;
__constant__ int xdim1_update_halo_kernel2_yvel_minus_4_bot;
int xdim1_update_halo_kernel2_yvel_minus_4_bot_h = -1;
__constant__ int ydim1_update_halo_kernel2_yvel_minus_4_bot;
int ydim1_update_halo_kernel2_yvel_minus_4_bot_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_yvel_minus_4_bot * (y) + \
xdim0_update_halo_kernel2_yvel_minus_4_bot * \
ydim0_update_halo_kernel2_yvel_minus_4_bot * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_yvel_minus_4_bot * (y) + \
xdim1_update_halo_kernel2_yvel_minus_4_bot * \
ydim1_update_halo_kernel2_yvel_minus_4_bot * (z))
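// Indexing sketch: OPS_ACC0(x, y, z) flattens a relative 3D offset into the 1D
// array as x + xdim0*y + xdim0*ydim0*z (from the current thread's base pointer),
// so OPS_ACC0(0, 4, 0) in the kernel below reads the value four cells up in y.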
// user function
__device__
inline void
update_halo_kernel2_yvel_minus_4_bot(double *yvel0, double *yvel1,
const int *fields) {
if (fields[FIELD_YVEL0] == 1)
yvel0[OPS_ACC0(0, 0, 0)] = -yvel0[OPS_ACC0(0, 4, 0)];
if (fields[FIELD_YVEL1] == 1)
yvel1[OPS_ACC1(0, 0, 0)] = -yvel1[OPS_ACC1(0, 4, 0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_yvel_minus_4_bot(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_yvel_minus_4_bot +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_yvel_minus_4_bot *
ydim0_update_halo_kernel2_yvel_minus_4_bot;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_yvel_minus_4_bot +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_yvel_minus_4_bot *
ydim1_update_halo_kernel2_yvel_minus_4_bot;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_yvel_minus_4_bot(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel2_yvel_minus_4_bot(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1,
ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 81))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(81, "update_halo_kernel2_yvel_minus_4_bot");
OPS_kernels[81].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_yvel_minus_4_bot_h ||
ydim0 != ydim0_update_halo_kernel2_yvel_minus_4_bot_h ||
xdim1 != xdim1_update_halo_kernel2_yvel_minus_4_bot_h ||
ydim1 != ydim1_update_halo_kernel2_yvel_minus_4_bot_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel2_yvel_minus_4_bot, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_yvel_minus_4_bot_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel2_yvel_minus_4_bot, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_yvel_minus_4_bot_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel2_yvel_minus_4_bot, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_yvel_minus_4_bot_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel2_yvel_minus_4_bot, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_yvel_minus_4_bot_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[81].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel2_yvel_minus_4_bot<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[81].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[81].mpi_time += t2 - t1;
OPS_kernels[81].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[81].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
77d2966abb2a686f3ad148889a410b25f3d8cf78.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
template<class T>
__device__ const T& mymin(const T& a, const T& b)
{
return (b < a) ? b : a;
}
__global__ void call_min(double* first, const double* second)
{
first[threadIdx.x] = mymin(first[threadIdx.x], second[threadIdx.x]);
}
| 77d2966abb2a686f3ad148889a410b25f3d8cf78.cu | template<class T>
__device__ const T& mymin(const T& a, const T& b)
{
return (b < a) ? b : a;
}
__global__ void call_min(double* first, const double* second)
{
first[threadIdx.x] = mymin(first[threadIdx.x], second[threadIdx.x]);
}
|
a731f77b9c559bf5ce957a9b812bef0abb601995.hip | // !!! This is a file automatically generated by hipify!!!
// =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Author: Milad Rakhsha, Arman Pazouki
// =============================================================================
//
// Implementation of FSI system that includes all subclasses for proximity and
// force calculation, and time integration.
//
// =============================================================================
#include <thrust/copy.h>
#include <thrust/gather.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/transform.h>
#include "chrono_fsi/physics/ChSystemFsi_impl.cuh"
#include "chrono_fsi/physics/ChSphGeneral.cuh"
namespace chrono {
namespace fsi {
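// Equality functor comparing the type code stored in the w component of two
// Real4 values (presumably the rhoPresMu markers), for use with thrust algorithms.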
struct sphTypeCompEqual {
__host__ __device__ bool operator()(const Real4& o1, const Real4& o2) { return o1.w == o2.w; }
};
//---------------------------------------------------------------------------------------
zipIterSphD SphMarkerDataD::iterator() {
return thrust::make_zip_iterator(thrust::make_tuple(posRadD.begin(), velMasD.begin(), rhoPresMuD.begin(),
tauXxYyZzD.begin(), tauXyXzYzD.begin()));
}
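// Usage sketch (hypothetical, not part of the original interface): the zip
// iterator exposes all five per-marker arrays as a single sequence, e.g.
//   SphMarkerDataD a, b;            // assume both already resized to n markers
//   thrust::copy(a.iterator(), a.iterator() + n, b.iterator());
// copies position, velocity, rho/pres/mu and both stress arrays in one pass.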
void SphMarkerDataD::resize(size_t s) {
posRadD.resize(s);
velMasD.resize(s);
rhoPresMuD.resize(s);
tauXxYyZzD.resize(s);
tauXyXzYzD.resize(s);
}
//---------------------------------------------------------------------------------------
zipIterSphH SphMarkerDataH::iterator() {
return thrust::make_zip_iterator(thrust::make_tuple(posRadH.begin(), velMasH.begin(), rhoPresMuH.begin(),
tauXxYyZzH.begin(), tauXyXzYzH.begin()));
}
// resize
void SphMarkerDataH::resize(size_t s) {
posRadH.resize(s);
velMasH.resize(s);
rhoPresMuH.resize(s);
tauXxYyZzH.resize(s);
tauXyXzYzH.resize(s);
}
//---------------------------------------------------------------------------------------
zipIterRigidD FsiBodiesDataD::iterator() {
return thrust::make_zip_iterator(
thrust::make_tuple(posRigid_fsiBodies_D.begin(), velMassRigid_fsiBodies_D.begin(), accRigid_fsiBodies_D.begin(),
q_fsiBodies_D.begin(), omegaVelLRF_fsiBodies_D.begin(), omegaAccLRF_fsiBodies_D.begin()));
}
void FsiBodiesDataD::resize(size_t s) {
posRigid_fsiBodies_D.resize(s);
velMassRigid_fsiBodies_D.resize(s);
accRigid_fsiBodies_D.resize(s);
q_fsiBodies_D.resize(s);
omegaVelLRF_fsiBodies_D.resize(s);
omegaAccLRF_fsiBodies_D.resize(s);
}
void FsiShellsDataH::resize(size_t s) {
posFlex_fsiBodies_nA_H.resize(s);
posFlex_fsiBodies_nB_H.resize(s);
posFlex_fsiBodies_nC_H.resize(s);
posFlex_fsiBodies_nD_H.resize(s);
velFlex_fsiBodies_nA_H.resize(s);
velFlex_fsiBodies_nB_H.resize(s);
velFlex_fsiBodies_nC_H.resize(s);
velFlex_fsiBodies_nD_H.resize(s);
accFlex_fsiBodies_nA_H.resize(s);
accFlex_fsiBodies_nB_H.resize(s);
accFlex_fsiBodies_nC_H.resize(s);
accFlex_fsiBodies_nD_H.resize(s);
}
void FsiShellsDataD::resize(size_t s) {
posFlex_fsiBodies_nA_D.resize(s);
posFlex_fsiBodies_nB_D.resize(s);
posFlex_fsiBodies_nC_D.resize(s);
posFlex_fsiBodies_nD_D.resize(s);
velFlex_fsiBodies_nA_D.resize(s);
velFlex_fsiBodies_nB_D.resize(s);
velFlex_fsiBodies_nC_D.resize(s);
velFlex_fsiBodies_nD_D.resize(s);
accFlex_fsiBodies_nA_D.resize(s);
accFlex_fsiBodies_nB_D.resize(s);
accFlex_fsiBodies_nC_D.resize(s);
accFlex_fsiBodies_nD_D.resize(s);
}
void FsiMeshDataH::resize(size_t s) {
pos_fsi_fea_H.resize(s);
vel_fsi_fea_H.resize(s);
acc_fsi_fea_H.resize(s);
}
void FsiMeshDataD::resize(size_t s) {
pos_fsi_fea_D.resize(s);
vel_fsi_fea_D.resize(s);
acc_fsi_fea_D.resize(s);
}
void FsiBodiesDataD::CopyFromH(const FsiBodiesDataH& other) {
thrust::copy(other.posRigid_fsiBodies_H.begin(), other.posRigid_fsiBodies_H.end(), posRigid_fsiBodies_D.begin());
thrust::copy(other.velMassRigid_fsiBodies_H.begin(), other.velMassRigid_fsiBodies_H.end(),
velMassRigid_fsiBodies_D.begin());
thrust::copy(other.accRigid_fsiBodies_H.begin(), other.accRigid_fsiBodies_H.end(), accRigid_fsiBodies_D.begin());
thrust::copy(other.q_fsiBodies_H.begin(), other.q_fsiBodies_H.end(), q_fsiBodies_D.begin());
thrust::copy(other.omegaVelLRF_fsiBodies_H.begin(), other.omegaVelLRF_fsiBodies_H.end(),
omegaVelLRF_fsiBodies_D.begin());
thrust::copy(other.omegaAccLRF_fsiBodies_H.begin(), other.omegaAccLRF_fsiBodies_H.end(),
omegaAccLRF_fsiBodies_D.begin());
}
void FsiShellsDataD::CopyFromH(const FsiShellsDataH& other) {
thrust::copy(other.posFlex_fsiBodies_nA_H.begin(), other.posFlex_fsiBodies_nA_H.end(),
posFlex_fsiBodies_nA_D.begin());
thrust::copy(other.posFlex_fsiBodies_nB_H.begin(), other.posFlex_fsiBodies_nB_H.end(),
posFlex_fsiBodies_nB_D.begin());
thrust::copy(other.posFlex_fsiBodies_nC_H.begin(), other.posFlex_fsiBodies_nC_H.end(),
posFlex_fsiBodies_nC_D.begin());
thrust::copy(other.posFlex_fsiBodies_nD_H.begin(), other.posFlex_fsiBodies_nD_H.end(),
posFlex_fsiBodies_nD_D.begin());
thrust::copy(other.velFlex_fsiBodies_nA_H.begin(), other.velFlex_fsiBodies_nA_H.end(),
velFlex_fsiBodies_nA_D.begin());
thrust::copy(other.velFlex_fsiBodies_nB_H.begin(), other.velFlex_fsiBodies_nB_H.end(),
velFlex_fsiBodies_nB_D.begin());
thrust::copy(other.velFlex_fsiBodies_nC_H.begin(), other.velFlex_fsiBodies_nC_H.end(),
velFlex_fsiBodies_nC_D.begin());
thrust::copy(other.velFlex_fsiBodies_nD_H.begin(), other.velFlex_fsiBodies_nD_H.end(),
velFlex_fsiBodies_nD_D.begin());
thrust::copy(other.accFlex_fsiBodies_nA_H.begin(), other.accFlex_fsiBodies_nA_H.end(),
accFlex_fsiBodies_nA_D.begin());
thrust::copy(other.accFlex_fsiBodies_nB_H.begin(), other.accFlex_fsiBodies_nB_H.end(),
accFlex_fsiBodies_nB_D.begin());
thrust::copy(other.accFlex_fsiBodies_nC_H.begin(), other.accFlex_fsiBodies_nC_H.end(),
accFlex_fsiBodies_nC_D.begin());
thrust::copy(other.accFlex_fsiBodies_nD_H.begin(), other.accFlex_fsiBodies_nD_H.end(),
accFlex_fsiBodies_nD_D.begin());
}
void FsiMeshDataD::CopyFromH(const FsiMeshDataH& other) {
thrust::copy(other.pos_fsi_fea_H.begin(), other.pos_fsi_fea_H.end(), pos_fsi_fea_D.begin());
thrust::copy(other.vel_fsi_fea_H.begin(), other.vel_fsi_fea_H.end(), vel_fsi_fea_D.begin());
thrust::copy(other.acc_fsi_fea_H.begin(), other.acc_fsi_fea_H.end(), acc_fsi_fea_D.begin());
}
FsiBodiesDataD& FsiBodiesDataD::operator=(const FsiBodiesDataD& other) {
if (this == &other) {
return *this;
}
thrust::copy(other.posRigid_fsiBodies_D.begin(), other.posRigid_fsiBodies_D.end(), posRigid_fsiBodies_D.begin());
thrust::copy(other.velMassRigid_fsiBodies_D.begin(), other.velMassRigid_fsiBodies_D.end(),
velMassRigid_fsiBodies_D.begin());
thrust::copy(other.accRigid_fsiBodies_D.begin(), other.accRigid_fsiBodies_D.end(), accRigid_fsiBodies_D.begin());
thrust::copy(other.q_fsiBodies_D.begin(), other.q_fsiBodies_D.end(), q_fsiBodies_D.begin());
thrust::copy(other.omegaVelLRF_fsiBodies_D.begin(), other.omegaVelLRF_fsiBodies_D.end(),
omegaVelLRF_fsiBodies_D.begin());
thrust::copy(other.omegaAccLRF_fsiBodies_D.begin(), other.omegaAccLRF_fsiBodies_D.end(),
omegaAccLRF_fsiBodies_D.begin());
return *this;
}
FsiShellsDataD& FsiShellsDataD::operator=(const FsiShellsDataD& other) {
if (this == &other) {
return *this;
}
thrust::copy(other.posFlex_fsiBodies_nA_D.begin(), other.posFlex_fsiBodies_nA_D.end(),
posFlex_fsiBodies_nA_D.begin());
thrust::copy(other.posFlex_fsiBodies_nB_D.begin(), other.posFlex_fsiBodies_nB_D.end(),
posFlex_fsiBodies_nB_D.begin());
thrust::copy(other.posFlex_fsiBodies_nC_D.begin(), other.posFlex_fsiBodies_nC_D.end(),
posFlex_fsiBodies_nC_D.begin());
thrust::copy(other.posFlex_fsiBodies_nD_D.begin(), other.posFlex_fsiBodies_nD_D.end(),
posFlex_fsiBodies_nD_D.begin());
thrust::copy(other.velFlex_fsiBodies_nA_D.begin(), other.velFlex_fsiBodies_nA_D.end(),
velFlex_fsiBodies_nA_D.begin());
thrust::copy(other.velFlex_fsiBodies_nB_D.begin(), other.velFlex_fsiBodies_nB_D.end(),
velFlex_fsiBodies_nB_D.begin());
thrust::copy(other.velFlex_fsiBodies_nC_D.begin(), other.velFlex_fsiBodies_nC_D.end(),
velFlex_fsiBodies_nC_D.begin());
thrust::copy(other.velFlex_fsiBodies_nD_D.begin(), other.velFlex_fsiBodies_nD_D.end(),
velFlex_fsiBodies_nD_D.begin());
thrust::copy(other.accFlex_fsiBodies_nA_D.begin(), other.accFlex_fsiBodies_nA_D.end(),
                 accFlex_fsiBodies_nA_D.begin());
thrust::copy(other.accFlex_fsiBodies_nB_D.begin(), other.accFlex_fsiBodies_nB_D.end(),
accFlex_fsiBodies_nB_D.begin());
thrust::copy(other.accFlex_fsiBodies_nC_D.begin(), other.accFlex_fsiBodies_nC_D.end(),
accFlex_fsiBodies_nC_D.begin());
thrust::copy(other.accFlex_fsiBodies_nD_D.begin(), other.accFlex_fsiBodies_nD_D.end(),
accFlex_fsiBodies_nD_D.begin());
return *this;
}
FsiMeshDataD& FsiMeshDataD::operator=(const FsiMeshDataD& other) {
if (this == &other) {
return *this;
}
thrust::copy(other.pos_fsi_fea_D.begin(), other.pos_fsi_fea_D.end(), pos_fsi_fea_D.begin());
thrust::copy(other.vel_fsi_fea_D.begin(), other.vel_fsi_fea_D.end(), vel_fsi_fea_D.begin());
thrust::copy(other.acc_fsi_fea_D.begin(), other.acc_fsi_fea_D.end(), acc_fsi_fea_D.begin());
return *this;
}
//---------------------------------------------------------------------------------------
zipIterRigidH FsiBodiesDataH::iterator() {
return thrust::make_zip_iterator(
thrust::make_tuple(posRigid_fsiBodies_H.begin(), velMassRigid_fsiBodies_H.begin(), accRigid_fsiBodies_H.begin(),
q_fsiBodies_H.begin(), omegaVelLRF_fsiBodies_H.begin(), omegaAccLRF_fsiBodies_H.begin()));
}
void FsiBodiesDataH::resize(size_t s) {
posRigid_fsiBodies_H.resize(s);
velMassRigid_fsiBodies_H.resize(s);
accRigid_fsiBodies_H.resize(s);
q_fsiBodies_H.resize(s);
omegaVelLRF_fsiBodies_H.resize(s);
omegaAccLRF_fsiBodies_H.resize(s);
}
//---------------------------------------------------------------------------------------
void ProximityDataD::resize(size_t s) {
gridMarkerHashD.resize(s);
gridMarkerIndexD.resize(s);
mapOriginalToSorted.resize(s);
}
//---------------------------------------------------------------------------------------
ChronoBodiesDataH::ChronoBodiesDataH(size_t s) {
resize(s);
}
ChronoShellsDataH::ChronoShellsDataH(size_t s) {
resize(s);
}
ChronoMeshDataH::ChronoMeshDataH(size_t s) {
resize(s);
}
zipIterChronoBodiesH ChronoBodiesDataH::iterator() {
return thrust::make_zip_iterator(thrust::make_tuple(pos_ChSystemH.begin(), vel_ChSystemH.begin(),
acc_ChSystemH.begin(), quat_ChSystemH.begin(),
omegaVelGRF_ChSystemH.begin(), omegaAccGRF_ChSystemH.begin()));
}
void ChronoBodiesDataH::resize(size_t s) {
pos_ChSystemH.resize(s);
vel_ChSystemH.resize(s);
acc_ChSystemH.resize(s);
quat_ChSystemH.resize(s);
omegaVelGRF_ChSystemH.resize(s);
omegaAccGRF_ChSystemH.resize(s);
}
void ChronoShellsDataH::resize(size_t s) {
posFlex_ChSystemH_nA_H.resize(s);
posFlex_ChSystemH_nB_H.resize(s);
posFlex_ChSystemH_nC_H.resize(s);
posFlex_ChSystemH_nD_H.resize(s);
velFlex_ChSystemH_nA_H.resize(s);
velFlex_ChSystemH_nB_H.resize(s);
velFlex_ChSystemH_nC_H.resize(s);
velFlex_ChSystemH_nD_H.resize(s);
accFlex_ChSystemH_nA_H.resize(s);
accFlex_ChSystemH_nB_H.resize(s);
accFlex_ChSystemH_nC_H.resize(s);
accFlex_ChSystemH_nD_H.resize(s);
}
void ChronoMeshDataH::resize(size_t s) {
posFlex_ChSystemH_H.resize(s);
velFlex_ChSystemH_H.resize(s);
accFlex_ChSystemH_H.resize(s);
}
//---------------------------------------------------------------------------------------
ChSystemFsi_impl::ChSystemFsi_impl(std::shared_ptr<SimParams> params) : paramsH(params) {
numObjects = chrono_types::make_shared<ChCounters>();
InitNumObjects();
sphMarkersD1 = chrono_types::make_shared<SphMarkerDataD>();
sphMarkersD2 = chrono_types::make_shared<SphMarkerDataD>();
sortedSphMarkersD = chrono_types::make_shared<SphMarkerDataD>();
sphMarkersH = chrono_types::make_shared<SphMarkerDataH>();
fsiBodiesD1 = chrono_types::make_shared<FsiBodiesDataD>();
fsiBodiesD2 = chrono_types::make_shared<FsiBodiesDataD>();
fsiBodiesH = chrono_types::make_shared<FsiBodiesDataH>();
fsiMeshD = chrono_types::make_shared<FsiMeshDataD>();
fsiMeshH = chrono_types::make_shared<FsiMeshDataH>();
fsiGeneralData = chrono_types::make_shared<FsiGeneralData>();
markersProximityD = chrono_types::make_shared<ProximityDataD>();
}
ChSystemFsi_impl::~ChSystemFsi_impl() {}
void ChSystemFsi_impl::AddSPHParticle(Real4 pos, Real4 rhoPresMu, Real3 vel, Real3 tauXxYyZz, Real3 tauXyXzYz) {
sphMarkersH->posRadH.push_back(pos);
sphMarkersH->velMasH.push_back(vel);
sphMarkersH->rhoPresMuH.push_back(rhoPresMu);
sphMarkersH->tauXyXzYzH.push_back(tauXyXzYz);
sphMarkersH->tauXxYyZzH.push_back(tauXxYyZz);
}
void ChSystemFsi_impl::ArrangeDataManager() {
thrust::host_vector<Real4> dummyRhoPresMuH = sphMarkersH->rhoPresMuH;
dummyRhoPresMuH.clear();
}
void ChSystemFsi_impl::InitNumObjects() {
numObjects->numRigidBodies = 0; // Number of rigid bodies
numObjects->numFlexBodies1D = 0; // Number of 1D Flexible bodies
numObjects->numFlexBodies2D = 0; // Number of 2D Flexible bodies
numObjects->numFlexNodes = 0; // Number of FE nodes
numObjects->numGhostMarkers = 0; // Number of ghost particles
numObjects->numHelperMarkers = 0; // Number of helper particles
numObjects->numFluidMarkers = 0; // Number of fluid SPH particles
numObjects->numBoundaryMarkers = 0; // Number of boundary SPH particles
numObjects->startRigidMarkers = 0; // Start index of the rigid SPH particles
numObjects->startFlexMarkers = 0; // Start index of the flexible SPH particles
numObjects->numRigidMarkers = 0; // Number of rigid SPH particles
numObjects->numFlexMarkers = 0; // Number of flexible SPH particles
numObjects->numAllMarkers = 0; // Total number of SPH particles
}
void ChSystemFsi_impl::CalcNumObjects() {
InitNumObjects();
size_t rSize = fsiGeneralData->referenceArray.size();
for (size_t i = 0; i < rSize; i++) {
int4 rComp4 = fsiGeneralData->referenceArray[i];
int numMarkers = rComp4.y - rComp4.x;
switch (rComp4.z) {
case -3:
numObjects->numHelperMarkers += numMarkers;
break;
case -2:
numObjects->numGhostMarkers += numMarkers;
break;
case -1:
numObjects->numFluidMarkers += numMarkers;
break;
case 0:
numObjects->numBoundaryMarkers += numMarkers;
break;
case 1:
numObjects->numRigidMarkers += numMarkers;
numObjects->numRigidBodies++;
break;
case 2:
numObjects->numFlexMarkers += numMarkers;
numObjects->numFlexBodies1D++;
break;
case 3:
numObjects->numFlexMarkers += numMarkers;
numObjects->numFlexBodies2D++;
break;
default:
std::cerr << "ERROR (CalcNumObjects): particle type not defined." << std::endl;
throw std::runtime_error("Particle type not defined.");
break;
}
}
numObjects->numFluidMarkers += numObjects->numGhostMarkers + numObjects->numHelperMarkers;
numObjects->numAllMarkers = numObjects->numFluidMarkers + numObjects->numBoundaryMarkers +
numObjects->numRigidMarkers + numObjects->numFlexMarkers;
numObjects->startRigidMarkers = numObjects->numFluidMarkers + numObjects->numBoundaryMarkers;
numObjects->startFlexMarkers =
numObjects->numFluidMarkers + numObjects->numBoundaryMarkers + numObjects->numRigidMarkers;
}
void ChSystemFsi_impl::ConstructReferenceArray() {
auto numAllMarkers = sphMarkersH->rhoPresMuH.size();
thrust::host_vector<int> numComponentMarkers(numAllMarkers);
thrust::fill(numComponentMarkers.begin(), numComponentMarkers.end(), 1);
thrust::host_vector<Real4> dummyRhoPresMuH = sphMarkersH->rhoPresMuH;
thrust::copy(sphMarkersH->rhoPresMuH.begin(), sphMarkersH->rhoPresMuH.end(), dummyRhoPresMuH.begin());
size_t numberOfComponents =
(thrust::reduce_by_key(dummyRhoPresMuH.begin(), dummyRhoPresMuH.end(), numComponentMarkers.begin(),
dummyRhoPresMuH.begin(), numComponentMarkers.begin(), sphTypeCompEqual()))
.first -
dummyRhoPresMuH.begin();
dummyRhoPresMuH.resize(numberOfComponents);
numComponentMarkers.resize(numberOfComponents);
fsiGeneralData->referenceArray.clear();
fsiGeneralData->referenceArray_FEA.clear();
// Loop through all components loading referenceArray and referenceArray_FEA
int start_index = 0;
for (size_t i = 0; i < numberOfComponents; i++) {
int compType = (int)::floor(dummyRhoPresMuH[i].w + .1);
int phaseType = -1;
if (compType == -3) {
phaseType = -1; // For helper
} else if (compType == -2) {
phaseType = -1; // For ghost
} else if (compType == -1) {
phaseType = -1; // For fluid/granular
} else if (compType == 0) {
phaseType = 0; // For boundary
} else if (compType == 1) {
phaseType = 1; // For rigid
} else if (compType == 2) {
phaseType = 1; // For 1D cable elements
} else if (compType == 3) {
phaseType = 1; // For 2D shell elements
} else {
phaseType = 1;
}
auto new_entry = mI4(start_index, start_index + numComponentMarkers[i], compType, phaseType);
start_index += numComponentMarkers[i];
fsiGeneralData->referenceArray.push_back(new_entry);
if (compType == 2 || compType == 3)
fsiGeneralData->referenceArray_FEA.push_back(new_entry);
}
dummyRhoPresMuH.clear();
numComponentMarkers.clear();
}
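// For instance, if the stored phase markers (rhoPresMu.w) were
// {-1,-1,-1, 0,0, 1,1,1}, the reduce_by_key pass above would compress them to
// keys {-1, 0, 1} with counts {3, 2, 3}, and the loop would then emit the
// referenceArray entries {0,3,-1,-1}, {3,5,0,0}, {5,8,1,1}
// (start index, end index, component type, phase type).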
//--------------------------------------------------------------------------------------------------------------------------------
void ChSystemFsi_impl::ResizeData(size_t numRigidBodies,
size_t numFlexBodies1D,
size_t numFlexBodies2D,
size_t numFlexNodes) {
ConstructReferenceArray();
CalcNumObjects();
if (numObjects->numAllMarkers != sphMarkersH->rhoPresMuH.size()) {
std::cerr << "ERROR (ResizeData): mismatch in total number of markers." << std::endl;
throw std::runtime_error("Mismatch in total number of markers.");
}
// Set number of interface objects
numObjects->numRigidBodies = numRigidBodies;
numObjects->numFlexBodies1D = numFlexBodies1D;
numObjects->numFlexBodies2D = numFlexBodies2D;
numObjects->numFlexNodes = numFlexNodes;
sphMarkersD1->resize(numObjects->numAllMarkers);
sphMarkersD2->resize(numObjects->numAllMarkers);
sortedSphMarkersD->resize(numObjects->numAllMarkers);
sphMarkersH->resize(numObjects->numAllMarkers);
markersProximityD->resize(numObjects->numAllMarkers);
fsiGeneralData->derivVelRhoD.resize(numObjects->numAllMarkers);
fsiGeneralData->derivVelRhoD_old.resize(numObjects->numAllMarkers);
fsiGeneralData->derivTauXxYyZzD.resize(numObjects->numAllMarkers);
fsiGeneralData->derivTauXyXzYzD.resize(numObjects->numAllMarkers);
fsiGeneralData->vel_XSPH_D.resize(numObjects->numAllMarkers);
fsiGeneralData->vis_vel_SPH_D.resize(numObjects->numAllMarkers, mR3(1e-20));
fsiGeneralData->sr_tau_I_mu_i.resize(numObjects->numAllMarkers, mR4(1e-20));
fsiGeneralData->activityIdentifierD.resize(numObjects->numAllMarkers, 1);
fsiGeneralData->extendedActivityIdD.resize(numObjects->numAllMarkers, 1);
fsiGeneralData->freeSurfaceIdD.resize(numObjects->numAllMarkers, 0);
thrust::copy(sphMarkersH->posRadH.begin(), sphMarkersH->posRadH.end(), sphMarkersD1->posRadD.begin());
thrust::copy(sphMarkersH->velMasH.begin(), sphMarkersH->velMasH.end(), sphMarkersD1->velMasD.begin());
thrust::copy(sphMarkersH->rhoPresMuH.begin(), sphMarkersH->rhoPresMuH.end(), sphMarkersD1->rhoPresMuD.begin());
thrust::copy(sphMarkersH->tauXxYyZzH.begin(), sphMarkersH->tauXxYyZzH.end(), sphMarkersD1->tauXxYyZzD.begin());
thrust::copy(sphMarkersH->tauXyXzYzH.begin(), sphMarkersH->tauXyXzYzH.end(), sphMarkersD1->tauXyXzYzD.begin());
thrust::copy(sphMarkersD1->posRadD.begin(), sphMarkersD1->posRadD.end(), sphMarkersD2->posRadD.begin());
thrust::copy(sphMarkersD1->velMasD.begin(), sphMarkersD1->velMasD.end(), sphMarkersD2->velMasD.begin());
thrust::copy(sphMarkersD1->rhoPresMuD.begin(), sphMarkersD1->rhoPresMuD.end(), sphMarkersD2->rhoPresMuD.begin());
thrust::copy(sphMarkersD1->tauXxYyZzD.begin(), sphMarkersD1->tauXxYyZzD.end(), sphMarkersD2->tauXxYyZzD.begin());
thrust::copy(sphMarkersD1->tauXyXzYzD.begin(), sphMarkersD1->tauXyXzYzD.end(), sphMarkersD2->tauXyXzYzD.begin());
fsiBodiesD1->resize(numObjects->numRigidBodies);
fsiBodiesD2->resize(numObjects->numRigidBodies);
fsiBodiesH->resize(numObjects->numRigidBodies);
fsiGeneralData->rigid_FSI_ForcesD.resize(numObjects->numRigidBodies);
fsiGeneralData->rigid_FSI_TorquesD.resize(numObjects->numRigidBodies);
fsiGeneralData->rigidIdentifierD.resize(numObjects->numRigidMarkers);
fsiGeneralData->rigidSPH_MeshPos_LRF_D.resize(numObjects->numRigidMarkers);
fsiGeneralData->FlexIdentifierD.resize(numObjects->numFlexMarkers);
fsiGeneralData->FlexSPH_MeshPos_LRF_D.resize(numObjects->numFlexMarkers);
fsiGeneralData->FlexSPH_MeshPos_LRF_H.resize(numObjects->numFlexMarkers);
fsiGeneralData->CableElementsNodes.resize(fsiGeneralData->CableElementsNodesH.size());
fsiGeneralData->ShellElementsNodes.resize(fsiGeneralData->ShellElementsNodesH.size());
thrust::copy(fsiGeneralData->CableElementsNodesH.begin(), fsiGeneralData->CableElementsNodesH.end(),
fsiGeneralData->CableElementsNodes.begin());
thrust::copy(fsiGeneralData->ShellElementsNodesH.begin(), fsiGeneralData->ShellElementsNodesH.end(),
fsiGeneralData->ShellElementsNodes.begin());
fsiMeshD->resize(numObjects->numFlexNodes);
fsiMeshH->resize(numObjects->numFlexNodes);
fsiGeneralData->Flex_FSI_ForcesD.resize(numObjects->numFlexNodes);
}
//--------------------------------------------------------------------------------------------------------------------------------
struct axpby_functor {
axpby_functor(Real a, Real b) : m_a(a), m_b(b) {}
__host__ __device__ Real4 operator()(const Real4& x, const Real4& y) const { return m_a * x + m_b * y; }
const Real m_a;
const Real m_b;
};
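// axpby: a*X plus b*Y, applied elementwise to Real4 values by the
// thrust::transform call below.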
thrust::device_vector<Real4> ChSystemFsi_impl::GetParticleForces() {
const auto n = numObjects->numFluidMarkers;
// Copy data for SPH particles only
thrust::device_vector<Real4> dvD(n);
thrust::copy_n(fsiGeneralData->derivVelRhoD.begin(), n, dvD.begin());
// Average dvD = beta * derivVelRhoD + (1-beta) * derivVelRhoD_old
Real beta = paramsH->Beta;
thrust::transform(dvD.begin(), dvD.end(), fsiGeneralData->derivVelRhoD_old.begin(), dvD.begin(),
axpby_functor(beta, 1 - beta));
return dvD;
}
//--------------------------------------------------------------------------------------------------------------------------------
struct in_box {
in_box() {}
__device__ bool operator()(const Real4 v) {
// Convert location in box frame
auto d = mR3(v) - pos;
auto w = mR3( //
ax.x * d.x + ax.y * d.y + ax.z * d.z, //
ay.x * d.x + ay.y * d.y + ay.z * d.z, //
az.x * d.x + az.y * d.y + az.z * d.z //
);
// Check w between all box limits
return (w.x >= -hsize.x && w.x <= +hsize.x) && (w.y >= -hsize.y && w.y <= +hsize.y) &&
(w.z >= -hsize.z && w.z <= +hsize.z);
}
Real3 hsize;
Real3 pos;
Real3 ax;
Real3 ay;
Real3 az;
};
thrust::device_vector<int> ChSystemFsi_impl::FindParticlesInBox(const Real3& hsize,
const Real3& pos,
const Real3& ax,
const Real3& ay,
const Real3& az) {
// Extract indices of SPH particles contained in the OBB
auto& ref = fsiGeneralData->referenceArray;
auto& pos_D = sphMarkersD2->posRadD;
// Find start and end locations for SPH particles (exclude ghost and BCE markers)
int haveHelper = (ref[0].z == -3) ? 1 : 0;
int haveGhost = (ref[0].z == -2 || ref[1].z == -2) ? 1 : 0;
auto sph_start = ref[haveHelper + haveGhost].x;
auto sph_end = ref[haveHelper + haveGhost].y;
auto num_sph = sph_end - sph_start;
// Preallocate output vector of indices
thrust::device_vector<int> indices_D(num_sph);
// Extract indices of SPH particles inside OBB
thrust::counting_iterator<int> first(0);
thrust::counting_iterator<int> last(num_sph);
in_box predicate;
predicate.hsize = hsize;
predicate.pos = pos;
predicate.ax = ax;
predicate.ay = ay;
predicate.az = az;
auto end = thrust::copy_if(thrust::device, // execution policy
first, last, // range of all particle indices
pos_D.begin(), // stencil vector
indices_D.begin(), // beginning of destination
predicate // predicate for stencil elements
);
// Trim the output vector of indices
size_t num_active = (size_t)(end - indices_D.begin());
indices_D.resize(num_active);
return indices_D;
}
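// Minimal usage sketch (illustrative only; "sysFSI" stands for an existing
// ChSystemFsi_impl instance and the box shown is a unit, axis-aligned OBB):
//   thrust::device_vector<int> ids = sysFSI.FindParticlesInBox(
//       mR3(0.5, 0.5, 0.5),                       // half-dimensions of the box
//       mR3(0, 0, 0),                             // box center
//       mR3(1, 0, 0), mR3(0, 1, 0), mR3(0, 0, 1)  // box axes
//   );
//   auto pos = sysFSI.GetParticlePositions(ids);
//   auto vel = sysFSI.GetParticleVelocities(ids);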
thrust::device_vector<Real4> ChSystemFsi_impl::GetParticlePositions(const thrust::device_vector<int>& indices) {
// Gather positions from particles with specified indices
const auto& allpos = sphMarkersD2->posRadD;
thrust::device_vector<Real4> pos(allpos.size());
auto end = thrust::gather(thrust::device, // execution policy
indices.begin(), indices.end(), // range of gather locations
allpos.begin(), // beginning of source
pos.begin() // beginning of destination
);
// Trim the output vector of particle positions
size_t num_active = (size_t)(end - pos.begin());
assert(num_active == indices.size());
pos.resize(num_active);
return pos;
}
thrust::device_vector<Real3> ChSystemFsi_impl::GetParticleVelocities(const thrust::device_vector<int>& indices) {
    // Gather velocities from particles with specified indices
auto allvel = sphMarkersD2->velMasD;
thrust::device_vector<Real3> vel(allvel.size());
auto end = thrust::gather(thrust::device, // execution policy
indices.begin(), indices.end(), // range of gather locations
allvel.begin(), // beginning of source
vel.begin() // beginning of destination
);
    // Trim the output vector of particle velocities
size_t num_active = (size_t)(end - vel.begin());
assert(num_active == indices.size());
vel.resize(num_active);
return vel;
}
thrust::device_vector<Real4> ChSystemFsi_impl::GetParticleForces(const thrust::device_vector<int>& indices) {
auto allforces = GetParticleForces();
thrust::device_vector<Real4> forces(allforces.size());
auto end = thrust::gather(thrust::device, // execution policy
indices.begin(), indices.end(), // range of gather locations
allforces.begin(), // beginning of source
forces.begin() // beginning of destination
);
    // Trim the output vector of particle forces
size_t num_active = (size_t)(end - forces.begin());
assert(num_active == indices.size());
forces.resize(num_active);
return forces;
}
} // end namespace fsi
} // end namespace chrono
| a731f77b9c559bf5ce957a9b812bef0abb601995.cu | // =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Author: Milad Rakhsha, Arman Pazouki
// =============================================================================
//
// Implementation of FSI system that includes all subclasses for proximity and
// force calculation, and time integration.
//
// =============================================================================
#include <thrust/copy.h>
#include <thrust/gather.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/transform.h>
#include "chrono_fsi/physics/ChSystemFsi_impl.cuh"
#include "chrono_fsi/physics/ChSphGeneral.cuh"
namespace chrono {
namespace fsi {
struct sphTypeCompEqual {
__host__ __device__ bool operator()(const Real4& o1, const Real4& o2) { return o1.w == o2.w; }
};
//---------------------------------------------------------------------------------------
zipIterSphD SphMarkerDataD::iterator() {
return thrust::make_zip_iterator(thrust::make_tuple(posRadD.begin(), velMasD.begin(), rhoPresMuD.begin(),
tauXxYyZzD.begin(), tauXyXzYzD.begin()));
}
void SphMarkerDataD::resize(size_t s) {
posRadD.resize(s);
velMasD.resize(s);
rhoPresMuD.resize(s);
tauXxYyZzD.resize(s);
tauXyXzYzD.resize(s);
}
//---------------------------------------------------------------------------------------
zipIterSphH SphMarkerDataH::iterator() {
return thrust::make_zip_iterator(thrust::make_tuple(posRadH.begin(), velMasH.begin(), rhoPresMuH.begin(),
tauXxYyZzH.begin(), tauXyXzYzH.begin()));
}
// resize
void SphMarkerDataH::resize(size_t s) {
posRadH.resize(s);
velMasH.resize(s);
rhoPresMuH.resize(s);
tauXxYyZzH.resize(s);
tauXyXzYzH.resize(s);
}
//---------------------------------------------------------------------------------------
zipIterRigidD FsiBodiesDataD::iterator() {
return thrust::make_zip_iterator(
thrust::make_tuple(posRigid_fsiBodies_D.begin(), velMassRigid_fsiBodies_D.begin(), accRigid_fsiBodies_D.begin(),
q_fsiBodies_D.begin(), omegaVelLRF_fsiBodies_D.begin(), omegaAccLRF_fsiBodies_D.begin()));
}
void FsiBodiesDataD::resize(size_t s) {
posRigid_fsiBodies_D.resize(s);
velMassRigid_fsiBodies_D.resize(s);
accRigid_fsiBodies_D.resize(s);
q_fsiBodies_D.resize(s);
omegaVelLRF_fsiBodies_D.resize(s);
omegaAccLRF_fsiBodies_D.resize(s);
}
void FsiShellsDataH::resize(size_t s) {
posFlex_fsiBodies_nA_H.resize(s);
posFlex_fsiBodies_nB_H.resize(s);
posFlex_fsiBodies_nC_H.resize(s);
posFlex_fsiBodies_nD_H.resize(s);
velFlex_fsiBodies_nA_H.resize(s);
velFlex_fsiBodies_nB_H.resize(s);
velFlex_fsiBodies_nC_H.resize(s);
velFlex_fsiBodies_nD_H.resize(s);
accFlex_fsiBodies_nA_H.resize(s);
accFlex_fsiBodies_nB_H.resize(s);
accFlex_fsiBodies_nC_H.resize(s);
accFlex_fsiBodies_nD_H.resize(s);
}
void FsiShellsDataD::resize(size_t s) {
posFlex_fsiBodies_nA_D.resize(s);
posFlex_fsiBodies_nB_D.resize(s);
posFlex_fsiBodies_nC_D.resize(s);
posFlex_fsiBodies_nD_D.resize(s);
velFlex_fsiBodies_nA_D.resize(s);
velFlex_fsiBodies_nB_D.resize(s);
velFlex_fsiBodies_nC_D.resize(s);
velFlex_fsiBodies_nD_D.resize(s);
accFlex_fsiBodies_nA_D.resize(s);
accFlex_fsiBodies_nB_D.resize(s);
accFlex_fsiBodies_nC_D.resize(s);
accFlex_fsiBodies_nD_D.resize(s);
}
void FsiMeshDataH::resize(size_t s) {
pos_fsi_fea_H.resize(s);
vel_fsi_fea_H.resize(s);
acc_fsi_fea_H.resize(s);
}
void FsiMeshDataD::resize(size_t s) {
pos_fsi_fea_D.resize(s);
vel_fsi_fea_D.resize(s);
acc_fsi_fea_D.resize(s);
}
void FsiBodiesDataD::CopyFromH(const FsiBodiesDataH& other) {
thrust::copy(other.posRigid_fsiBodies_H.begin(), other.posRigid_fsiBodies_H.end(), posRigid_fsiBodies_D.begin());
thrust::copy(other.velMassRigid_fsiBodies_H.begin(), other.velMassRigid_fsiBodies_H.end(),
velMassRigid_fsiBodies_D.begin());
thrust::copy(other.accRigid_fsiBodies_H.begin(), other.accRigid_fsiBodies_H.end(), accRigid_fsiBodies_D.begin());
thrust::copy(other.q_fsiBodies_H.begin(), other.q_fsiBodies_H.end(), q_fsiBodies_D.begin());
thrust::copy(other.omegaVelLRF_fsiBodies_H.begin(), other.omegaVelLRF_fsiBodies_H.end(),
omegaVelLRF_fsiBodies_D.begin());
thrust::copy(other.omegaAccLRF_fsiBodies_H.begin(), other.omegaAccLRF_fsiBodies_H.end(),
omegaAccLRF_fsiBodies_D.begin());
}
void FsiShellsDataD::CopyFromH(const FsiShellsDataH& other) {
thrust::copy(other.posFlex_fsiBodies_nA_H.begin(), other.posFlex_fsiBodies_nA_H.end(),
posFlex_fsiBodies_nA_D.begin());
thrust::copy(other.posFlex_fsiBodies_nB_H.begin(), other.posFlex_fsiBodies_nB_H.end(),
posFlex_fsiBodies_nB_D.begin());
thrust::copy(other.posFlex_fsiBodies_nC_H.begin(), other.posFlex_fsiBodies_nC_H.end(),
posFlex_fsiBodies_nC_D.begin());
thrust::copy(other.posFlex_fsiBodies_nD_H.begin(), other.posFlex_fsiBodies_nD_H.end(),
posFlex_fsiBodies_nD_D.begin());
thrust::copy(other.velFlex_fsiBodies_nA_H.begin(), other.velFlex_fsiBodies_nA_H.end(),
velFlex_fsiBodies_nA_D.begin());
thrust::copy(other.velFlex_fsiBodies_nB_H.begin(), other.velFlex_fsiBodies_nB_H.end(),
velFlex_fsiBodies_nB_D.begin());
thrust::copy(other.velFlex_fsiBodies_nC_H.begin(), other.velFlex_fsiBodies_nC_H.end(),
velFlex_fsiBodies_nC_D.begin());
thrust::copy(other.velFlex_fsiBodies_nD_H.begin(), other.velFlex_fsiBodies_nD_H.end(),
velFlex_fsiBodies_nD_D.begin());
thrust::copy(other.accFlex_fsiBodies_nA_H.begin(), other.accFlex_fsiBodies_nA_H.end(),
accFlex_fsiBodies_nA_D.begin());
thrust::copy(other.accFlex_fsiBodies_nB_H.begin(), other.accFlex_fsiBodies_nB_H.end(),
accFlex_fsiBodies_nB_D.begin());
thrust::copy(other.accFlex_fsiBodies_nC_H.begin(), other.accFlex_fsiBodies_nC_H.end(),
accFlex_fsiBodies_nC_D.begin());
thrust::copy(other.accFlex_fsiBodies_nD_H.begin(), other.accFlex_fsiBodies_nD_H.end(),
accFlex_fsiBodies_nD_D.begin());
}
void FsiMeshDataD::CopyFromH(const FsiMeshDataH& other) {
thrust::copy(other.pos_fsi_fea_H.begin(), other.pos_fsi_fea_H.end(), pos_fsi_fea_D.begin());
thrust::copy(other.vel_fsi_fea_H.begin(), other.vel_fsi_fea_H.end(), vel_fsi_fea_D.begin());
thrust::copy(other.acc_fsi_fea_H.begin(), other.acc_fsi_fea_H.end(), acc_fsi_fea_D.begin());
}
FsiBodiesDataD& FsiBodiesDataD::operator=(const FsiBodiesDataD& other) {
if (this == &other) {
return *this;
}
thrust::copy(other.posRigid_fsiBodies_D.begin(), other.posRigid_fsiBodies_D.end(), posRigid_fsiBodies_D.begin());
thrust::copy(other.velMassRigid_fsiBodies_D.begin(), other.velMassRigid_fsiBodies_D.end(),
velMassRigid_fsiBodies_D.begin());
thrust::copy(other.accRigid_fsiBodies_D.begin(), other.accRigid_fsiBodies_D.end(), accRigid_fsiBodies_D.begin());
thrust::copy(other.q_fsiBodies_D.begin(), other.q_fsiBodies_D.end(), q_fsiBodies_D.begin());
thrust::copy(other.omegaVelLRF_fsiBodies_D.begin(), other.omegaVelLRF_fsiBodies_D.end(),
omegaVelLRF_fsiBodies_D.begin());
thrust::copy(other.omegaAccLRF_fsiBodies_D.begin(), other.omegaAccLRF_fsiBodies_D.end(),
omegaAccLRF_fsiBodies_D.begin());
return *this;
}
FsiShellsDataD& FsiShellsDataD::operator=(const FsiShellsDataD& other) {
if (this == &other) {
return *this;
}
thrust::copy(other.posFlex_fsiBodies_nA_D.begin(), other.posFlex_fsiBodies_nA_D.end(),
posFlex_fsiBodies_nA_D.begin());
thrust::copy(other.posFlex_fsiBodies_nB_D.begin(), other.posFlex_fsiBodies_nB_D.end(),
posFlex_fsiBodies_nB_D.begin());
thrust::copy(other.posFlex_fsiBodies_nC_D.begin(), other.posFlex_fsiBodies_nC_D.end(),
posFlex_fsiBodies_nC_D.begin());
thrust::copy(other.posFlex_fsiBodies_nD_D.begin(), other.posFlex_fsiBodies_nD_D.end(),
posFlex_fsiBodies_nD_D.begin());
thrust::copy(other.velFlex_fsiBodies_nA_D.begin(), other.velFlex_fsiBodies_nA_D.end(),
velFlex_fsiBodies_nA_D.begin());
thrust::copy(other.velFlex_fsiBodies_nB_D.begin(), other.velFlex_fsiBodies_nB_D.end(),
velFlex_fsiBodies_nB_D.begin());
thrust::copy(other.velFlex_fsiBodies_nC_D.begin(), other.velFlex_fsiBodies_nC_D.end(),
velFlex_fsiBodies_nC_D.begin());
thrust::copy(other.velFlex_fsiBodies_nD_D.begin(), other.velFlex_fsiBodies_nD_D.end(),
velFlex_fsiBodies_nD_D.begin());
thrust::copy(other.accFlex_fsiBodies_nA_D.begin(), other.accFlex_fsiBodies_nA_D.end(),
                 accFlex_fsiBodies_nA_D.begin());
thrust::copy(other.accFlex_fsiBodies_nB_D.begin(), other.accFlex_fsiBodies_nB_D.end(),
accFlex_fsiBodies_nB_D.begin());
thrust::copy(other.accFlex_fsiBodies_nC_D.begin(), other.accFlex_fsiBodies_nC_D.end(),
accFlex_fsiBodies_nC_D.begin());
thrust::copy(other.accFlex_fsiBodies_nD_D.begin(), other.accFlex_fsiBodies_nD_D.end(),
accFlex_fsiBodies_nD_D.begin());
return *this;
}
FsiMeshDataD& FsiMeshDataD::operator=(const FsiMeshDataD& other) {
if (this == &other) {
return *this;
}
thrust::copy(other.pos_fsi_fea_D.begin(), other.pos_fsi_fea_D.end(), pos_fsi_fea_D.begin());
thrust::copy(other.vel_fsi_fea_D.begin(), other.vel_fsi_fea_D.end(), vel_fsi_fea_D.begin());
thrust::copy(other.acc_fsi_fea_D.begin(), other.acc_fsi_fea_D.end(), acc_fsi_fea_D.begin());
return *this;
}
//---------------------------------------------------------------------------------------
zipIterRigidH FsiBodiesDataH::iterator() {
return thrust::make_zip_iterator(
thrust::make_tuple(posRigid_fsiBodies_H.begin(), velMassRigid_fsiBodies_H.begin(), accRigid_fsiBodies_H.begin(),
q_fsiBodies_H.begin(), omegaVelLRF_fsiBodies_H.begin(), omegaAccLRF_fsiBodies_H.begin()));
}
void FsiBodiesDataH::resize(size_t s) {
posRigid_fsiBodies_H.resize(s);
velMassRigid_fsiBodies_H.resize(s);
accRigid_fsiBodies_H.resize(s);
q_fsiBodies_H.resize(s);
omegaVelLRF_fsiBodies_H.resize(s);
omegaAccLRF_fsiBodies_H.resize(s);
}
//---------------------------------------------------------------------------------------
void ProximityDataD::resize(size_t s) {
gridMarkerHashD.resize(s);
gridMarkerIndexD.resize(s);
mapOriginalToSorted.resize(s);
}
//---------------------------------------------------------------------------------------
ChronoBodiesDataH::ChronoBodiesDataH(size_t s) {
resize(s);
}
ChronoShellsDataH::ChronoShellsDataH(size_t s) {
resize(s);
}
ChronoMeshDataH::ChronoMeshDataH(size_t s) {
resize(s);
}
zipIterChronoBodiesH ChronoBodiesDataH::iterator() {
return thrust::make_zip_iterator(thrust::make_tuple(pos_ChSystemH.begin(), vel_ChSystemH.begin(),
acc_ChSystemH.begin(), quat_ChSystemH.begin(),
omegaVelGRF_ChSystemH.begin(), omegaAccGRF_ChSystemH.begin()));
}
void ChronoBodiesDataH::resize(size_t s) {
pos_ChSystemH.resize(s);
vel_ChSystemH.resize(s);
acc_ChSystemH.resize(s);
quat_ChSystemH.resize(s);
omegaVelGRF_ChSystemH.resize(s);
omegaAccGRF_ChSystemH.resize(s);
}
void ChronoShellsDataH::resize(size_t s) {
posFlex_ChSystemH_nA_H.resize(s);
posFlex_ChSystemH_nB_H.resize(s);
posFlex_ChSystemH_nC_H.resize(s);
posFlex_ChSystemH_nD_H.resize(s);
velFlex_ChSystemH_nA_H.resize(s);
velFlex_ChSystemH_nB_H.resize(s);
velFlex_ChSystemH_nC_H.resize(s);
velFlex_ChSystemH_nD_H.resize(s);
accFlex_ChSystemH_nA_H.resize(s);
accFlex_ChSystemH_nB_H.resize(s);
accFlex_ChSystemH_nC_H.resize(s);
accFlex_ChSystemH_nD_H.resize(s);
}
void ChronoMeshDataH::resize(size_t s) {
posFlex_ChSystemH_H.resize(s);
velFlex_ChSystemH_H.resize(s);
accFlex_ChSystemH_H.resize(s);
}
//---------------------------------------------------------------------------------------
ChSystemFsi_impl::ChSystemFsi_impl(std::shared_ptr<SimParams> params) : paramsH(params) {
numObjects = chrono_types::make_shared<ChCounters>();
InitNumObjects();
sphMarkersD1 = chrono_types::make_shared<SphMarkerDataD>();
sphMarkersD2 = chrono_types::make_shared<SphMarkerDataD>();
sortedSphMarkersD = chrono_types::make_shared<SphMarkerDataD>();
sphMarkersH = chrono_types::make_shared<SphMarkerDataH>();
fsiBodiesD1 = chrono_types::make_shared<FsiBodiesDataD>();
fsiBodiesD2 = chrono_types::make_shared<FsiBodiesDataD>();
fsiBodiesH = chrono_types::make_shared<FsiBodiesDataH>();
fsiMeshD = chrono_types::make_shared<FsiMeshDataD>();
fsiMeshH = chrono_types::make_shared<FsiMeshDataH>();
fsiGeneralData = chrono_types::make_shared<FsiGeneralData>();
markersProximityD = chrono_types::make_shared<ProximityDataD>();
}
ChSystemFsi_impl::~ChSystemFsi_impl() {}
void ChSystemFsi_impl::AddSPHParticle(Real4 pos, Real4 rhoPresMu, Real3 vel, Real3 tauXxYyZz, Real3 tauXyXzYz) {
sphMarkersH->posRadH.push_back(pos);
sphMarkersH->velMasH.push_back(vel);
sphMarkersH->rhoPresMuH.push_back(rhoPresMu);
sphMarkersH->tauXyXzYzH.push_back(tauXyXzYz);
sphMarkersH->tauXxYyZzH.push_back(tauXxYyZz);
}
void ChSystemFsi_impl::ArrangeDataManager() {
thrust::host_vector<Real4> dummyRhoPresMuH = sphMarkersH->rhoPresMuH;
dummyRhoPresMuH.clear();
}
void ChSystemFsi_impl::InitNumObjects() {
numObjects->numRigidBodies = 0; // Number of rigid bodies
numObjects->numFlexBodies1D = 0; // Number of 1D Flexible bodies
numObjects->numFlexBodies2D = 0; // Number of 2D Flexible bodies
numObjects->numFlexNodes = 0; // Number of FE nodes
numObjects->numGhostMarkers = 0; // Number of ghost particles
numObjects->numHelperMarkers = 0; // Number of helper particles
numObjects->numFluidMarkers = 0; // Number of fluid SPH particles
numObjects->numBoundaryMarkers = 0; // Number of boundary SPH particles
numObjects->startRigidMarkers = 0; // Start index of the rigid SPH particles
numObjects->startFlexMarkers = 0; // Start index of the flexible SPH particles
numObjects->numRigidMarkers = 0; // Number of rigid SPH particles
numObjects->numFlexMarkers = 0; // Number of flexible SPH particles
numObjects->numAllMarkers = 0; // Total number of SPH particles
}
void ChSystemFsi_impl::CalcNumObjects() {
InitNumObjects();
size_t rSize = fsiGeneralData->referenceArray.size();
for (size_t i = 0; i < rSize; i++) {
int4 rComp4 = fsiGeneralData->referenceArray[i];
int numMarkers = rComp4.y - rComp4.x;
switch (rComp4.z) {
case -3:
numObjects->numHelperMarkers += numMarkers;
break;
case -2:
numObjects->numGhostMarkers += numMarkers;
break;
case -1:
numObjects->numFluidMarkers += numMarkers;
break;
case 0:
numObjects->numBoundaryMarkers += numMarkers;
break;
case 1:
numObjects->numRigidMarkers += numMarkers;
numObjects->numRigidBodies++;
break;
case 2:
numObjects->numFlexMarkers += numMarkers;
numObjects->numFlexBodies1D++;
break;
case 3:
numObjects->numFlexMarkers += numMarkers;
numObjects->numFlexBodies2D++;
break;
default:
std::cerr << "ERROR (CalcNumObjects): particle type not defined." << std::endl;
throw std::runtime_error("Particle type not defined.");
break;
}
}
numObjects->numFluidMarkers += numObjects->numGhostMarkers + numObjects->numHelperMarkers;
numObjects->numAllMarkers = numObjects->numFluidMarkers + numObjects->numBoundaryMarkers +
numObjects->numRigidMarkers + numObjects->numFlexMarkers;
numObjects->startRigidMarkers = numObjects->numFluidMarkers + numObjects->numBoundaryMarkers;
numObjects->startFlexMarkers =
numObjects->numFluidMarkers + numObjects->numBoundaryMarkers + numObjects->numRigidMarkers;
}
void ChSystemFsi_impl::ConstructReferenceArray() {
auto numAllMarkers = sphMarkersH->rhoPresMuH.size();
thrust::host_vector<int> numComponentMarkers(numAllMarkers);
thrust::fill(numComponentMarkers.begin(), numComponentMarkers.end(), 1);
thrust::host_vector<Real4> dummyRhoPresMuH = sphMarkersH->rhoPresMuH;
thrust::copy(sphMarkersH->rhoPresMuH.begin(), sphMarkersH->rhoPresMuH.end(), dummyRhoPresMuH.begin());
size_t numberOfComponents =
(thrust::reduce_by_key(dummyRhoPresMuH.begin(), dummyRhoPresMuH.end(), numComponentMarkers.begin(),
dummyRhoPresMuH.begin(), numComponentMarkers.begin(), sphTypeCompEqual()))
.first -
dummyRhoPresMuH.begin();
dummyRhoPresMuH.resize(numberOfComponents);
numComponentMarkers.resize(numberOfComponents);
fsiGeneralData->referenceArray.clear();
fsiGeneralData->referenceArray_FEA.clear();
// Loop through all components loading referenceArray and referenceArray_FEA
int start_index = 0;
for (size_t i = 0; i < numberOfComponents; i++) {
int compType = (int)std::floor(dummyRhoPresMuH[i].w + .1);
int phaseType = -1;
if (compType == -3) {
phaseType = -1; // For helper
} else if (compType == -2) {
phaseType = -1; // For ghost
} else if (compType == -1) {
phaseType = -1; // For fluid/granular
} else if (compType == 0) {
phaseType = 0; // For boundary
} else if (compType == 1) {
phaseType = 1; // For rigid
} else if (compType == 2) {
phaseType = 1; // For 1D cable elements
} else if (compType == 3) {
phaseType = 1; // For 2D shell elements
} else {
phaseType = 1;
}
auto new_entry = mI4(start_index, start_index + numComponentMarkers[i], compType, phaseType);
start_index += numComponentMarkers[i];
fsiGeneralData->referenceArray.push_back(new_entry);
if (compType == 2 || compType == 3)
fsiGeneralData->referenceArray_FEA.push_back(new_entry);
}
dummyRhoPresMuH.clear();
numComponentMarkers.clear();
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChSystemFsi_impl::ResizeData(size_t numRigidBodies,
size_t numFlexBodies1D,
size_t numFlexBodies2D,
size_t numFlexNodes) {
ConstructReferenceArray();
CalcNumObjects();
if (numObjects->numAllMarkers != sphMarkersH->rhoPresMuH.size()) {
std::cerr << "ERROR (ResizeData): mismatch in total number of markers." << std::endl;
throw std::runtime_error("Mismatch in total number of markers.");
}
// Set number of interface objects
numObjects->numRigidBodies = numRigidBodies;
numObjects->numFlexBodies1D = numFlexBodies1D;
numObjects->numFlexBodies2D = numFlexBodies2D;
numObjects->numFlexNodes = numFlexNodes;
sphMarkersD1->resize(numObjects->numAllMarkers);
sphMarkersD2->resize(numObjects->numAllMarkers);
sortedSphMarkersD->resize(numObjects->numAllMarkers);
sphMarkersH->resize(numObjects->numAllMarkers);
markersProximityD->resize(numObjects->numAllMarkers);
fsiGeneralData->derivVelRhoD.resize(numObjects->numAllMarkers);
fsiGeneralData->derivVelRhoD_old.resize(numObjects->numAllMarkers);
fsiGeneralData->derivTauXxYyZzD.resize(numObjects->numAllMarkers);
fsiGeneralData->derivTauXyXzYzD.resize(numObjects->numAllMarkers);
fsiGeneralData->vel_XSPH_D.resize(numObjects->numAllMarkers);
fsiGeneralData->vis_vel_SPH_D.resize(numObjects->numAllMarkers, mR3(1e-20));
fsiGeneralData->sr_tau_I_mu_i.resize(numObjects->numAllMarkers, mR4(1e-20));
fsiGeneralData->activityIdentifierD.resize(numObjects->numAllMarkers, 1);
fsiGeneralData->extendedActivityIdD.resize(numObjects->numAllMarkers, 1);
fsiGeneralData->freeSurfaceIdD.resize(numObjects->numAllMarkers, 0);
thrust::copy(sphMarkersH->posRadH.begin(), sphMarkersH->posRadH.end(), sphMarkersD1->posRadD.begin());
thrust::copy(sphMarkersH->velMasH.begin(), sphMarkersH->velMasH.end(), sphMarkersD1->velMasD.begin());
thrust::copy(sphMarkersH->rhoPresMuH.begin(), sphMarkersH->rhoPresMuH.end(), sphMarkersD1->rhoPresMuD.begin());
thrust::copy(sphMarkersH->tauXxYyZzH.begin(), sphMarkersH->tauXxYyZzH.end(), sphMarkersD1->tauXxYyZzD.begin());
thrust::copy(sphMarkersH->tauXyXzYzH.begin(), sphMarkersH->tauXyXzYzH.end(), sphMarkersD1->tauXyXzYzD.begin());
thrust::copy(sphMarkersD1->posRadD.begin(), sphMarkersD1->posRadD.end(), sphMarkersD2->posRadD.begin());
thrust::copy(sphMarkersD1->velMasD.begin(), sphMarkersD1->velMasD.end(), sphMarkersD2->velMasD.begin());
thrust::copy(sphMarkersD1->rhoPresMuD.begin(), sphMarkersD1->rhoPresMuD.end(), sphMarkersD2->rhoPresMuD.begin());
thrust::copy(sphMarkersD1->tauXxYyZzD.begin(), sphMarkersD1->tauXxYyZzD.end(), sphMarkersD2->tauXxYyZzD.begin());
thrust::copy(sphMarkersD1->tauXyXzYzD.begin(), sphMarkersD1->tauXyXzYzD.end(), sphMarkersD2->tauXyXzYzD.begin());
fsiBodiesD1->resize(numObjects->numRigidBodies);
fsiBodiesD2->resize(numObjects->numRigidBodies);
fsiBodiesH->resize(numObjects->numRigidBodies);
fsiGeneralData->rigid_FSI_ForcesD.resize(numObjects->numRigidBodies);
fsiGeneralData->rigid_FSI_TorquesD.resize(numObjects->numRigidBodies);
fsiGeneralData->rigidIdentifierD.resize(numObjects->numRigidMarkers);
fsiGeneralData->rigidSPH_MeshPos_LRF_D.resize(numObjects->numRigidMarkers);
fsiGeneralData->FlexIdentifierD.resize(numObjects->numFlexMarkers);
fsiGeneralData->FlexSPH_MeshPos_LRF_D.resize(numObjects->numFlexMarkers);
fsiGeneralData->FlexSPH_MeshPos_LRF_H.resize(numObjects->numFlexMarkers);
fsiGeneralData->CableElementsNodes.resize(fsiGeneralData->CableElementsNodesH.size());
fsiGeneralData->ShellElementsNodes.resize(fsiGeneralData->ShellElementsNodesH.size());
thrust::copy(fsiGeneralData->CableElementsNodesH.begin(), fsiGeneralData->CableElementsNodesH.end(),
fsiGeneralData->CableElementsNodes.begin());
thrust::copy(fsiGeneralData->ShellElementsNodesH.begin(), fsiGeneralData->ShellElementsNodesH.end(),
fsiGeneralData->ShellElementsNodes.begin());
fsiMeshD->resize(numObjects->numFlexNodes);
fsiMeshH->resize(numObjects->numFlexNodes);
fsiGeneralData->Flex_FSI_ForcesD.resize(numObjects->numFlexNodes);
}
//--------------------------------------------------------------------------------------------------------------------------------
struct axpby_functor {
axpby_functor(Real a, Real b) : m_a(a), m_b(b) {}
__host__ __device__ Real4 operator()(const Real4& x, const Real4& y) const { return m_a * x + m_b * y; }
const Real m_a;
const Real m_b;
};
thrust::device_vector<Real4> ChSystemFsi_impl::GetParticleForces() {
const auto n = numObjects->numFluidMarkers;
// Copy data for SPH particles only
thrust::device_vector<Real4> dvD(n);
thrust::copy_n(fsiGeneralData->derivVelRhoD.begin(), n, dvD.begin());
// Average dvD = beta * derivVelRhoD + (1-beta) * derivVelRhoD_old
Real beta = paramsH->Beta;
thrust::transform(dvD.begin(), dvD.end(), fsiGeneralData->derivVelRhoD_old.begin(), dvD.begin(),
axpby_functor(beta, 1 - beta));
return dvD;
}
//--------------------------------------------------------------------------------------------------------------------------------
struct in_box {
in_box() {}
__device__ bool operator()(const Real4 v) {
// Convert location in box frame
auto d = mR3(v) - pos;
auto w = mR3( //
ax.x * d.x + ax.y * d.y + ax.z * d.z, //
ay.x * d.x + ay.y * d.y + ay.z * d.z, //
az.x * d.x + az.y * d.y + az.z * d.z //
);
// Check w between all box limits
return (w.x >= -hsize.x && w.x <= +hsize.x) && (w.y >= -hsize.y && w.y <= +hsize.y) &&
(w.z >= -hsize.z && w.z <= +hsize.z);
}
Real3 hsize;
Real3 pos;
Real3 ax;
Real3 ay;
Real3 az;
};
thrust::device_vector<int> ChSystemFsi_impl::FindParticlesInBox(const Real3& hsize,
const Real3& pos,
const Real3& ax,
const Real3& ay,
const Real3& az) {
// Extract indices of SPH particles contained in the OBB
auto& ref = fsiGeneralData->referenceArray;
auto& pos_D = sphMarkersD2->posRadD;
// Find start and end locations for SPH particles (exclude ghost and BCE markers)
int haveHelper = (ref[0].z == -3) ? 1 : 0;
int haveGhost = (ref[0].z == -2 || ref[1].z == -2) ? 1 : 0;
auto sph_start = ref[haveHelper + haveGhost].x;
auto sph_end = ref[haveHelper + haveGhost].y;
auto num_sph = sph_end - sph_start;
// Preallocate output vector of indices
thrust::device_vector<int> indices_D(num_sph);
// Extract indices of SPH particles inside OBB
thrust::counting_iterator<int> first(0);
thrust::counting_iterator<int> last(num_sph);
in_box predicate;
predicate.hsize = hsize;
predicate.pos = pos;
predicate.ax = ax;
predicate.ay = ay;
predicate.az = az;
auto end = thrust::copy_if(thrust::device, // execution policy
first, last, // range of all particle indices
pos_D.begin(), // stencil vector
indices_D.begin(), // beginning of destination
predicate // predicate for stencil elements
);
// Trim the output vector of indices
size_t num_active = (size_t)(end - indices_D.begin());
indices_D.resize(num_active);
return indices_D;
}
thrust::device_vector<Real4> ChSystemFsi_impl::GetParticlePositions(const thrust::device_vector<int>& indices) {
// Gather positions from particles with specified indices
const auto& allpos = sphMarkersD2->posRadD;
thrust::device_vector<Real4> pos(allpos.size());
auto end = thrust::gather(thrust::device, // execution policy
indices.begin(), indices.end(), // range of gather locations
allpos.begin(), // beginning of source
pos.begin() // beginning of destination
);
// Trim the output vector of particle positions
size_t num_active = (size_t)(end - pos.begin());
assert(num_active == indices.size());
pos.resize(num_active);
return pos;
}
thrust::device_vector<Real3> ChSystemFsi_impl::GetParticleVelocities(const thrust::device_vector<int>& indices) {
    // Gather velocities from particles with specified indices
auto allvel = sphMarkersD2->velMasD;
thrust::device_vector<Real3> vel(allvel.size());
auto end = thrust::gather(thrust::device, // execution policy
indices.begin(), indices.end(), // range of gather locations
allvel.begin(), // beginning of source
vel.begin() // beginning of destination
);
    // Trim the output vector of particle velocities
size_t num_active = (size_t)(end - vel.begin());
assert(num_active == indices.size());
vel.resize(num_active);
return vel;
}
thrust::device_vector<Real4> ChSystemFsi_impl::GetParticleForces(const thrust::device_vector<int>& indices) {
auto allforces = GetParticleForces();
thrust::device_vector<Real4> forces(allforces.size());
auto end = thrust::gather(thrust::device, // execution policy
indices.begin(), indices.end(), // range of gather locations
allforces.begin(), // beginning of source
forces.begin() // beginning of destination
);
    // Trim the output vector of particle forces
size_t num_active = (size_t)(end - forces.begin());
assert(num_active == indices.size());
forces.resize(num_active);
return forces;
}
} // end namespace fsi
} // end namespace chrono
|
26837e8c98cf69c72d64b4fa1b407b5e76c07077.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define CUDA_BLOCK_X 128
#define CUDA_BLOCK_Y 1
#define CUDA_BLOCK_Z 1
__global__ void _auto_kernel_2(int a[5][5],int b[5][5],int i)
{
int thread_x_id;thread_x_id = blockIdx.x * blockDim.x + threadIdx.x;
int thread_y_id;thread_y_id = blockIdx.y * blockDim.y + threadIdx.y;
if (thread_x_id && thread_y_id)
if (thread_x_id <= 5 && thread_y_id <= 5) {
b[i][1 * thread_y_id + -1] = a[i][1 * thread_y_id + -1];
}
}
__global__ void _auto_kernel_1(int b[5][5],int i)
{
int thread_x_id;thread_x_id = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_x_id)
if (thread_x_id <= 5) {
b[i][0] = 1;
}
}
__global__ void _auto_kernel_0(int a[5][5])
{
int thread_x_id;thread_x_id = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_x_id)
if (thread_x_id <= 5) {
a[1 * thread_x_id + -1][1 * thread_x_id + -1] = 1;
}
}
int main()
{
int j;
int i_nom_2;
int i_nom_1;
int i;
int a[5][5];
int b[5][5];
int y;
{
{
/* Auto-generated code for call to _auto_kernel_0 */
typedef int _narray_a[5];
_narray_a *d_a;
hipMalloc((void **) &d_a, sizeof(int ) * 5 * 5);
hipMemcpy(d_a, a, sizeof(int ) * 5 * 5, hipMemcpyHostToDevice);
int CUDA_GRID_X;
CUDA_GRID_X = (5 + CUDA_BLOCK_X - 1)/CUDA_BLOCK_X;
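            // ceiling division: launch enough CUDA_BLOCK_X-wide blocks to cover all 5 iterations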
int CUDA_GRID_Y;
CUDA_GRID_Y = (5 + CUDA_BLOCK_Y - 1)/CUDA_BLOCK_Y;
int CUDA_GRID_Z;
CUDA_GRID_Z = (1 + CUDA_BLOCK_Z - 1)/CUDA_BLOCK_Z;
const dim3 CUDA_blockSize(CUDA_BLOCK_X, CUDA_BLOCK_Y, CUDA_BLOCK_Z);
const dim3 CUDA_gridSize(CUDA_GRID_X, CUDA_GRID_Y, CUDA_GRID_Z);
hipLaunchKernelGGL(( _auto_kernel_0), dim3(CUDA_gridSize),dim3(CUDA_blockSize), 0, 0, d_a);
hipMemcpy(a, d_a, sizeof(int ) * 5 * 5, hipMemcpyDeviceToHost);
}
{
/* Auto-generated code for call to _auto_kernel_1 */
typedef int _narray_b[5];
_narray_b *d_b;
hipMalloc((void **) &d_b, sizeof(int ) * 5 * 5);
hipMemcpy(d_b, b, sizeof(int ) * 5 * 5, hipMemcpyHostToDevice);
int CUDA_GRID_X;
CUDA_GRID_X = (5 + CUDA_BLOCK_X - 1)/CUDA_BLOCK_X;
int CUDA_GRID_Y;
CUDA_GRID_Y = (5 + CUDA_BLOCK_Y - 1)/CUDA_BLOCK_Y;
int CUDA_GRID_Z;
CUDA_GRID_Z = (1 + CUDA_BLOCK_Z - 1)/CUDA_BLOCK_Z;
const dim3 CUDA_blockSize(CUDA_BLOCK_X, CUDA_BLOCK_Y, CUDA_BLOCK_Z);
const dim3 CUDA_gridSize(CUDA_GRID_X, CUDA_GRID_Y, CUDA_GRID_Z);
hipLaunchKernelGGL(( _auto_kernel_1), dim3(CUDA_gridSize),dim3(CUDA_blockSize), 0, 0, d_b, i);
hipMemcpy(b, d_b, sizeof(int ) * 5 * 5, hipMemcpyDeviceToHost);
}
{
/* Auto-generated code for call to _auto_kernel_2 */
typedef int _narray_a[5];
_narray_a *d_a;
hipMalloc((void **) &d_a, sizeof(int ) * 5 * 5);
hipMemcpy(d_a, a, sizeof(int ) * 5 * 5, hipMemcpyHostToDevice);
typedef int _narray_b[5];
_narray_b *d_b;
hipMalloc((void **) &d_b, sizeof(int ) * 5 * 5);
hipMemcpy(d_b, b, sizeof(int ) * 5 * 5, hipMemcpyHostToDevice);
int CUDA_GRID_X;
CUDA_GRID_X = (5 + CUDA_BLOCK_X - 1)/CUDA_BLOCK_X;
int CUDA_GRID_Y;
CUDA_GRID_Y = (5 + CUDA_BLOCK_Y - 1)/CUDA_BLOCK_Y;
int CUDA_GRID_Z;
CUDA_GRID_Z = (1 + CUDA_BLOCK_Z - 1)/CUDA_BLOCK_Z;
const dim3 CUDA_blockSize(CUDA_BLOCK_X, CUDA_BLOCK_Y, CUDA_BLOCK_Z);
const dim3 CUDA_gridSize(CUDA_GRID_X, CUDA_GRID_Y, CUDA_GRID_Z);
hipLaunchKernelGGL(( _auto_kernel_2), dim3(CUDA_gridSize),dim3(CUDA_blockSize), 0, 0, d_a, d_b, i);
hipMemcpy(a, d_a, sizeof(int ) * 5 * 5, hipMemcpyDeviceToHost);
hipMemcpy(b, d_b, sizeof(int ) * 5 * 5, hipMemcpyDeviceToHost);
}
}
/* This should NOT be convertible */
for (int i = 0; i < 5; i++) {
a[i][i] = b[i][i];
for (int j = 0; j < 5; j++)
b[i][j] = a[i][j];
}
return 0;
}
| 26837e8c98cf69c72d64b4fa1b407b5e76c07077.cu | #define CUDA_BLOCK_X 128
#define CUDA_BLOCK_Y 1
#define CUDA_BLOCK_Z 1
__global__ void _auto_kernel_2(int a[5][5],int b[5][5],int i)
{
int thread_x_id;thread_x_id = blockIdx.x * blockDim.x + threadIdx.x;
int thread_y_id;thread_y_id = blockIdx.y * blockDim.y + threadIdx.y;
if (thread_x_id && thread_y_id)
if (thread_x_id <= 5 && thread_y_id <= 5) {
b[i][1 * thread_y_id + -1] = a[i][1 * thread_y_id + -1];
}
}
__global__ void _auto_kernel_1(int b[5][5],int i)
{
int thread_x_id;thread_x_id = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_x_id)
if (thread_x_id <= 5) {
b[i][0] = 1;
}
}
__global__ void _auto_kernel_0(int a[5][5])
{
int thread_x_id;thread_x_id = blockIdx.x * blockDim.x + threadIdx.x;
if (thread_x_id)
if (thread_x_id <= 5) {
a[1 * thread_x_id + -1][1 * thread_x_id + -1] = 1;
}
}
int main()
{
int j;
int i_nom_2;
int i_nom_1;
int i;
int a[5][5];
int b[5][5];
int y;
{
{
/* Auto-generated code for call to _auto_kernel_0 */
typedef int _narray_a[5];
_narray_a *d_a;
cudaMalloc((void **) &d_a, sizeof(int ) * 5 * 5);
cudaMemcpy(d_a, a, sizeof(int ) * 5 * 5, cudaMemcpyHostToDevice);
int CUDA_GRID_X;
CUDA_GRID_X = (5 + CUDA_BLOCK_X - 1)/CUDA_BLOCK_X;
int CUDA_GRID_Y;
CUDA_GRID_Y = (5 + CUDA_BLOCK_Y - 1)/CUDA_BLOCK_Y;
int CUDA_GRID_Z;
CUDA_GRID_Z = (1 + CUDA_BLOCK_Z - 1)/CUDA_BLOCK_Z;
const dim3 CUDA_blockSize(CUDA_BLOCK_X, CUDA_BLOCK_Y, CUDA_BLOCK_Z);
const dim3 CUDA_gridSize(CUDA_GRID_X, CUDA_GRID_Y, CUDA_GRID_Z);
_auto_kernel_0<<<CUDA_gridSize,CUDA_blockSize>>>(d_a);
cudaMemcpy(a, d_a, sizeof(int ) * 5 * 5, cudaMemcpyDeviceToHost);
}
{
/* Auto-generated code for call to _auto_kernel_1 */
typedef int _narray_b[5];
_narray_b *d_b;
cudaMalloc((void **) &d_b, sizeof(int ) * 5 * 5);
cudaMemcpy(d_b, b, sizeof(int ) * 5 * 5, cudaMemcpyHostToDevice);
int CUDA_GRID_X;
CUDA_GRID_X = (5 + CUDA_BLOCK_X - 1)/CUDA_BLOCK_X;
int CUDA_GRID_Y;
CUDA_GRID_Y = (5 + CUDA_BLOCK_Y - 1)/CUDA_BLOCK_Y;
int CUDA_GRID_Z;
CUDA_GRID_Z = (1 + CUDA_BLOCK_Z - 1)/CUDA_BLOCK_Z;
const dim3 CUDA_blockSize(CUDA_BLOCK_X, CUDA_BLOCK_Y, CUDA_BLOCK_Z);
const dim3 CUDA_gridSize(CUDA_GRID_X, CUDA_GRID_Y, CUDA_GRID_Z);
_auto_kernel_1<<<CUDA_gridSize,CUDA_blockSize>>>(d_b, i);
cudaMemcpy(b, d_b, sizeof(int ) * 5 * 5, cudaMemcpyDeviceToHost);
}
{
/* Auto-generated code for call to _auto_kernel_2 */
typedef int _narray_a[5];
_narray_a *d_a;
cudaMalloc((void **) &d_a, sizeof(int ) * 5 * 5);
cudaMemcpy(d_a, a, sizeof(int ) * 5 * 5, cudaMemcpyHostToDevice);
typedef int _narray_b[5];
_narray_b *d_b;
cudaMalloc((void **) &d_b, sizeof(int ) * 5 * 5);
cudaMemcpy(d_b, b, sizeof(int ) * 5 * 5, cudaMemcpyHostToDevice);
int CUDA_GRID_X;
CUDA_GRID_X = (5 + CUDA_BLOCK_X - 1)/CUDA_BLOCK_X;
int CUDA_GRID_Y;
CUDA_GRID_Y = (5 + CUDA_BLOCK_Y - 1)/CUDA_BLOCK_Y;
int CUDA_GRID_Z;
CUDA_GRID_Z = (1 + CUDA_BLOCK_Z - 1)/CUDA_BLOCK_Z;
const dim3 CUDA_blockSize(CUDA_BLOCK_X, CUDA_BLOCK_Y, CUDA_BLOCK_Z);
const dim3 CUDA_gridSize(CUDA_GRID_X, CUDA_GRID_Y, CUDA_GRID_Z);
_auto_kernel_2<<<CUDA_gridSize,CUDA_blockSize>>>(d_a, d_b, i);
cudaMemcpy(a, d_a, sizeof(int ) * 5 * 5, cudaMemcpyDeviceToHost);
cudaMemcpy(b, d_b, sizeof(int ) * 5 * 5, cudaMemcpyDeviceToHost);
}
}
/* This should NOT be convertible */
for (int i = 0; i < 5; i++) {
a[i][i] = b[i][i];
for (int j = 0; j < 5; j++)
b[i][j] = a[i][j];
}
return 0;
}
|
5128afc245677973304d1a9941a704b941a5eca0.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Reduce.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/SharedReduceOps.h>
#include <ATen/Dispatch.h>
#include <ATen/native/ReduceOps.h>
namespace at { namespace native {
template <typename scalar_t, typename acc_t=scalar_t, typename out_t=scalar_t>
void sum_kernel_impl(TensorIterator& iter) {
gpu_reduce_kernel<scalar_t, out_t>(iter, func_wrapper<out_t> ([]GPU_LAMBDA(acc_t a, acc_t b) -> acc_t {
return a + b;
}));
}
template <typename scalar_t, typename acc_t=scalar_t, typename out_t=scalar_t>
void prod_kernel_impl(TensorIterator& iter) {
gpu_reduce_kernel<scalar_t, out_t>(iter, func_wrapper<out_t> ([]GPU_LAMBDA(acc_t a, acc_t b) -> acc_t {
return a * b;
}), 1);
}
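// The same pattern extends to other reductions: wrap a binary GPU_LAMBDA with
// func_wrapper and pass it to gpu_reduce_kernel together with an identity
// element. Hypothetical sketch only (not part of this file's dispatch; the
// identity shown is an assumption, and the min/max reductions that ship with
// PyTorch are implemented elsewhere on top of SharedReduceOps.h):
//
//   template <typename scalar_t, typename acc_t=scalar_t, typename out_t=scalar_t>
//   void min_kernel_impl(TensorIterator& iter) {
//     gpu_reduce_kernel<scalar_t, out_t>(iter, func_wrapper<out_t> ([]GPU_LAMBDA(acc_t a, acc_t b) -> acc_t {
//       return a < b ? a : b;
//     }), at::numeric_limits<acc_t>::upper_bound());
//   }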
static void sum_kernel_cuda(TensorIterator& iter) {
if (iter.dtype() == kHalf) {
return sum_kernel_impl<at::Half, float>(iter);
} else if (iter.dtype(1) == kHalf && iter.dtype() == kFloat) {
// type promotion that does cast and reduction in a single kernel
return sum_kernel_impl<at::Half, float, float>(iter);
}
#ifdef __HIP_PLATFORM_HCC__
else if (iter.dtype() == kBFloat16) {
return sum_kernel_impl<at::BFloat16, float>(iter);
} else if (iter.dtype(1) == kBFloat16 && iter.dtype() == kFloat) {
// type promotion that does cast and reduction in a single kernel
return sum_kernel_impl<at::BFloat16, float, float>(iter);
}
#endif
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND(ScalarType::Bool, iter.dtype(), "sum_cuda", [&]() {
sum_kernel_impl<scalar_t>(iter);
});
}
static void prod_kernel_cuda(TensorIterator& iter) {
if (iter.dtype() == kHalf) {
return prod_kernel_impl<at::Half, float>(iter);
} else if (iter.dtype(1) == kHalf && iter.dtype() == kFloat) {
// type promotion that does cast and reduction in a single kernel
return prod_kernel_impl<at::Half, float, float>(iter);
}
#ifdef __HIP_PLATFORM_HCC__
else if (iter.dtype() == kBFloat16) {
return prod_kernel_impl<at::BFloat16, float>(iter);
} else if (iter.dtype(1) == kBFloat16 && iter.dtype() == kFloat) {
// type promotion that does cast and reduction in a single kernel
return prod_kernel_impl<at::BFloat16, float, float>(iter);
}
#endif
AT_DISPATCH_ALL_TYPES(iter.dtype(), "prod_cuda", [&]() {
prod_kernel_impl<scalar_t>(iter);
});
}
REGISTER_DISPATCH(sum_stub, &sum_kernel_cuda);
REGISTER_DISPATCH(prod_stub, &prod_kernel_cuda);
}} // namespace at::native
| 5128afc245677973304d1a9941a704b941a5eca0.cu | #include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Reduce.cuh>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/SharedReduceOps.h>
#include <ATen/Dispatch.h>
#include <ATen/native/ReduceOps.h>
namespace at { namespace native {
template <typename scalar_t, typename acc_t=scalar_t, typename out_t=scalar_t>
void sum_kernel_impl(TensorIterator& iter) {
gpu_reduce_kernel<scalar_t, out_t>(iter, func_wrapper<out_t> ([]GPU_LAMBDA(acc_t a, acc_t b) -> acc_t {
return a + b;
}));
}
template <typename scalar_t, typename acc_t=scalar_t, typename out_t=scalar_t>
void prod_kernel_impl(TensorIterator& iter) {
gpu_reduce_kernel<scalar_t, out_t>(iter, func_wrapper<out_t> ([]GPU_LAMBDA(acc_t a, acc_t b) -> acc_t {
return a * b;
}), 1);
}
static void sum_kernel_cuda(TensorIterator& iter) {
if (iter.dtype() == kHalf) {
return sum_kernel_impl<at::Half, float>(iter);
} else if (iter.dtype(1) == kHalf && iter.dtype() == kFloat) {
// type promotion that does cast and reduction in a single kernel
return sum_kernel_impl<at::Half, float, float>(iter);
}
#ifdef __HIP_PLATFORM_HCC__
else if (iter.dtype() == kBFloat16) {
return sum_kernel_impl<at::BFloat16, float>(iter);
} else if (iter.dtype(1) == kBFloat16 && iter.dtype() == kFloat) {
// type promotion that does cast and reduction in a single kernel
return sum_kernel_impl<at::BFloat16, float, float>(iter);
}
#endif
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND(ScalarType::Bool, iter.dtype(), "sum_cuda", [&]() {
sum_kernel_impl<scalar_t>(iter);
});
}
static void prod_kernel_cuda(TensorIterator& iter) {
if (iter.dtype() == kHalf) {
return prod_kernel_impl<at::Half, float>(iter);
} else if (iter.dtype(1) == kHalf && iter.dtype() == kFloat) {
// type promotion that does cast and reduction in a single kernel
return prod_kernel_impl<at::Half, float, float>(iter);
}
#ifdef __HIP_PLATFORM_HCC__
else if (iter.dtype() == kBFloat16) {
return prod_kernel_impl<at::BFloat16, float>(iter);
} else if (iter.dtype(1) == kBFloat16 && iter.dtype() == kFloat) {
// type promotion that does cast and reduction in a single kernel
return prod_kernel_impl<at::BFloat16, float, float>(iter);
}
#endif
AT_DISPATCH_ALL_TYPES(iter.dtype(), "prod_cuda", [&]() {
prod_kernel_impl<scalar_t>(iter);
});
}
REGISTER_DISPATCH(sum_stub, &sum_kernel_cuda);
REGISTER_DISPATCH(prod_stub, &prod_kernel_cuda);
}} // namespace at::native
|
3b467941392b0dd4c04567bb770a6248917dda38.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "jacobi_gpu3.h"
__global__ void jacobi_kernel1(int n, double h, double * u0_old, double * u0_new, double * f, double * u1_old, double * u1_new){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i > 0 && i < (n+1)/2 && j > 0 && j < (n+1)/2){
if (i==(n+2)/2){
u0_new[i*(n + 2) + j] = 0.25*(u0_old[(i-1)*(n + 2) + j] + u1_old[j] + u0_old[i*(n + 2) + j-1] + u0_old[i*(n + 2) + j+1] + h*h*f[i*(n + 2) + j]);
}
else{
u0_new[i*(n + 2) + j] = 0.25*(u0_old[(i-1)*(n + 2) + j] + u0_old[(i+1)*(n + 2) + j] + u0_old[i*(n + 2) + j-1] + u0_old[i*(n + 2) + j+1] + h*h*f[i*(n + 2) + j]);
}
}
}
__global__ void jacobi_kernel2(int n, double h, double * u1_old, double * u1_new, double * f, double * u0_old, double * u0_new){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i > 0 && i < (n+1)/2 && j > 0 && j < (n+1)/2){
if (i == 0){
u1_new[i*(n + 2) + j] = 0.25*(u0_old[(n + 2)*(n + 2)/2 + j] + u1_old[(i+1)*(n + 2) + j] + u1_old[i*(n + 2) + j-1] + u1_old[i*(n + 2) + j+1] + h*h*f[i*(n + 2) + j]);
}
else{
u1_new[i*(n + 2) + j] = 0.25*(u1_old[(i-1)*(n + 2) + j] + u1_old[(i+1)*(n + 2) + j] + u1_old[i*(n + 2) + j-1] + u1_old[i*(n + 2) + j+1] + h*h*f[i*(n + 2) + j]);
}
}
}
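// A host-side launch sketch for the two kernels above, assuming the device
// arrays u0_old, u0_new, u1_old, u1_new, f were allocated elsewhere and a
// 16x16 block is used (both assumptions, not taken from this file):
//
//   dim3 block(16, 16);
//   dim3 grid(((n + 2) / 2 + block.x - 1) / block.x,
//             ((n + 2) / 2 + block.y - 1) / block.y);
//   hipLaunchKernelGGL(jacobi_kernel1, grid, block, 0, 0,
//                      n, h, u0_old, u0_new, f, u1_old, u1_new);
//   hipLaunchKernelGGL(jacobi_kernel2, grid, block, 0, 0,
//                      n, h, u1_old, u1_new, f, u0_old, u0_new);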
| 3b467941392b0dd4c04567bb770a6248917dda38.cu | #include "jacobi_gpu3.h"
__global__ void jacobi_kernel1(int n, double h, double * u0_old, double * u0_new, double * f, double * u1_old, double * u1_new){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i > 0 && i < (n+1)/2 && j > 0 && j < (n+1)/2){
if (i==(n+2)/2){
u0_new[i*(n + 2) + j] = 0.25*(u0_old[(i-1)*(n + 2) + j] + u1_old[j] + u0_old[i*(n + 2) + j-1] + u0_old[i*(n + 2) + j+1] + h*h*f[i*(n + 2) + j]);
}
else{
u0_new[i*(n + 2) + j] = 0.25*(u0_old[(i-1)*(n + 2) + j] + u0_old[(i+1)*(n + 2) + j] + u0_old[i*(n + 2) + j-1] + u0_old[i*(n + 2) + j+1] + h*h*f[i*(n + 2) + j]);
}
}
}
__global__ void jacobi_kernel2(int n, double h, double * u1_old, double * u1_new, double * f, double * u0_old, double * u0_new){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i > 0 && i < (n+1)/2 && j > 0 && j < (n+1)/2){
if (i == 0){
u1_new[i*(n + 2) + j] = 0.25*(u0_old[(n + 2)*(n + 2)/2 + j] + u1_old[(i+1)*(n + 2) + j] + u1_old[i*(n + 2) + j-1] + u1_old[i*(n + 2) + j+1] + h*h*f[i*(n + 2) + j]);
}
else{
u1_new[i*(n + 2) + j] = 0.25*(u1_old[(i-1)*(n + 2) + j] + u1_old[(i+1)*(n + 2) + j] + u1_old[i*(n + 2) + j-1] + u1_old[i*(n + 2) + j+1] + h*h*f[i*(n + 2) + j]);
}
}
}
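// A host-side launch sketch for the two kernels above, assuming the device
// arrays u0_old, u0_new, u1_old, u1_new, f were allocated elsewhere and a
// 16x16 block is used (both assumptions, not taken from this file):
//
//   dim3 block(16, 16);
//   dim3 grid(((n + 2) / 2 + block.x - 1) / block.x,
//             ((n + 2) / 2 + block.y - 1) / block.y);
//   jacobi_kernel1<<<grid, block>>>(n, h, u0_old, u0_new, f, u1_old, u1_new);
//   jacobi_kernel2<<<grid, block>>>(n, h, u1_old, u1_new, f, u0_old, u0_new);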
|
1e9da27530b5b2910b62406901c8f047db4ee09d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "../../saxpy/saxpy.c"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
//#define DEBUG 0
#define CHECK_ERR(x) \
if (x != hipSuccess) { \
fprintf(stderr,"%s in %s at line %d\n", \
hipGetErrorString(err),__FILE__,__LINE__); \
exit(-1); \
}
__global__ void image_1D_convolution(float *M, float *N, float *C, int mask_width, int width,int num_threads)
{
int threadId = blockIdx.x * blockDim.x + threadIdx.x;
float value =0;
int start;
int index;
//this function includes 2 floating point operations
while(threadId < width)
{
start = threadId - (mask_width/2);
for(int i=0; i<mask_width;i++){
index= start + i;
if(index >=0 && index <width)
value = value + N[index] * M[i];
}
// write this element's result before advancing the stride, then reset the accumulator
C[threadId] = value;
value = 0;
threadId = threadId + num_threads;
}
}
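// The kernel above strides through the image by num_threads, so num_threads
// must equal the total number of launched threads (main() below launches a
// single block of NUM_THREADS). A minimal multi-block launch sketch, with
// block/grid sizes that are assumptions only:
//
//   int block = 256;
//   int grid = 4;
//   hipLaunchKernelGGL(image_1D_convolution, dim3(grid), dim3(block), 0, 0,
//                      d_M, d_N, d_C, MASK_WIDTH, IMAGE_WIDTH, grid * block);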
void print(float* result,int size){
printf("Printing array....\n");
for(int i=0;i<size;i++){
printf(" %f ", result[i]);
}
printf("\n");
}
int main(int argc, char *argv[]){
//mask_width, filter width
int IMAGE_WIDTH, MASK_WIDTH,NUM_THREADS,FLAG;
float *h_M, *h_N, *h_C;
float *d_M, *d_N, *d_C;
size_t size_M,size_N;
hipError_t err;
if(argc!=5)
{
printf("This test requires two parameters:\n");
printf(" int IMAGE_WIDTH, int MASK_WIDTH, int NUM_THREADS \n");
printf("where IMAGE_WIDTH is the number of pixels in an image in one dimensional\n");
printf(" MASK_WIDTH is the width of the mask to be applied on the image\n");
printf(" NUM_THREADS is the number of threads to be executed in parallel\n");
printf(" FLAG to decide flops including data copy or not. 1 for flops with data copy and 0 for only execution of gpu function.\n");
exit(1);
}
srand (time(NULL));
IMAGE_WIDTH = atoi(argv[1]);
MASK_WIDTH = atoi(argv[2]);
NUM_THREADS = atoi(argv[3]);
FLAG = atoi(argv[4]);
// allocate host
size_M = sizeof(float) * MASK_WIDTH;
size_N = sizeof(float) * IMAGE_WIDTH;
h_N = (float *) malloc(size_N);
h_M = (float *) malloc(size_M);
h_C = (float *) malloc(size_N);
// allocate device
err=hipMalloc((void **) &d_M, size_M);
CHECK_ERR(err);
err=hipMalloc((void **) &d_N, size_N);
CHECK_ERR(err);
err=hipMalloc((void **) &d_C, size_N);
CHECK_ERR(err);
// populate the host arrays with random values
populateRandomFloatArray(IMAGE_WIDTH,h_N);
populateRandomFloatArray(MASK_WIDTH,h_M);
#ifdef DEBUG
print(h_N,IMAGE_WIDTH);
print(h_M, MASK_WIDTH);
#endif
// Start the timer
struct timeval tim;
double t1,t2;
if(FLAG){
gettimeofday(&tim, NULL);
t1=tim.tv_sec+(tim.tv_usec/1000000.0);
}
err = hipMemcpy(d_M,h_M,size_M,hipMemcpyHostToDevice);
CHECK_ERR(err);
err = hipMemcpy(d_N,h_N,size_N, hipMemcpyHostToDevice);
CHECK_ERR(err);
if(!FLAG){
gettimeofday(&tim, NULL);
t1=tim.tv_sec+(tim.tv_usec/1000000.0);
}
hipLaunchKernelGGL(( image_1D_convolution), dim3(1),dim3(NUM_THREADS), 0, 0, d_M,d_N,d_C,MASK_WIDTH,IMAGE_WIDTH,NUM_THREADS);
hipDeviceSynchronize();
if(!FLAG){
gettimeofday(&tim, NULL);
t2=tim.tv_sec+(tim.tv_usec/1000000.0);
}
//Copy back the results from the device
//printf("%x %x %d\n", h_C, d_C, size_N);
float * temp = (float *)malloc(size_N);
// err = hipMemcpy((void *)h_C, (void *)d_C, size_N, hipMemcpyDeviceToHost);
err = hipMemcpy((void *)temp, (void *)d_C, size_N, hipMemcpyDeviceToHost);
CHECK_ERR(err);
//printf("AFTER COPY BACK!\n");
#ifdef DEBUG
print(h_C,IMAGE_WIDTH);
#endif
// free device
hipFree(d_C);
hipFree(d_M);
hipFree(d_N);
if(FLAG){
gettimeofday(&tim, NULL);
t2=tim.tv_sec+(tim.tv_usec/1000000.0);
}
// Print timing information
printf("%.4lf\t",(t2-t1));
// free cpu
free(h_M);
free(h_N);
free(h_C);
}
| 1e9da27530b5b2910b62406901c8f047db4ee09d.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "../../saxpy/saxpy.c"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
//#define DEBUG 0
#define CHECK_ERR(x) \
if (x != cudaSuccess) { \
fprintf(stderr,"%s in %s at line %d\n", \
cudaGetErrorString(err),__FILE__,__LINE__); \
exit(-1); \
}
__global__ void image_1D_convolution(float *M, float *N, float *C, int mask_width, int width,int num_threads)
{
int threadId = blockIdx.x * blockDim.x + threadIdx.x;
float value =0;
int start;
int index;
//this function includes 2 floating point operations
while(threadId < width)
{
start = threadId - (mask_width/2);
for(int i=0; i<mask_width;i++){
index= start + i;
if(index >=0 && index <width)
value = value + N[index] * M[i];
}
// write this element's result before advancing the stride, then reset the accumulator
C[threadId] = value;
value = 0;
threadId = threadId + num_threads;
}
}
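// The kernel above strides through the image by num_threads, so num_threads
// must equal the total number of launched threads (main() below launches a
// single block of NUM_THREADS). A minimal multi-block launch sketch, with
// block/grid sizes that are assumptions only:
//
//   int block = 256;
//   int grid = 4;
//   image_1D_convolution<<<grid, block>>>(d_M, d_N, d_C, MASK_WIDTH,
//                                         IMAGE_WIDTH, grid * block);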
void print(float* result,int size){
printf("Printing array....\n");
for(int i=0;i<size;i++){
printf(" %f ", result[i]);
}
printf("\n");
}
int main(int argc, char *argv[]){
//mask_width, filter width
int IMAGE_WIDTH, MASK_WIDTH,NUM_THREADS,FLAG;
float *h_M, *h_N, *h_C;
float *d_M, *d_N, *d_C;
size_t size_M,size_N;
cudaError_t err;
if(argc!=5)
{
printf("This test requires two parameters:\n");
printf(" int IMAGE_WIDTH, int MASK_WIDTH, int NUM_THREADS \n");
printf("where IMAGE_WIDTH is the number of pixels in an image in one dimensional\n");
printf(" MASK_WIDTH is the width of the mask to be applied on the image\n");
printf(" NUM_THREADS is the number of threads to be executed in parallel\n");
printf(" FLAG to decide flops including data copy or not. 1 for flops with data copy and 0 for only execution of gpu function.\n");
exit(1);
}
srand (time(NULL));
IMAGE_WIDTH = atoi(argv[1]);
MASK_WIDTH = atoi(argv[2]);
NUM_THREADS = atoi(argv[3]);
FLAG = atoi(argv[4]);
// allocate host
size_M = sizeof(float) * MASK_WIDTH;
size_N = sizeof(float) * IMAGE_WIDTH;
h_N = (float *) malloc(size_N);
h_M = (float *) malloc(size_M);
h_C = (float *) malloc(size_N);
// allocate device
err=cudaMalloc((void **) &d_M, size_M);
CHECK_ERR(err);
err=cudaMalloc((void **) &d_N, size_N);
CHECK_ERR(err);
err=cudaMalloc((void **) &d_C, size_N);
CHECK_ERR(err);
// populate the host arrays with random values
populateRandomFloatArray(IMAGE_WIDTH,h_N);
populateRandomFloatArray(MASK_WIDTH,h_M);
#ifdef DEBUG
print(h_N,IMAGE_WIDTH);
print(h_M, MASK_WIDTH);
#endif
// Start the timer
struct timeval tim;
double t1,t2;
if(FLAG){
gettimeofday(&tim, NULL);
t1=tim.tv_sec+(tim.tv_usec/1000000.0);
}
err = cudaMemcpy(d_M,h_M,size_M,cudaMemcpyHostToDevice);
CHECK_ERR(err);
err = cudaMemcpy(d_N,h_N,size_N, cudaMemcpyHostToDevice);
CHECK_ERR(err);
if(!FLAG){
gettimeofday(&tim, NULL);
t1=tim.tv_sec+(tim.tv_usec/1000000.0);
}
image_1D_convolution<<<1,NUM_THREADS>>>(d_M,d_N,d_C,MASK_WIDTH,IMAGE_WIDTH,NUM_THREADS);
cudaDeviceSynchronize();
if(!FLAG){
gettimeofday(&tim, NULL);
t2=tim.tv_sec+(tim.tv_usec/1000000.0);
}
//Copy back the results from the device
//printf("%x %x %d\n", h_C, d_C, size_N);
float * temp = (float *)malloc(size_N);
// err = cudaMemcpy((void *)h_C, (void *)d_C, size_N, cudaMemcpyDeviceToHost);
err = cudaMemcpy((void *)temp, (void *)d_C, size_N, cudaMemcpyDeviceToHost);
CHECK_ERR(err);
//printf("AFTER COPY BACK!\n");
#ifdef DEBUG
print(h_C,IMAGE_WIDTH);
#endif
// free device
cudaFree(d_C);
cudaFree(d_M);
cudaFree(d_N);
if(FLAG){
gettimeofday(&tim, NULL);
t2=tim.tv_sec+(tim.tv_usec/1000000.0);
}
// Print timing information
printf("%.4lf\t",(t2-t1));
// free cpu
free(h_M);
free(h_N);
free(h_C);
}
|
0de43ecc7ebd5af697fc2c6b7d745e8a1d1f5b9d.hip | // !!! This is a file automatically generated by hipify!!!
#include "HOGPadding.h"
#include "HOGUtils.h"
extern int hWidthROI, hHeightROI;
extern int hPaddedWidth, hPaddedHeight;
extern int hWidth, hHeight;
extern int hPaddingSizeX, hPaddingSizeY;
extern int avSizeX, avSizeY, marginX, marginY;
uchar4* paddedRegisteredImageU4;
__host__ void InitPadding(int hPaddedWidth, int hPaddedHeight)
{
checkCudaErrors(hipMalloc((void**) &paddedRegisteredImageU4, sizeof(uchar4) * hPaddedWidth * hPaddedHeight));
}
__host__ void ClosePadding()
{
checkCudaErrors(hipFree(paddedRegisteredImageU4));
}
__host__ void PadHostImage(uchar4* registeredImage, float4 *paddedRegisteredImage,
int minx, int miny, int maxx, int maxy)
{
hWidthROI = maxx - minx;
hHeightROI = maxy - miny;
int toaddxx = 0, toaddxy = 0, toaddyx = 0, toaddyy = 0;
if (avSizeX) { toaddxx = hWidthROI * marginX / avSizeX; toaddxy = hHeightROI * marginY / avSizeX; }
if (avSizeY) { toaddyx = hWidthROI * marginX / avSizeY; toaddyy = hHeightROI * marginY / avSizeY; }
hPaddingSizeX = max(toaddxx, toaddyx); hPaddingSizeY = max(toaddxy, toaddyy);
hPaddedWidth = hWidthROI + hPaddingSizeX*2;
hPaddedHeight = hHeightROI + hPaddingSizeY*2;
checkCudaErrors(hipMemset(paddedRegisteredImageU4, 0, sizeof(uchar4) * hPaddedWidth * hPaddedHeight));
checkCudaErrors(hipMemcpy2D(paddedRegisteredImageU4 + hPaddingSizeX + hPaddingSizeY * hPaddedWidth,
hPaddedWidth * sizeof(uchar4), registeredImage + minx + miny * hWidth,
hWidth * sizeof(uchar4), hWidthROI * sizeof(uchar4),
hHeightROI, hipMemcpyHostToDevice));
Uchar4ToFloat4(paddedRegisteredImageU4, paddedRegisteredImage, hPaddedWidth, hPaddedHeight);
}
| 0de43ecc7ebd5af697fc2c6b7d745e8a1d1f5b9d.cu | #include "HOGPadding.h"
#include "HOGUtils.h"
extern int hWidthROI, hHeightROI;
extern int hPaddedWidth, hPaddedHeight;
extern int hWidth, hHeight;
extern int hPaddingSizeX, hPaddingSizeY;
extern int avSizeX, avSizeY, marginX, marginY;
uchar4* paddedRegisteredImageU4;
__host__ void InitPadding(int hPaddedWidth, int hPaddedHeight)
{
checkCudaErrors(cudaMalloc((void**) &paddedRegisteredImageU4, sizeof(uchar4) * hPaddedWidth * hPaddedHeight));
}
__host__ void ClosePadding()
{
checkCudaErrors(cudaFree(paddedRegisteredImageU4));
}
__host__ void PadHostImage(uchar4* registeredImage, float4 *paddedRegisteredImage,
int minx, int miny, int maxx, int maxy)
{
hWidthROI = maxx - minx;
hHeightROI = maxy - miny;
int toaddxx = 0, toaddxy = 0, toaddyx = 0, toaddyy = 0;
if (avSizeX) { toaddxx = hWidthROI * marginX / avSizeX; toaddxy = hHeightROI * marginY / avSizeX; }
if (avSizeY) { toaddyx = hWidthROI * marginX / avSizeY; toaddyy = hHeightROI * marginY / avSizeY; }
hPaddingSizeX = max(toaddxx, toaddyx); hPaddingSizeY = max(toaddxy, toaddyy);
hPaddedWidth = hWidthROI + hPaddingSizeX*2;
hPaddedHeight = hHeightROI + hPaddingSizeY*2;
checkCudaErrors(cudaMemset(paddedRegisteredImageU4, 0, sizeof(uchar4) * hPaddedWidth * hPaddedHeight));
checkCudaErrors(cudaMemcpy2D(paddedRegisteredImageU4 + hPaddingSizeX + hPaddingSizeY * hPaddedWidth,
hPaddedWidth * sizeof(uchar4), registeredImage + minx + miny * hWidth,
hWidth * sizeof(uchar4), hWidthROI * sizeof(uchar4),
hHeightROI, cudaMemcpyHostToDevice));
Uchar4ToFloat4(paddedRegisteredImageU4, paddedRegisteredImage, hPaddedWidth, hPaddedHeight);
}
|
665a694095d51439d16454e4f6ec70edfc2203c7.hip | // !!! This is a file automatically generated by hipify!!!
/*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2009-2014 The Regents of
the University of Michigan All rights reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: joaander
// Modified by Gang Wang
// Modified by Andrew Fiore
#include "Stokes.cuh"
#include "Mobility.cuh"
#include "Brownian.cuh"
#include "Helper.cuh"
#include "hoomd/Saru.h"
#include "hoomd/TextureTools.h"
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#ifdef WIN32
#include <cassert>
#else
#include <assert.h>
#endif
//! command to convert floats or doubles to integers
#ifdef SINGLE_PRECISION
#define __scalar2int_rd __float2int_rd
#else
#define __scalar2int_rd __double2int_rd
#endif
#ifndef __ERRCHK_CUH__
#define __ERRCHK_CUH__
//! Function to check for errors
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
/*!
\param code returned error code
\param file which file the error occurred in
\param line which line error check was tripped
\param abort whether to kill code upon error trigger
*/
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#endif
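// Usage sketch for the gpuErrchk macro above, assuming a hypothetical device
// buffer d_buf (the name and size are placeholders, not taken from this file):
//
//   float *d_buf;
//   gpuErrchk( hipMalloc( (void**)&d_buf, 1024 * sizeof(float) ) );
//   gpuErrchk( hipMemset( d_buf, 0, 1024 * sizeof(float) ) );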
/*! \file Stokes.cu
\brief Defines GPU kernel code for integration considering hydrodynamic interactions on the GPU. Used by Stokes.cc.
*/
//! Shared memory array for partial sum of dot product kernel
extern __shared__ Scalar partial_sum[];
extern __shared__ Scalar4 shared_Fpos[];
//! Texture for reading table values
scalar4_tex_t tables1_tex;
//! Texture for reading particle positions
scalar4_tex_t pos_tex;
//! Takes the integration on a group of particles
/*! \param d_pos array of particle positions
\param d_vel array of particle velocities
\param d_delu1 first 4 components of gradient of particle velocity
\param d_delu2 second 4 components of gradient of particle velocity
\param d_accel array of particle "accelerations" (This is an overdamped integrator, so accelerations don't have physical meaning)
\param d_image array of particle images
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param box Box dimensions for periodic boundary condition handling
\param deltaT timestep
\param d_net_force net force on each particle, only used to set "accelerations"
This kernel must be executed with a 1D grid of any block size such that the number of threads is greater than or
equal to the number of members in the group. The kernel's implementation simply reads one particle in each thread
and updates that particle. (Not necessarily true for Stokesian Dynamics simulation)
<b>Performance notes:</b>
Particle properties are read via the texture cache to optimize the bandwidth obtained with sparse groups. The writes
in sparse groups will not be coalesced. However, because ParticleGroup sorts the index list the writes will be as
contiguous as possible leading to fewer memory transactions on compute 1.3 hardware and more cache hits on Fermi. (Not sure about this..)
*/
extern "C" __global__
void gpu_stokes_step_one_kernel(
Scalar4 *d_pos,
Scalar4 *d_vel,
Scalar3 *d_accel,
int3 *d_image,
unsigned int *d_group_members,
unsigned int group_size,
BoxDim box,
Scalar deltaT,
Scalar4 *d_net_force,
Scalar shear_rate
){
// determine which particle this thread works on (MEM TRANSFER: 4 bytes)
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx < group_size){
unsigned int idx = d_group_members[group_idx];
// read the particle's position (MEM TRANSFER: 16 bytes)
Scalar4 postype = d_pos[idx];
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
// read the particle's velocity and acceleration (MEM TRANSFER: 32 bytes)
Scalar4 velmass = d_vel[idx];
Scalar mass = velmass.w;
Scalar3 vel = make_scalar3(velmass.x, velmass.y, velmass.z);
// Add the shear
vel.x += shear_rate * pos.y;
Scalar4 net_force = d_net_force[idx];
Scalar3 accel = make_scalar3(net_force.x, net_force.y, net_force.z);
// update the position
Scalar3 dx = vel * deltaT;
// FLOPS: 3
pos += dx;
accel = accel/mass;
// read in the particle's image (MEM TRANSFER: 16 bytes)
int3 image = d_image[idx];
// fix the periodic boundary conditions (FLOPS: 15)
box.wrap(pos, image);
// write out the results (MEM_TRANSFER: 48 bytes)
d_accel[idx] = accel;
d_pos[idx] = make_scalar4(pos.x, pos.y, pos.z, postype.w);
d_image[idx] = image;
}
}
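// A minimal launch sketch matching the requirement in the documentation comment
// above the kernel (a 1D grid with at least group_size threads). The block size
// of 256 is an assumption; the launch actually used by this file appears in
// gpu_stokes_step_one() further below.
//
//   unsigned int block = 256;
//   unsigned int nblocks = (group_size + block - 1) / block;
//   hipLaunchKernelGGL(gpu_stokes_step_one_kernel, dim3(nblocks), dim3(block), 0, 0,
//                      d_pos, d_vel, d_accel, d_image, d_group_members,
//                      group_size, box, deltaT, d_net_force, shear_rate);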
/*! \param d_pos array of particle positions
\param d_vel array of particle velocities
\param d_accel array of particle accelerations
\param d_image array of particle images
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group ( i.e. number of particles to consider )
\param box Box dimensions for periodic boundary condition handling
\param dt timestep
\param block_size optimum block size returned by an autotuner
\param d_net_force net force on the particles
\param T temperature
\param timestep time step
\param seed seed for random number generation
\param xi splitting coefficient for Ewald summation
\param eta Spectral splitting parameter
\param P number of nodes in support of each gaussian for k-space sum
\param ewald_cut cut off radius for Ewald summation
\param ewald_dr discretization of look up tables
\param ewald_n number of elements in look up tables
\param d_ewaldC Ewald coefficients for real space sum
\param d_gridk reciprocal lattice vectors and parameters for Ewald reciprocal space sum
\param d_gridX x-component of force moment projection onto the grid
\param d_gridY y-component of force moment projection onto the grid
\param d_gridZ z-component of force moment projection onto the grid
\param plan cudaFFT plan
\param Nx number of grid nodes in the x-direction
\param Ny number of grid nodes in the y-direction
\param Nz number of grid nodes in the z-direction
\param d_n_neigh Number of neighbors for every particle
\param d_nlist Neighbor list of every particle, 2D array, can be accessed by nli
\param nli Index lookup helper for d_nlist
\param cheb_an Chebychev coefficients
\param n_cheb Order of Chebyshev approximation
\param N_total total number of particles ( should be same as group_size )
\param gridh Spacing between grid nodes
\param cheb_recompute whether to recompute chebyshev approximation
\param eig_recompute whether to recompute eigenvalues of matrix approximation
\param stored_eigenvalue previous max eigenvalue
\param cheb_error error tolerance in chebyshev approximation
*/
hipError_t gpu_stokes_step_one(
Scalar4 *d_pos,
Scalar4 *d_vel,
Scalar3 *d_accel,
int3 *d_image,
unsigned int *d_group_members,
unsigned int group_size,
const BoxDim& box,
Scalar dt,
unsigned int block_size,
Scalar4 *d_net_force,
const Scalar T,
const unsigned int timestep,
const unsigned int seed,
Scalar xi,
Scalar eta,
Scalar ewald_cut,
Scalar ewald_dr,
int ewald_n,
Scalar4 *d_ewaldC1,
Scalar self,
Scalar4 *d_gridk,
CUFFTCOMPLEX *d_gridX,
CUFFTCOMPLEX *d_gridY,
CUFFTCOMPLEX *d_gridZ,
hipfftHandle plan,
const int Nx,
const int Ny,
const int Nz,
const unsigned int *d_n_neigh,
const unsigned int *d_nlist,
const unsigned int *d_headlist,
int& m_Lanczos,
const unsigned int N_total,
const int P,
Scalar3 gridh,
Scalar cheb_error,
Scalar shear_rate
){
// Total number of grid points
unsigned int NxNyNz = Nx*Ny*Nz;
// setup the grid to run the kernel
// block for particle calculation
dim3 grid( (group_size/block_size) + 1, 1, 1);
dim3 threads(block_size, 1, 1);
// block for grid calculation
int gridBlockSize = ( NxNyNz > block_size ) ? block_size : NxNyNz;
int gridNBlock = ( NxNyNz + gridBlockSize - 1 ) / gridBlockSize ;
// Get the textured tables for real space Ewald sum tabulation
tables1_tex.normalized = false; // Not normalized
tables1_tex.filterMode = hipFilterModeLinear; // Filter mode: floor of the index
// One dimension, Read mode: ElementType(Get what we write)
hipBindTexture(0, tables1_tex, d_ewaldC1, sizeof(Scalar4) * (ewald_n+1)); // This was a bug in former versions!
// Same for the positions and forces
pos_tex.normalized = false; // Not normalized
pos_tex.filterMode = hipFilterModePoint; // Filter mode: floor of the index
hipBindTexture(0, pos_tex, d_pos, sizeof(Scalar4) * N_total);
// Get sheared grid vectors
hipLaunchKernelGGL(( gpu_stokes_SetGridk_kernel), dim3(gridNBlock),dim3(gridBlockSize), 0, 0, d_gridk,Nx,Ny,Nz,NxNyNz,box,xi,eta);
// Do Mobility and Brownian Calculations (compute the velocity from the forces)
gpu_stokes_CombinedMobilityBrownian_wrap(
d_pos,
d_net_force,
d_group_members,
group_size,
box,
dt,
d_vel, // output
T,
timestep,
seed,
xi,
eta,
P,
ewald_cut,
ewald_dr,
ewald_n,
d_ewaldC1,
d_gridk,
d_gridX,
d_gridY,
d_gridZ,
plan,
Nx,
Ny,
Nz,
d_n_neigh,
d_nlist,
d_headlist,
m_Lanczos,
N_total,
NxNyNz,
grid,
threads,
gridBlockSize,
gridNBlock,
gridh,
cheb_error,
self );
// Use forward Euler integration to move the particles according the velocity
// computed from the Mobility and Brownian calculations
hipLaunchKernelGGL(( gpu_stokes_step_one_kernel), dim3(grid), dim3(threads) , 0, 0,
d_pos,
d_vel,
d_accel,
d_image,
d_group_members,
group_size,
box,
dt,
d_net_force,
shear_rate
);
// Quick error check
gpuErrchk(hipPeekAtLastError());
// Cleanup
hipUnbindTexture(tables1_tex);
hipUnbindTexture(pos_tex);
return hipSuccess;
}
| 665a694095d51439d16454e4f6ec70edfc2203c7.cu | /*
Highly Optimized Object-oriented Many-particle Dynamics -- Blue Edition
(HOOMD-blue) Open Source Software License Copyright 2009-2014 The Regents of
the University of Michigan All rights reserved.
HOOMD-blue may contain modifications ("Contributions") provided, and to which
copyright is held, by various Contributors who have granted The Regents of the
University of Michigan the right to modify and/or distribute such Contributions.
You may redistribute, use, and create derivate works of HOOMD-blue, in source
and binary forms, provided you abide by the following conditions:
* Redistributions of source code must retain the above copyright notice, this
list of conditions, and the following disclaimer both in the code and
prominently in any materials provided with the distribution.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions, and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* All publications and presentations based on HOOMD-blue, including any reports
or published results obtained, in whole or in part, with HOOMD-blue, will
acknowledge its use according to the terms posted at the time of submission on:
http://codeblue.umich.edu/hoomd-blue/citations.html
* Any electronic documents citing HOOMD-Blue will link to the HOOMD-Blue website:
http://codeblue.umich.edu/hoomd-blue/
* Apart from the above required attributions, neither the name of the copyright
holder nor the names of HOOMD-blue's contributors may be used to endorse or
promote products derived from this software without specific prior written
permission.
Disclaimer
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS ``AS IS'' AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND/OR ANY
WARRANTIES THAT THIS SOFTWARE IS FREE OF INFRINGEMENT ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Maintainer: joaander
// Modified by Gang Wang
// Modified by Andrew Fiore
#include "Stokes.cuh"
#include "Mobility.cuh"
#include "Brownian.cuh"
#include "Helper.cuh"
#include "hoomd/Saru.h"
#include "hoomd/TextureTools.h"
#include <stdio.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#ifdef WIN32
#include <cassert>
#else
#include <assert.h>
#endif
//! command to convert floats or doubles to integers
#ifdef SINGLE_PRECISION
#define __scalar2int_rd __float2int_rd
#else
#define __scalar2int_rd __double2int_rd
#endif
#ifndef __ERRCHK_CUH__
#define __ERRCHK_CUH__
//! Function to check for errors
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
/*!
\param code returned error code
\param file which file the error occurred in
\param line which line error check was tripped
\param abort whether to kill code upon error trigger
*/
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#endif
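// Usage sketch for the gpuErrchk macro above, assuming a hypothetical device
// buffer d_buf (the name and size are placeholders, not taken from this file):
//
//   float *d_buf;
//   gpuErrchk( cudaMalloc( (void**)&d_buf, 1024 * sizeof(float) ) );
//   gpuErrchk( cudaMemset( d_buf, 0, 1024 * sizeof(float) ) );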
/*! \file Stokes.cu
\brief Defines GPU kernel code for integration considering hydrodynamic interactions on the GPU. Used by Stokes.cc.
*/
//! Shared memory array for partial sum of dot product kernel
extern __shared__ Scalar partial_sum[];
extern __shared__ Scalar4 shared_Fpos[];
//! Texture for reading table values
scalar4_tex_t tables1_tex;
//! Texture for reading particle positions
scalar4_tex_t pos_tex;
//! Takes the integration on a group of particles
/*! \param d_pos array of particle positions
\param d_vel array of particle velocities
\param d_delu1 first 4 components of gradient of particle velocity
\param d_delu2 second 4 components of gradient of particle velocity
\param d_accel array of particle "accelerations" (This is an overdamped integrator, so accelerations don't have physical meaning)
\param d_image array of particle images
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group
\param box Box dimensions for periodic boundary condition handling
\param deltaT timestep
\param d_net_force net force on each particle, only used to set "accelerations"
This kernel must be executed with a 1D grid of any block size such that the number of threads is greater than or
equal to the number of members in the group. The kernel's implementation simply reads one particle in each thread
and updates that particle. (Not necessarily true for Stokesian Dynamics simulation)
<b>Performance notes:</b>
Particle properties are read via the texture cache to optimize the bandwidth obtained with sparse groups. The writes
in sparse groups will not be coalesced. However, because ParticleGroup sorts the index list the writes will be as
contiguous as possible leading to fewer memory transactions on compute 1.3 hardware and more cache hits on Fermi. (Not sure about this..)
*/
extern "C" __global__
void gpu_stokes_step_one_kernel(
Scalar4 *d_pos,
Scalar4 *d_vel,
Scalar3 *d_accel,
int3 *d_image,
unsigned int *d_group_members,
unsigned int group_size,
BoxDim box,
Scalar deltaT,
Scalar4 *d_net_force,
Scalar shear_rate
){
// determine which particle this thread works on (MEM TRANSFER: 4 bytes)
int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx < group_size){
unsigned int idx = d_group_members[group_idx];
// read the particle's position (MEM TRANSFER: 16 bytes)
Scalar4 postype = d_pos[idx];
Scalar3 pos = make_scalar3(postype.x, postype.y, postype.z);
// read the particle's velocity and acceleration (MEM TRANSFER: 32 bytes)
Scalar4 velmass = d_vel[idx];
Scalar mass = velmass.w;
Scalar3 vel = make_scalar3(velmass.x, velmass.y, velmass.z);
// Add the shear
vel.x += shear_rate * pos.y;
Scalar4 net_force = d_net_force[idx];
Scalar3 accel = make_scalar3(net_force.x, net_force.y, net_force.z);
// update the position
Scalar3 dx = vel * deltaT;
// FLOPS: 3
pos += dx;
accel = accel/mass;
// read in the particle's image (MEM TRANSFER: 16 bytes)
int3 image = d_image[idx];
// fix the periodic boundary conditions (FLOPS: 15)
box.wrap(pos, image);
// write out the results (MEM_TRANSFER: 48 bytes)
d_accel[idx] = accel;
d_pos[idx] = make_scalar4(pos.x, pos.y, pos.z, postype.w);
d_image[idx] = image;
}
}
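// A minimal launch sketch matching the requirement in the documentation comment
// above the kernel (a 1D grid with at least group_size threads). The block size
// of 256 is an assumption; the launch actually used by this file appears in
// gpu_stokes_step_one() further below.
//
//   unsigned int block = 256;
//   unsigned int nblocks = (group_size + block - 1) / block;
//   gpu_stokes_step_one_kernel<<< nblocks, block >>>(
//       d_pos, d_vel, d_accel, d_image, d_group_members,
//       group_size, box, deltaT, d_net_force, shear_rate);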
/*! \param d_pos array of particle positions
\param d_vel array of particle velocities
\param d_accel array of particle accelerations
\param d_image array of particle images
\param d_group_members Device array listing the indices of the members of the group to integrate
\param group_size Number of members in the group ( i.e. number of particles to consider )
\param box Box dimensions for periodic boundary condition handling
\param dt timestep
\param block_size optimum block size returned by an autotuner
\param d_net_force net force on the particles
\param T temperature
\param timestep time step
\param seed seed for random number generation
\param xi splitting coefficient for Ewald summation
\param eta Spectral splitting parameter
\param P number of nodes in support of each gaussian for k-space sum
\param ewald_cut cut off radius for Ewald summation
\param ewald_dr discretization of look up tables
\param ewald_n number of elements in look up tables
\param d_ewaldC Ewald coefficients for real space sum
\param d_gridk reciprocal lattice vectors and parameters for Ewald reciprocal space sum
\param d_gridX x-component of force moment projection onto the grid
\param d_gridY y-component of force moment projection onto the grid
\param d_gridZ z-component of force moment projection onto the grid
\param plan cudaFFT plan
\param Nx number of grid nodes in the x-direction
\param Ny number of grid nodes in the y-direction
\param Nz number of grid nodes in the z-direction
\param d_n_neigh Number of neighbors for every particle
\param d_nlist Neighbor list of every particle, 2D array, can be accessed by nli
\param nli Index lookup helper for d_nlist
\param cheb_an Chebychev coefficients
\param n_cheb Order of Chebyshev approximation
\param N_total total number of particles ( should be same as group_size )
\param gridh Spacing between grid nodes
\param cheb_recompute whether to recompute chebyshev approximation
\param eig_recompute whether to recompute eigenvalues of matrix approximation
\param stored_eigenvalue previous max eigenvalue
\param cheb_error error tolerance in chebyshev approximation
*/
cudaError_t gpu_stokes_step_one(
Scalar4 *d_pos,
Scalar4 *d_vel,
Scalar3 *d_accel,
int3 *d_image,
unsigned int *d_group_members,
unsigned int group_size,
const BoxDim& box,
Scalar dt,
unsigned int block_size,
Scalar4 *d_net_force,
const Scalar T,
const unsigned int timestep,
const unsigned int seed,
Scalar xi,
Scalar eta,
Scalar ewald_cut,
Scalar ewald_dr,
int ewald_n,
Scalar4 *d_ewaldC1,
Scalar self,
Scalar4 *d_gridk,
CUFFTCOMPLEX *d_gridX,
CUFFTCOMPLEX *d_gridY,
CUFFTCOMPLEX *d_gridZ,
cufftHandle plan,
const int Nx,
const int Ny,
const int Nz,
const unsigned int *d_n_neigh,
const unsigned int *d_nlist,
const unsigned int *d_headlist,
int& m_Lanczos,
const unsigned int N_total,
const int P,
Scalar3 gridh,
Scalar cheb_error,
Scalar shear_rate
){
// Total number of grid points
unsigned int NxNyNz = Nx*Ny*Nz;
// setup the grid to run the kernel
// block for particle calculation
dim3 grid( (group_size/block_size) + 1, 1, 1);
dim3 threads(block_size, 1, 1);
// block for grid calculation
int gridBlockSize = ( NxNyNz > block_size ) ? block_size : NxNyNz;
int gridNBlock = ( NxNyNz + gridBlockSize - 1 ) / gridBlockSize ;
// Get the textured tables for real space Ewald sum tabulation
tables1_tex.normalized = false; // Not normalized
tables1_tex.filterMode = cudaFilterModeLinear; // Filter mode: floor of the index
// One dimension, Read mode: ElementType(Get what we write)
cudaBindTexture(0, tables1_tex, d_ewaldC1, sizeof(Scalar4) * (ewald_n+1)); // This was a bug in former versions!
// Same for the positions and forces
pos_tex.normalized = false; // Not normalized
pos_tex.filterMode = cudaFilterModePoint; // Filter mode: floor of the index
cudaBindTexture(0, pos_tex, d_pos, sizeof(Scalar4) * N_total);
// Get sheared grid vectors
gpu_stokes_SetGridk_kernel<<<gridNBlock,gridBlockSize>>>(d_gridk,Nx,Ny,Nz,NxNyNz,box,xi,eta);
// Do Mobility and Brownian Calculations (compute the velocity from the forces)
gpu_stokes_CombinedMobilityBrownian_wrap(
d_pos,
d_net_force,
d_group_members,
group_size,
box,
dt,
d_vel, // output
T,
timestep,
seed,
xi,
eta,
P,
ewald_cut,
ewald_dr,
ewald_n,
d_ewaldC1,
d_gridk,
d_gridX,
d_gridY,
d_gridZ,
plan,
Nx,
Ny,
Nz,
d_n_neigh,
d_nlist,
d_headlist,
m_Lanczos,
N_total,
NxNyNz,
grid,
threads,
gridBlockSize,
gridNBlock,
gridh,
cheb_error,
self );
// Use forward Euler integration to move the particles according the velocity
// computed from the Mobility and Brownian calculations
gpu_stokes_step_one_kernel<<< grid, threads >>>(
d_pos,
d_vel,
d_accel,
d_image,
d_group_members,
group_size,
box,
dt,
d_net_force,
shear_rate
);
// Quick error check
gpuErrchk(cudaPeekAtLastError());
// Cleanup
cudaUnbindTexture(tables1_tex);
cudaUnbindTexture(pos_tex);
return cudaSuccess;
}
|
cf5b1c6d9c626367f643ac66416fe4ce5d521805.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "FourierTransform.h"
#include <stdio.h>
#include "ReadFile.h"
#include<iostream>
#include "cufftw.h"
#include "DifferenceKernel.h"
#include "Cuda_Help.cuh"
#include <vector>
//#define BLOCK_DIM 1024
using namespace std;
int main(int argc, char** argv)
{
FourierTransform ft = FourierTransform();
ReadFile * rf = new ReadFile();
string musicDir;
string sampleDir;
bool get_from_user = true;
if (argc == 3) {
get_from_user = false;
musicDir = string(argv[1]);
sampleDir = string(argv[2]);
}
while (true)
{
cout << "enter MusicDir :" << endl << ">>";
if (get_from_user) { cin >> musicDir; };
bool result = rf->readMusicDir(musicDir);
if (result) { break; }
else { get_from_user = true; }
}
cout << sampleDir << endl;
while (true)
{
cout << "enter sample dir :" << endl << ">>";
if (get_from_user) { cin >> sampleDir; };
bool result = rf->readSampleDir(sampleDir);
if (result) { break; }
else { get_from_user = true; }
}
rf->readMusics();
rf->testSamples();
//rf.readMusicDir();
//rf.readSampleDir();
//rf.testSamples();
//return 0;
//vector<float>* ra = rf.read("D:\\misc\\ffmpg\\Data\\09. Run Like Hell.txt");
//vector<float>* ra1 = rf.read("D:\\misc\\ffmpg\\Data\\02. Is There Anybody Out There_sample.txt");
//for (int i = 1000; i < 20000; i++)
//hipfftComplex* cufft1 = ft.transform(ra1->array, ra1->size);
//cout << endl;
//hipfftComplex* cufft = ft.transform(ra->array, ra->size);
//DifferenceKernel dk;
//dk.distance(ra,ra1);
//float* result=(float *)malloc(2*sizeof(float));
//result[0] = 0.0;
//result[0] = 1.0;
//dist(cufft1, cufft,(ra->size /2)+1,result);
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
| cf5b1c6d9c626367f643ac66416fe4ce5d521805.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "FourierTransform.h"
#include <stdio.h>
#include "ReadFile.h"
#include<iostream>
#include "cufftw.h"
#include "DifferenceKernel.h"
#include "Cuda_Help.cuh"
#include <vector>
//#define BLOCK_DIM 1024
using namespace std;
int main(int argc, char** argv)
{
FourierTransform ft = FourierTransform();
ReadFile * rf = new ReadFile();
string musicDir;
string sampleDir;
bool get_from_user = true;
if (argc == 3) {
get_from_user = false;
musicDir = string(argv[1]);
sampleDir = string(argv[2]);
}
while (true)
{
cout << "enter MusicDir :" << endl << ">>";
if (get_from_user) { cin >> musicDir; };
bool result = rf->readMusicDir(musicDir);
if (result) { break; }
else { get_from_user = true; }
}
cout << sampleDir << endl;
while (true)
{
cout << "enter sample dir :" << endl << ">>";
if (get_from_user) { cin >> sampleDir; };
bool result = rf->readSampleDir(sampleDir);
if (result) { break; }
else { get_from_user = true; }
}
rf->readMusics();
rf->testSamples();
//rf.readMusicDir();
//rf.readSampleDir();
//rf.testSamples();
//return 0;
//vector<float>* ra = rf.read("D:\\misc\\ffmpg\\Data\\09. Run Like Hell.txt");
//vector<float>* ra1 = rf.read("D:\\misc\\ffmpg\\Data\\02. Is There Anybody Out There_sample.txt");
//for (int i = 1000; i < 20000; i++)
//cufftComplex* cufft1 = ft.transform(ra1->array, ra1->size);
//cout << endl;
//cufftComplex* cufft = ft.transform(ra->array, ra->size);
//DifferenceKernel dk;
//dk.distance(ra,ra1);
//float* result=(float *)malloc(2*sizeof(float));
//result[0] = 0.0;
//result[0] = 1.0;
//dist(cufft1, cufft,(ra->size /2)+1,result);
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
|
e05c70227c4608442964132fdae5b514c8aa1655.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <gptl.h>
#include <gptl_cuda.h>
#include "./localproto.h"
__global__ void setup_handles (int *, int *, int *, int *, int *, int *);
__global__ void doalot (int, int, int, int, int,
float *, float *, double *,
int *, int *, int *,
int *, int *);
int *total_gputime;
int *donothing_handle;
int *doalot_log_handle;
int *doalot_log_inner_handle;
int *doalot_sqrt_handle;
int *doalot_sqrt_double_handle;
float *logvals;
float *sqrtvals;
double *dsqrtvals;
__host__ int persist (int mostwork, int outerlooplen,
int innerlooplen, int balfact, int oversub,
int cores_per_sm, int cores_per_gpu)
{
int blocksize, gridsize;
int ret;
int n, nn;
int totalwork;
hipEvent_t tstart, tstop;
float dt;
int chunksize;
int nchunks;
hipError_t cret;
static const char *thisfunc = "persist";
if (hipMallocManaged (&total_gputime, sizeof (int)) != hipSuccess)
printf ("hipMallocManaged error total_gputime\n");
if (hipMallocManaged (&donothing_handle, sizeof (int)) != hipSuccess)
printf ("hipMallocManaged error donothing_handle\n");
if (hipMallocManaged (&doalot_log_handle, sizeof (int)) != hipSuccess)
printf ("hipMallocManaged error doalot_log_handle\n");
if (hipMallocManaged (&doalot_log_inner_handle, sizeof (int)) != hipSuccess)
printf ("hipMallocManaged error doalot_log_inner_handle\n");
if (hipMallocManaged (&doalot_sqrt_handle, sizeof (int)) != hipSuccess)
printf ("hipMallocManaged error doalot_sqrt_handle\n");
if (hipMallocManaged (&doalot_sqrt_double_handle, sizeof (int)) != hipSuccess)
printf ("hipMallocManaged error doalot_sqrt_double_handle\n");
printf ("%s: issuing hipMallocManaged calls to hold results\n", thisfunc);
if ((cret = hipMallocManaged (&logvals, outerlooplen * sizeof (float))) != hipSuccess)
printf ("hipMallocManaged error logvals:%s\n", hipGetErrorString (cret));
if (hipMallocManaged (&sqrtvals, outerlooplen * sizeof (float)) != hipSuccess)
printf ("hipMallocManaged error sqrtvals\n");
if (hipMallocManaged (&dsqrtvals, outerlooplen * sizeof (double)) != hipSuccess)
printf ("hipMallocManaged error dsqrtvals\n");
hipLaunchKernelGGL(( setup_handles) , dim3(1),dim3(1), 0, 0, total_gputime, donothing_handle, doalot_log_handle,
doalot_log_inner_handle, doalot_sqrt_handle, doalot_sqrt_double_handle);
hipDeviceSynchronize();
printf ("called hipDeviceSynchronize 1\n");
chunksize = oversub * cores_per_gpu;
nchunks = (outerlooplen + (chunksize-1)) / chunksize;
printf ("outerlooplen=%d broken into %d kernels of chunksize=%d\n",
outerlooplen, nchunks, chunksize);
n = 0;
for (nn = 0; nn < outerlooplen; nn += chunksize) {
printf ("chunk=%d totalwork=%d\n", n, MIN (chunksize, outerlooplen - nn));
++n;
}
for (nn = 0; nn < outerlooplen; nn += chunksize) {
totalwork = MIN (chunksize, outerlooplen - nn);
blocksize = MIN (cores_per_sm, totalwork);
gridsize = (totalwork-1) / blocksize + 1;
ret = GPTLstart ("total_kerneltime");
ret = GPTLstart ("donothing");
hipLaunchKernelGGL(( donothing) , dim3(gridsize), dim3(blocksize), 0, 0, total_gputime, donothing_handle);
hipDeviceSynchronize();
ret = GPTLstop ("donothing");
ret = GPTLstop ("total_kerneltime");
}
for (nn = 0; nn < outerlooplen; nn += chunksize) {
totalwork = MIN (chunksize, outerlooplen - nn);
blocksize = MIN (cores_per_sm, totalwork);
gridsize = (totalwork-1) / blocksize + 1;
printf ("Invoking doalot gridsize=%d blocksize=%d\n", gridsize, blocksize);
ret = GPTLstart ("total_kerneltime");
ret = GPTLstart ("doalot");
hipLaunchKernelGGL(( doalot) , dim3(gridsize), dim3(blocksize), 0, 0, nn, outerlooplen, innerlooplen, balfact, mostwork,
logvals, sqrtvals, dsqrtvals,
total_gputime, doalot_log_handle, doalot_log_inner_handle,
doalot_sqrt_handle, doalot_sqrt_double_handle);
hipDeviceSynchronize();
ret = GPTLstop ("doalot");
ret = GPTLstop ("total_kerneltime");
}
// create events (start and stop):
hipEventCreate(&tstart);
hipEventCreate(&tstop);
// to time region of cuda code:
hipEventRecord(tstart, 0); // the '0' is the stream id
printf ("Sleeping 1 second on GPU...\n");
ret = GPTLstart ("total_kerneltime");
ret = GPTLstart ("sleep1ongpu");
for (nn = 0; nn < outerlooplen; nn += chunksize) {
totalwork = MIN (chunksize, outerlooplen - nn);
blocksize = MIN (cores_per_sm, totalwork);
gridsize = (totalwork-1) / blocksize + 1;
hipLaunchKernelGGL(( sleep) , dim3(gridsize), dim3(blocksize), 0, 0, 1.f, outerlooplen);
hipDeviceSynchronize();
}
ret = GPTLstop ("sleep1ongpu");
ret = GPTLstop ("total_kerneltime");
hipEventRecord(tstop, 0);
hipEventSynchronize(tstop); // make sure 'stop' is safe to use
hipEventElapsedTime(&dt, tstart, tstop); // time in ms
printf ("Stream timer for sleep=%f seconds\n", dt*0.001);
hipEventDestroy (tstart);
hipEventDestroy (tstop);
return 0;
}
__global__ void doalot (int nn, int outerlooplen, int innerlooplen, int balfact, int mostwork,
float *logvals, float *sqrtvals, double *dsqrtvals,
int *total_gputime, int *doalot_log_handle, int *doalot_log_inner_handle,
int *doalot_sqrt_handle, int *doalot_sqrt_double_handle)
{
int ret;
float factor;
int blockId;
int n, nnn;
int niter;
blockId = blockIdx.x
+ blockIdx.y * gridDim.x
+ gridDim.x * gridDim.y * blockIdx.z;
n = blockId * (blockDim.x * blockDim.y * blockDim.z)
+ (threadIdx.z * (blockDim.x * blockDim.y))
+ (threadIdx.y * blockDim.x)
+ threadIdx.x;
nnn = nn + n;
factor = (float) nnn / (float) (outerlooplen-1);
switch (balfact) {
case 0:
niter = (int) (factor * mostwork);
break;
case 1:
niter = mostwork;
break;
case 2:
niter = mostwork - (int) (factor * mostwork);
break;
default:
printf ("doalot: bad balfact=%d--returning prematurely\n", balfact);
return;
}
ret = GPTLstart_gpu (*total_gputime);
if (nnn < outerlooplen) {
ret = GPTLstart_gpu (*doalot_log_handle);
logvals[nnn] = doalot_log (niter, innerlooplen);
ret = GPTLstop_gpu (*doalot_log_handle);
logvals[nnn] = doalot_log_inner (niter, innerlooplen, doalot_log_inner_handle);
ret = GPTLstart_gpu (*doalot_sqrt_handle);
sqrtvals[nnn] = doalot_sqrt (niter, innerlooplen);
ret = GPTLstop_gpu (*doalot_sqrt_handle);
ret = GPTLstart_gpu (*doalot_sqrt_double_handle);
dsqrtvals[nnn] = doalot_sqrt_double (niter, innerlooplen);
ret = GPTLstop_gpu (*doalot_sqrt_double_handle);
}
ret = GPTLstop_gpu (*total_gputime);
}
__global__ void setup_handles (int *total_gputime, int *donothing_handle, int *doalot_log_handle,
int *doalot_log_inner_handle, int *doalot_sqrt_handle, int *doalot_sqrt_double_handle)
{
int ret;
ret = GPTLinit_handle_gpu ("total_gputime", total_gputime);
ret = GPTLinit_handle_gpu ("donothing", donothing_handle);
ret = GPTLinit_handle_gpu ("doalot_log", doalot_log_handle);
ret = GPTLinit_handle_gpu ("doalot_log_inner", doalot_log_inner_handle);
ret = GPTLinit_handle_gpu ("doalot_sqrt", doalot_sqrt_handle);
ret = GPTLinit_handle_gpu ("doalot_sqrt_double", doalot_sqrt_double_handle);
}
| e05c70227c4608442964132fdae5b514c8aa1655.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <gptl.h>
#include <gptl_cuda.h>
#include "./localproto.h"
__global__ void setup_handles (int *, int *, int *, int *, int *, int *);
__global__ void doalot (int, int, int, int, int,
float *, float *, double *,
int *, int *, int *,
int *, int *);
int *total_gputime;
int *donothing_handle;
int *doalot_log_handle;
int *doalot_log_inner_handle;
int *doalot_sqrt_handle;
int *doalot_sqrt_double_handle;
float *logvals;
float *sqrtvals;
double *dsqrtvals;
__host__ int persist (int mostwork, int outerlooplen,
int innerlooplen, int balfact, int oversub,
int cores_per_sm, int cores_per_gpu)
{
int blocksize, gridsize;
int ret;
int n, nn;
int totalwork;
cudaEvent_t tstart, tstop;
float dt;
int chunksize;
int nchunks;
cudaError_t cret;
static const char *thisfunc = "persist";
if (cudaMallocManaged (&total_gputime, sizeof (int)) != cudaSuccess)
printf ("cudaMallocManaged error total_gputime\n");
if (cudaMallocManaged (&donothing_handle, sizeof (int)) != cudaSuccess)
printf ("cudaMallocManaged error donothing_handle\n");
if (cudaMallocManaged (&doalot_log_handle, sizeof (int)) != cudaSuccess)
printf ("cudaMallocManaged error doalot_log_handle\n");
if (cudaMallocManaged (&doalot_log_inner_handle, sizeof (int)) != cudaSuccess)
printf ("cudaMallocManaged error doalot_log_inner_handle\n");
if (cudaMallocManaged (&doalot_sqrt_handle, sizeof (int)) != cudaSuccess)
printf ("cudaMallocManaged error doalot_sqrt_handle\n");
if (cudaMallocManaged (&doalot_sqrt_double_handle, sizeof (int)) != cudaSuccess)
printf ("cudaMallocManaged error doalot_sqrt_double_handle\n");
printf ("%s: issuing cudaMallocManaged calls to hold results\n", thisfunc);
if ((cret = cudaMallocManaged (&logvals, outerlooplen * sizeof (float))) != cudaSuccess)
printf ("cudaMallocManaged error logvals:%s\n", cudaGetErrorString (cret));
if (cudaMallocManaged (&sqrtvals, outerlooplen * sizeof (float)) != cudaSuccess)
printf ("cudaMallocManaged error sqrtvals\n");
if (cudaMallocManaged (&dsqrtvals, outerlooplen * sizeof (double)) != cudaSuccess)
printf ("cudaMallocManaged error dsqrtvals\n");
setup_handles <<<1,1>>> (total_gputime, donothing_handle, doalot_log_handle,
doalot_log_inner_handle, doalot_sqrt_handle, doalot_sqrt_double_handle);
cudaDeviceSynchronize();
printf ("called cudaDeviceSynchronize 1\n");
chunksize = oversub * cores_per_gpu;
nchunks = (outerlooplen + (chunksize-1)) / chunksize;
printf ("outerlooplen=%d broken into %d kernels of chunksize=%d\n",
outerlooplen, nchunks, chunksize);
n = 0;
for (nn = 0; nn < outerlooplen; nn += chunksize) {
printf ("chunk=%d totalwork=%d\n", n, MIN (chunksize, outerlooplen - nn));
++n;
}
for (nn = 0; nn < outerlooplen; nn += chunksize) {
totalwork = MIN (chunksize, outerlooplen - nn);
blocksize = MIN (cores_per_sm, totalwork);
gridsize = (totalwork-1) / blocksize + 1;
ret = GPTLstart ("total_kerneltime");
ret = GPTLstart ("donothing");
donothing <<<gridsize, blocksize>>> (total_gputime, donothing_handle);
cudaDeviceSynchronize();
ret = GPTLstop ("donothing");
ret = GPTLstop ("total_kerneltime");
}
for (nn = 0; nn < outerlooplen; nn += chunksize) {
totalwork = MIN (chunksize, outerlooplen - nn);
blocksize = MIN (cores_per_sm, totalwork);
gridsize = (totalwork-1) / blocksize + 1;
printf ("Invoking doalot gridsize=%d blocksize=%d\n", gridsize, blocksize);
ret = GPTLstart ("total_kerneltime");
ret = GPTLstart ("doalot");
doalot <<<gridsize, blocksize>>> (nn, outerlooplen, innerlooplen, balfact, mostwork,
logvals, sqrtvals, dsqrtvals,
total_gputime, doalot_log_handle, doalot_log_inner_handle,
doalot_sqrt_handle, doalot_sqrt_double_handle);
cudaDeviceSynchronize();
ret = GPTLstop ("doalot");
ret = GPTLstop ("total_kerneltime");
}
// create events (start and stop):
cudaEventCreate(&tstart);
cudaEventCreate(&tstop);
// to time region of cuda code:
cudaEventRecord(tstart, 0); // the '0' is the stream id
printf ("Sleeping 1 second on GPU...\n");
ret = GPTLstart ("total_kerneltime");
ret = GPTLstart ("sleep1ongpu");
for (nn = 0; nn < outerlooplen; nn += chunksize) {
totalwork = MIN (chunksize, outerlooplen - nn);
blocksize = MIN (cores_per_sm, totalwork);
gridsize = (totalwork-1) / blocksize + 1;
sleep <<<gridsize, blocksize>>> (1.f, outerlooplen);
cudaDeviceSynchronize();
}
ret = GPTLstop ("sleep1ongpu");
ret = GPTLstop ("total_kerneltime");
cudaEventRecord(tstop, 0);
cudaEventSynchronize(tstop); // make sure 'stop' is safe to use
cudaEventElapsedTime(&dt, tstart, tstop); // time in ms
printf ("Stream timer for sleep=%f seconds\n", dt*0.001);
cudaEventDestroy (tstart);
cudaEventDestroy (tstop);
return 0;
}
__global__ void doalot (int nn, int outerlooplen, int innerlooplen, int balfact, int mostwork,
float *logvals, float *sqrtvals, double *dsqrtvals,
int *total_gputime, int *doalot_log_handle, int *doalot_log_inner_handle,
int *doalot_sqrt_handle, int *doalot_sqrt_double_handle)
{
int ret;
float factor;
int blockId;
int n, nnn;
int niter;
blockId = blockIdx.x
+ blockIdx.y * gridDim.x
+ gridDim.x * gridDim.y * blockIdx.z;
n = blockId * (blockDim.x * blockDim.y * blockDim.z)
+ (threadIdx.z * (blockDim.x * blockDim.y))
+ (threadIdx.y * blockDim.x)
+ threadIdx.x;
nnn = nn + n;
factor = (float) nnn / (float) (outerlooplen-1);
switch (balfact) {
case 0:
niter = (int) (factor * mostwork);
break;
case 1:
niter = mostwork;
break;
case 2:
niter = mostwork - (int) (factor * mostwork);
break;
default:
printf ("doalot: bad balfact=%d--returning prematurely\n", balfact);
return;
}
ret = GPTLstart_gpu (*total_gputime);
if (nnn < outerlooplen) {
ret = GPTLstart_gpu (*doalot_log_handle);
logvals[nnn] = doalot_log (niter, innerlooplen);
ret = GPTLstop_gpu (*doalot_log_handle);
logvals[nnn] = doalot_log_inner (niter, innerlooplen, doalot_log_inner_handle);
ret = GPTLstart_gpu (*doalot_sqrt_handle);
sqrtvals[nnn] = doalot_sqrt (niter, innerlooplen);
ret = GPTLstop_gpu (*doalot_sqrt_handle);
ret = GPTLstart_gpu (*doalot_sqrt_double_handle);
dsqrtvals[nnn] = doalot_sqrt_double (niter, innerlooplen);
ret = GPTLstop_gpu (*doalot_sqrt_double_handle);
}
ret = GPTLstop_gpu (*total_gputime);
}
__global__ void setup_handles (int *total_gputime, int *donothing_handle, int *doalot_log_handle,
int *doalot_log_inner_handle, int *doalot_sqrt_handle, int *doalot_sqrt_double_handle)
{
int ret;
ret = GPTLinit_handle_gpu ("total_gputime", total_gputime);
ret = GPTLinit_handle_gpu ("donothing", donothing_handle);
ret = GPTLinit_handle_gpu ("doalot_log", doalot_log_handle);
ret = GPTLinit_handle_gpu ("doalot_log_inner", doalot_log_inner_handle);
ret = GPTLinit_handle_gpu ("doalot_sqrt", doalot_sqrt_handle);
ret = GPTLinit_handle_gpu ("doalot_sqrt_double", doalot_sqrt_double_handle);
}
|
39e04968ab94c574407b64ed0fd435b93dd0d676.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected]), created on 19.04.2018
// @author [email protected]
//
#include <op_boilerplate.h>
#include <ops/declarable/helpers/activations.h>
#include <ShapeUtils.h>
#include <numeric>
#include <PointersManager.h>
namespace nd4j {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
template<typename X, typename Y>
__global__ void preluCuda(const void *vx, const Nd4jLong *xShapeInfo,
const void *vy, const Nd4jLong *yShapeInfo,
void *vz) {
const auto x = reinterpret_cast<const X*>(vx);
const auto y = reinterpret_cast<const Y*>(vy);
auto z = reinterpret_cast<X*>(vz);
__shared__ Nd4jLong len;
if (threadIdx.x == 0)
len = shape::length(xShapeInfo);
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto totalThreads = gridDim.x * blockDim.x;
for (int i = tid; i < len; i += totalThreads) {
const auto xzOffset = shape::getIndexOffset(i, xShapeInfo, len);
const auto xVal = x[xzOffset];
if(xVal < 0)
z[xzOffset] = xVal * y[shape::subArrayOffset(i, xShapeInfo, yShapeInfo)];
else
z[xzOffset] = xVal;
}
}
///////////////////////////////////////////////////////////////////
template<typename X, typename Y>
linkage void preluCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream, const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz) {
hipLaunchKernelGGL(( preluCuda<X, Y>), dim3(blocksPerGrid), dim3(threadsPerBlock), 1024, *stream, vx, xShapeInfo, vy, yShapeInfo, vz);
}
///////////////////////////////////////////////////////////////////
void prelu(nd4j::LaunchContext * context, const NDArray& input, const NDArray& alpha, NDArray& output) {
if(!input.isActualOnDeviceSide()) input.syncToDevice();
if(!alpha.isActualOnDeviceSide()) alpha.syncToDevice();
const auto xType = input.dataType();
const auto yType = alpha.dataType();
int threadsPerBlock = MAX_NUM_THREADS;
int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
BUILD_DOUBLE_SELECTOR(xType, yType, preluCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), alpha.getSpecialBuffer(), alpha.getSpecialShapeInfo(), output.getSpecialBuffer()), LIBND4J_TYPES, FLOAT_TYPES);
input.tickReadHost();
alpha.tickReadHost();
output.tickWriteDevice();
}
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ void softMaxForVectorCuda(const void *vx, const Nd4jLong *xzShapeInfo, void *vz) {
// logic of this kernel is based on the assumption that gridDim == 1 (a single thread block)
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
__shared__ Nd4jLong len;
__shared__ int numOfIters;
__shared__ T* shmem;
if (threadIdx.x == 0) {
extern __shared__ char shared[];
shmem = reinterpret_cast<T*>(shared);
len = shape::length(xzShapeInfo);
numOfIters = (len + blockDim.x - 1) / blockDim.x; // ceil (len / blockDim.x)
}
__syncthreads();
T temp = -DataTypeUtils::max<T>(); // set start value to compare with at first iteration, FIXME: what if T is unsigned ??
// ************ evaluate max element in input array x ************ //
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx < len) {
const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo, len);
shmem[threadIdx.x] = (threadIdx.x != 0) ? x[offset] : nd4j::math::nd4j_max<T>(x[offset], temp); // take into account max element evaluated on previous iteration and stored in temp
}
else
shmem[threadIdx.x] = -DataTypeUtils::max<T>(); // FIXME: what if T is unsigned ??
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2) {
if(threadIdx.x < s)
shmem[threadIdx.x] = nd4j::math::nd4j_max<T>(shmem[threadIdx.x], shmem[threadIdx.x + s]);
__syncthreads();
}
temp = shmem[0]; // save max value calculated at current iteration
}
const T max = temp;
temp = 0;
// ************ evaluate exp(x[offset] - max) for each element and store it in shared memory shmem ************ //
// at the same time evaluate the sum of the exponents; the sum will be stored in shmem[0]
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx < len) {
const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo, len);
z[offset] = nd4j::math::nd4j_exp<T, T>(x[offset] - max);
shmem[threadIdx.x] = (threadIdx.x != 0) ? z[offset] : (z[offset] + temp); // take into account sum element evaluated on previous iteration and stored in temp
}
else
shmem[threadIdx.x] = 0;
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2) {
if(threadIdx.x < s)
shmem[threadIdx.x] += shmem[threadIdx.x + s];
__syncthreads();
}
temp = shmem[0]; // save sum calculated at current iteration
}
// ************ evaluate z[offset] / sum ************ //
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx >= len) continue;
const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo, len);
z[offset] /= shmem[0];
}
}
///////////////////////////////////////////////////////////////////
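// The launcher uses a single block because the kernel above assumes gridDim = 1,
// and reserves MAX_NUM_THREADS * sizeof(T) bytes of dynamic shared memory for
// the reduction buffer (plus 512 bytes of slack).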
template <typename T>
linkage void softMaxForVectorCudaLauncher(const hipStream_t* stream, const void *vx, const Nd4jLong *xzShapeInfo, void *vz) {
hipLaunchKernelGGL(( softMaxForVectorCuda<T>), dim3(1), dim3(MAX_NUM_THREADS), MAX_NUM_THREADS * sizeof(T) + 512, *stream, vx, xzShapeInfo, vz);
}
//////////////////////////////////////////////////////////////////////////
void softmax(nd4j::LaunchContext * context, const NDArray& input, NDArray& output, const int dimension) {
if(!input.isActualOnDeviceSide()) input.syncToDevice();
const int rank = input.rankOf();
if(input.isVector()) {
if(rank == 1 || input.sizeAt(dimension) != 1) {
BUILD_SINGLE_SELECTOR(input.dataType(), softMaxForVectorCudaLauncher, (context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.getSpecialBuffer()), FLOAT_TYPES);
input.tickReadDevice();
}
else
output = 1.;
}
else {
auto maxAlongDim = const_cast<NDArray&>(input).reduceAlongDims(reduce::Max, {dimension}, true);
(input - maxAlongDim).applyTransform(transform::Exp, &output); // output contains exponents temporarily
auto sumAlongDim = output.reduceAlongDims(reduce::Sum, {dimension}, true);
output /= sumAlongDim;
input.tickReadDevice();
}
PointersManager manager(context, "helpers::softmax");
manager.synchronize();
output.tickWriteDevice();
}
///////////////////////////////////////////////////////////////////
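// logSoftMaxForVectorCuda repeats the same three phases as softMaxForVectorCuda
// but finishes with z[i] = log(exp(x[i] - max) / sum) = (x[i] - max) - log(sum).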
template<typename T>
__global__ void logSoftMaxForVectorCuda(const void *vx, const Nd4jLong *xzShapeInfo, void *vz) {
// logic of this kernel is based on the assumption that gridDim == 1 (a single thread block)
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
__shared__ Nd4jLong len;
__shared__ int numOfIters;
__shared__ T* shmem;
if (threadIdx.x == 0) {
extern __shared__ char shared[];
shmem = reinterpret_cast<T*>(shared);
len = shape::length(xzShapeInfo);
numOfIters = (len + blockDim.x - 1) / blockDim.x; // ceil (len / blockDim.x)
}
__syncthreads();
T temp = -DataTypeUtils::max<T>(); // set start value to compare with at first iteration, FIXME: what if T is unsigned ??
// ************ evaluate max element in input array x ************ //
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx < len) {
const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo, len);
shmem[threadIdx.x] = (threadIdx.x != 0) ? x[offset] : nd4j::math::nd4j_max<T>(x[offset], temp); // take into account max element evaluated on previous iteration and stored in temp
}
else
shmem[threadIdx.x] = -DataTypeUtils::max<T>(); // FIXME: what if T is unsigned ??
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2) {
if(threadIdx.x < s)
shmem[threadIdx.x] = nd4j::math::nd4j_max<T>(shmem[threadIdx.x], shmem[threadIdx.x + s]);
__syncthreads();
}
temp = shmem[0]; // save max value calculated at current iteration
}
const T max = temp;
temp = 0;
// ************ evaluate exp(x[offset] - max) for each element and store it in shared memory shmem ************ //
// at the same time evaluate the sum of the exponents; the sum will be stored in shmem[0]
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx < len) {
const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo, len);
z[offset] = nd4j::math::nd4j_exp<T, T>(x[offset] - max);
shmem[threadIdx.x] = (threadIdx.x != 0) ? z[offset] : (z[offset] + temp); // take into account sum element evaluated on previous iteration and stored in temp
}
else
shmem[threadIdx.x] = 0;
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2) {
if(threadIdx.x < s)
shmem[threadIdx.x] += shmem[threadIdx.x + s];
__syncthreads();
}
temp = shmem[0]; // save sum calculated at current iteration
}
// ************ evaluate log(z[offset] / sum) ************ //
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx >= len) continue;
const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo, len);
z[offset] = nd4j::math::nd4j_log<T,T>(z[offset] / shmem[0]);
}
}
///////////////////////////////////////////////////////////////////
template <typename T>
linkage void logSoftMaxForVectorCudaLauncher(const hipStream_t* stream, const void *vx, const Nd4jLong *xzShapeInfo, void *vz) {
hipLaunchKernelGGL(( logSoftMaxForVectorCuda<T>), dim3(1), dim3(MAX_NUM_THREADS), MAX_NUM_THREADS * sizeof(T) + 512, *stream, vx, xzShapeInfo, vz);
}
//////////////////////////////////////////////////////////////////////////
void logSoftmax(nd4j::LaunchContext * context, const NDArray& input, NDArray& output, const int dimension) {
if(!input.isActualOnDeviceSide()) input.syncToDevice();
const int rank = input.rankOf();
if(input.isVector()) {
if(rank == 1 || input.sizeAt(dimension) != 1) {
BUILD_SINGLE_SELECTOR(input.dataType(), logSoftMaxForVectorCudaLauncher, (context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.getSpecialBuffer()), FLOAT_TYPES);
input.tickReadDevice();
}
else
output = 0.;
}
else {
auto maxAlongDim = const_cast<NDArray&>(input).reduceAlongDims(reduce::Max, {dimension}, true);
(input - maxAlongDim).applyTransform(transform::Exp, &output); // output contains exponents temporarily
auto sumAlongDim = output.reduceAlongDims(reduce::Sum, {dimension}, true);
output /= sumAlongDim;
output.applyTransform(transform::Log);
input.tickReadDevice();
}
PointersManager manager(context, "helpers::logSoftmax");
manager.synchronize();
output.tickWriteDevice();
}
///////////////////////////////////////////////////////////////////
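// softMaxDerivForVectorCuda reuses the softmax phases and then applies the
// elementwise derivative s * (1 - s), where s is the softmax output.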
template<typename T>
__global__ linkage void softMaxDerivForVectorCuda(const void *vx, const Nd4jLong *xzShapeInfo, void *vz) {
// logic of this kernel is based on the assumption that gridDim == 1 (a single thread block)
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
__shared__ Nd4jLong len;
__shared__ int numOfIters;
__shared__ T* shmem;
if (threadIdx.x == 0) {
extern __shared__ char shared[];
shmem = reinterpret_cast<T*>(shared);
len = shape::length(xzShapeInfo);
numOfIters = (len + blockDim.x - 1) / blockDim.x; // ceil (len / blockDim.x)
}
__syncthreads();
T temp = -DataTypeUtils::max<T>(); // set start value to compare with at first iteration, FIXME: what if T is unsigned ??
// ************ evaluate max element in input array x ************ //
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx < len) {
const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo, len);
shmem[threadIdx.x] = (threadIdx.x != 0) ? x[offset] : nd4j::math::nd4j_max<T>(x[offset], temp); // take into account max element evaluated on previous iteration and stored in temp
}
else
shmem[threadIdx.x] = -DataTypeUtils::max<T>(); // FIXME: what if T is unsigned ??
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2) {
if(threadIdx.x < s)
shmem[threadIdx.x] = nd4j::math::nd4j_max<T>(shmem[threadIdx.x], shmem[threadIdx.x + s]);
__syncthreads();
}
temp = shmem[0]; // save max value calculated at current iteration
}
const T max = temp;
temp = 0;
// ************ evaluate exp(x[offset] - max) for each element and store it in shared memory shmem ************ //
// at the same time evaluate the sum of the exponents; the sum will be stored in shmem[0]
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx < len) {
const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo, len);
z[offset] = nd4j::math::nd4j_exp<T, T>(x[offset] - max);
shmem[threadIdx.x] = (threadIdx.x != 0) ? z[offset] : (z[offset] + temp); // take into account sum element evaluated on previous iteration and stored in temp
}
else
shmem[threadIdx.x] = 0;
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2) {
if(threadIdx.x < s)
shmem[threadIdx.x] += shmem[threadIdx.x + s];
__syncthreads();
}
temp = shmem[0]; // save sum calculated at current iteration
}
// ************ evaluate (z[offset] / sum) and derivative z[offset] = z[offset] * (1 - z[offset]) ************ //
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx >= len) continue;
const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo, len);
z[offset] /= shmem[0];
z[offset] *= (1.f - z[offset]); // derivative
}
}
///////////////////////////////////////////////////////////////////
template <typename T>
linkage void softMaxDerivForVectorCudaLauncher(const hipStream_t* stream, const void *vx, const Nd4jLong *xzShapeInfo, void *vz) {
hipLaunchKernelGGL(( softMaxDerivForVectorCuda<T>), dim3(1), dim3(MAX_NUM_THREADS), MAX_NUM_THREADS * sizeof(T) + 512, *stream, vx, xzShapeInfo, vz);
}
///////////////////////////////////////////////////////////////////
void softmaxDerivative(nd4j::LaunchContext * context, const NDArray& input, NDArray& output, const int dimension) {
if(!input.isActualOnDeviceSide()) input.syncToDevice();
const int rank = input.rankOf();
int temp;
if(shape::isCommonVector(input.getShapeInfo(), temp)) {
BUILD_SINGLE_SELECTOR(input.dataType(), softMaxDerivForVectorCudaLauncher, (context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.getSpecialBuffer()), FLOAT_TYPES);
input.tickReadDevice();
}
else {
auto maxAlongDim = const_cast<NDArray&>(input).reduceAlongDims(reduce::Max, {dimension}, true);
(input - maxAlongDim).applyTransform(transform::Exp, &output); // output contains exponents temporarily
auto sumAlongDim = output.reduceAlongDims(reduce::Sum, {dimension}, true);
output /= sumAlongDim;
output *= (1.f - output); // derivative
input.tickReadDevice();
}
PointersManager manager(context, "helpers::softmaxDerivative");
manager.synchronize();
output.tickWriteDevice();
}
///////////////////////////////////////////////////////////////////
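// preluBPCuda computes the PReLU backward pass. Each thread owns one element of
// alpha and iterates over all input positions that broadcast against it:
//   dLdI = dLdO * (x < 0 ? alpha : 1)
//   dLdA accumulates dLdO * x over the owned positions where x < 0
// Since a single thread accumulates dLdA for its alpha index, no atomics are needed.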
template<typename X, typename Y, typename Z>
__global__ linkage void preluBPCuda(const void *vIn, const Nd4jLong *inShapeInfo,
const void *vAlpha, const Nd4jLong *alphaShapeInfo,
const void *vdLdO, const Nd4jLong *dLdOShapeInfo,
void *vdLdI, const Nd4jLong *dLdIShapeInfo,
void *vdLdA, const Nd4jLong *dLdAShapeInfo) {
const auto in = reinterpret_cast<const X*>(vIn);
const auto alpha = reinterpret_cast<const Y*>(vAlpha);
const auto dLdO = reinterpret_cast<const Z*>(vdLdO);
auto dLdI = reinterpret_cast<Z*>(vdLdI);
auto dLdA = reinterpret_cast<Z*>(vdLdA);
__shared__ Nd4jLong alphaLen;
if (threadIdx.x == 0)
alphaLen = shape::length(alphaShapeInfo);
__syncthreads();
const auto i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= alphaLen) return;
Nd4jLong inputIdxs[MAX_RANK*2];
int numIdxs = shape::outerArrayOffsets(inputIdxs, i, inShapeInfo, alphaShapeInfo);
Nd4jLong dLdOIdxs[MAX_RANK*2];
shape::outerArrayOffsets(dLdOIdxs, i, dLdOShapeInfo, alphaShapeInfo);
Nd4jLong dLdIIdxs[MAX_RANK*2];
shape::outerArrayOffsets(dLdIIdxs, i, dLdIShapeInfo, alphaShapeInfo);
const auto alphaOffset = shape::getIndexOffset(i, alphaShapeInfo, alphaLen);
const auto dLdAOffset = shape::getIndexOffset(i, dLdAShapeInfo, alphaLen);
for(Nd4jLong j = 0; j < numIdxs; ++j) {
const auto inInd = inputIdxs[j];
const auto dLdOInd = dLdOIdxs[j];
const auto dLdIInd = dLdIIdxs[j];
if(in[inInd] < 0) {
dLdI[dLdIInd] = dLdO[dLdOInd] * alpha[alphaOffset];
auto prevVal = dLdA[dLdAOffset];
prevVal = prevVal + dLdO[dLdOInd] * in[inInd];
dLdA[dLdAOffset] = prevVal;
}
else
dLdI[dLdIInd] = dLdO[dLdOInd];
}
}
template<typename X, typename Y, typename Z>
__host__ linkage void preluBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream, const void *vIn, const Nd4jLong *inShapeInfo, const void *vAlpha, const Nd4jLong *alphaShapeInfo, const void *vdLdO, const Nd4jLong *dLdOShapeInfo, void *vdLdI, const Nd4jLong *dLdIShapeInfo, void *vdLdA, const Nd4jLong *dLdAShapeInfo) {
hipLaunchKernelGGL(( preluBPCuda<X, Y, Z>), dim3(blocksPerGrid), dim3(threadsPerBlock), 1024, *stream, vIn, inShapeInfo, vAlpha, alphaShapeInfo, vdLdO, dLdOShapeInfo, vdLdI, dLdIShapeInfo, vdLdA, dLdAShapeInfo);
}
//////////////////////////////////////////////////////////////////////////
void preluBP(nd4j::LaunchContext * context, const NDArray& input, const NDArray& alpha, const NDArray& dLdO, NDArray& dLdI, NDArray& dLdA) {
if(!input.isActualOnDeviceSide()) input.syncToDevice();
if(!alpha.isActualOnDeviceSide()) alpha.syncToDevice();
if(!dLdO.isActualOnDeviceSide()) dLdO.syncToDevice();
const auto xType = input.dataType();
const auto yType = alpha.dataType();
const auto zType = dLdO.dataType();
int threadsPerBlock = MAX_NUM_THREADS;
int blocksPerGrid = (alpha.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
BUILD_TRIPLE_SELECTOR(xType, yType, zType, preluBPCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), alpha.getSpecialBuffer(), alpha.getSpecialShapeInfo(), dLdO.getSpecialBuffer(), dLdO.getSpecialShapeInfo(), dLdI.getSpecialBuffer(), dLdI.getSpecialShapeInfo(), dLdA.getSpecialBuffer(), dLdA.getSpecialShapeInfo()), LIBND4J_TYPES, FLOAT_TYPES, FLOAT_TYPES);
input.tickReadHost();
alpha.tickReadHost();
dLdO.tickReadHost();
dLdI.tickWriteDevice();
dLdA.tickWriteDevice();
}
template <typename T>
linkage void thresholdRelu_(NDArray const& input, double threshold, NDArray& output) {
auto routine = LAMBDA_T(_x, threshold) {
return _x > (T)threshold ? _x: (T)0.f;
};
const_cast<NDArray&>(input).applyLambda(routine, &output);
}
void thresholdRelu(nd4j::LaunchContext * context, NDArray const& input, double threshold, NDArray& output) {
BUILD_SINGLE_SELECTOR(input.dataType(), thresholdRelu_, (input, threshold, output), FLOAT_TYPES);
}
template <typename T>
linkage void thresholdReluDerivative_(NDArray* input, double theta, NDArray* dLdO, NDArray* output) {
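// NOTE: empty stub -- the threshold relu backward pass is not implemented in this file.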
}
void thresholdReluDerivative(nd4j::LaunchContext * context, NDArray* input, double threshold, NDArray* dLdO, NDArray* output) {
BUILD_SINGLE_SELECTOR(input->dataType(), thresholdReluDerivative_, (input, threshold, dLdO, output), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void thresholdReluDerivative_, (NDArray* input, double threshold, NDArray* dLdO, NDArray* output), FLOAT_TYPES);
BUILD_DOUBLE_TEMPLATE(template void preluCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream, const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz), LIBND4J_TYPES, FLOAT_TYPES);
BUILD_TRIPLE_TEMPLATE(template void preluBPCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream, const void *vIn, const Nd4jLong *inShapeInfo, const void *vAlpha, const Nd4jLong *alphaShapeInfo, const void *vdLdO, const Nd4jLong *dLdOShapeInfo, void *vdLdI, const Nd4jLong *dLdIShapeInfo, void *vdLdA, const Nd4jLong *dLdAShapeInfo), LIBND4J_TYPES, FLOAT_TYPES, FLOAT_TYPES);
BUILD_SINGLE_TEMPLATE(template void softMaxForVectorCudaLauncher, (const hipStream_t* stream, const void *vx, const Nd4jLong *xzShapeInfo, void *vz), FLOAT_TYPES);
BUILD_SINGLE_TEMPLATE(template void softMaxDerivForVectorCudaLauncher, (const hipStream_t* stream, const void *vx, const Nd4jLong *xzShapeInfo, void *vz), FLOAT_TYPES);
}
}
}
| 39e04968ab94c574407b64ed0fd435b93dd0d676.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected]), created on 19.04.2018
// @author [email protected]
//
#include <op_boilerplate.h>
#include <ops/declarable/helpers/activations.h>
#include <ShapeUtils.h>
#include <numeric>
#include <PointersManager.h>
namespace nd4j {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
template<typename X, typename Y>
__global__ void preluCuda(const void *vx, const Nd4jLong *xShapeInfo,
const void *vy, const Nd4jLong *yShapeInfo,
void *vz) {
const auto x = reinterpret_cast<const X*>(vx);
const auto y = reinterpret_cast<const Y*>(vy);
auto z = reinterpret_cast<X*>(vz);
__shared__ Nd4jLong len;
if (threadIdx.x == 0)
len = shape::length(xShapeInfo);
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto totalThreads = gridDim.x * blockDim.x;
for (int i = tid; i < len; i += totalThreads) {
const auto xzOffset = shape::getIndexOffset(i, xShapeInfo, len);
const auto xVal = x[xzOffset];
if(xVal < 0)
z[xzOffset] = xVal * y[shape::subArrayOffset(i, xShapeInfo, yShapeInfo)];
else
z[xzOffset] = xVal;
}
}
///////////////////////////////////////////////////////////////////
template<typename X, typename Y>
linkage void preluCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream, const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz) {
preluCuda<X, Y><<<blocksPerGrid, threadsPerBlock, 1024, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, vz);
}
///////////////////////////////////////////////////////////////////
void prelu(nd4j::LaunchContext * context, const NDArray& input, const NDArray& alpha, NDArray& output) {
if(!input.isActualOnDeviceSide()) input.syncToDevice();
if(!alpha.isActualOnDeviceSide()) alpha.syncToDevice();
const auto xType = input.dataType();
const auto yType = alpha.dataType();
int threadsPerBlock = MAX_NUM_THREADS;
int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
BUILD_DOUBLE_SELECTOR(xType, yType, preluCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), alpha.getSpecialBuffer(), alpha.getSpecialShapeInfo(), output.getSpecialBuffer()), LIBND4J_TYPES, FLOAT_TYPES);
input.tickReadHost();
alpha.tickReadHost();
output.tickWriteDevice();
}
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ void softMaxForVectorCuda(const void *vx, const Nd4jLong *xzShapeInfo, void *vz) {
// logic of this kernel is based on the assumption that gridDim == 1 (a single thread block)
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
__shared__ Nd4jLong len;
__shared__ int numOfIters;
__shared__ T* shmem;
if (threadIdx.x == 0) {
extern __shared__ char shared[];
shmem = reinterpret_cast<T*>(shared);
len = shape::length(xzShapeInfo);
numOfIters = (len + blockDim.x - 1) / blockDim.x; // ceil (len / blockDim.x)
}
__syncthreads();
T temp = -DataTypeUtils::max<T>(); // set start value to compare with at first iteration, FIXME: what if T is unsigned ??
// ************ evaluate max element in input array x ************ //
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx < len) {
const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo, len);
shmem[threadIdx.x] = (threadIdx.x != 0) ? x[offset] : nd4j::math::nd4j_max<T>(x[offset], temp); // take into account max element evaluated on previous iteration and stored in temp
}
else
shmem[threadIdx.x] = -DataTypeUtils::max<T>(); // FIXME: what if T is unsigned ??
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2) {
if(threadIdx.x < s)
shmem[threadIdx.x] = nd4j::math::nd4j_max<T>(shmem[threadIdx.x], shmem[threadIdx.x + s]);
__syncthreads();
}
temp = shmem[0]; // save max value calculated at current iteration
}
const T max = temp;
temp = 0;
// ************ evaluate exp(x[offset] - max) for each element and store it in shared memory shmem ************ //
// at the same time evaluate the sum of the exponents; the sum will be stored in shmem[0]
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx < len) {
const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo, len);
z[offset] = nd4j::math::nd4j_exp<T, T>(x[offset] - max);
shmem[threadIdx.x] = (threadIdx.x != 0) ? z[offset] : (z[offset] + temp); // take into account sum element evaluated on previous iteration and stored in temp
}
else
shmem[threadIdx.x] = 0;
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2) {
if(threadIdx.x < s)
shmem[threadIdx.x] += shmem[threadIdx.x + s];
__syncthreads();
}
temp = shmem[0]; // save sum calculated at current iteration
}
// ************ evaluate z[offset] / sum ************ //
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx >= len) continue;
const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo, len);
z[offset] /= shmem[0];
}
}
///////////////////////////////////////////////////////////////////
template <typename T>
linkage void softMaxForVectorCudaLauncher(const cudaStream_t* stream, const void *vx, const Nd4jLong *xzShapeInfo, void *vz) {
softMaxForVectorCuda<T><<<1, MAX_NUM_THREADS, MAX_NUM_THREADS * sizeof(T) + 512, *stream>>>(vx, xzShapeInfo, vz);
}
//////////////////////////////////////////////////////////////////////////
void softmax(nd4j::LaunchContext * context, const NDArray& input, NDArray& output, const int dimension) {
if(!input.isActualOnDeviceSide()) input.syncToDevice();
const int rank = input.rankOf();
if(input.isVector()) {
if(rank == 1 || input.sizeAt(dimension) != 1) {
BUILD_SINGLE_SELECTOR(input.dataType(), softMaxForVectorCudaLauncher, (context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.getSpecialBuffer()), FLOAT_TYPES);
input.tickReadDevice();
}
else
output = 1.;
}
else {
auto maxAlongDim = const_cast<NDArray&>(input).reduceAlongDims(reduce::Max, {dimension}, true);
(input - maxAlongDim).applyTransform(transform::Exp, &output); // output contains exponents temporarily
auto sumAlongDim = output.reduceAlongDims(reduce::Sum, {dimension}, true);
output /= sumAlongDim;
input.tickReadDevice();
}
PointersManager manager(context, "helpers::softmax");
manager.synchronize();
output.tickWriteDevice();
}
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ void logSoftMaxForVectorCuda(const void *vx, const Nd4jLong *xzShapeInfo, void *vz) {
// logic of this kernel is based on the assumption that gridDim == 1 (a single thread block)
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
__shared__ Nd4jLong len;
__shared__ int numOfIters;
__shared__ T* shmem;
if (threadIdx.x == 0) {
extern __shared__ char shared[];
shmem = reinterpret_cast<T*>(shared);
len = shape::length(xzShapeInfo);
numOfIters = (len + blockDim.x - 1) / blockDim.x; // ceil (len / blockDim.x)
}
__syncthreads();
T temp = -DataTypeUtils::max<T>(); // set start value to compare with at first iteration, FIXME: what if T is unsigned ??
// ************ evaluate max element in input array x ************ //
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx < len) {
const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo, len);
shmem[threadIdx.x] = (threadIdx.x != 0) ? x[offset] : nd4j::math::nd4j_max<T>(x[offset], temp); // take into account max element evaluated on previous iteration and stored in temp
}
else
shmem[threadIdx.x] = -DataTypeUtils::max<T>(); // FIXME: what if T is unsigned ??
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2) {
if(threadIdx.x < s)
shmem[threadIdx.x] = nd4j::math::nd4j_max<T>(shmem[threadIdx.x], shmem[threadIdx.x + s]);
__syncthreads();
}
temp = shmem[0]; // save max value calculated at current iteration
}
const T max = temp;
temp = 0;
// ************ evaluate exp(x[offset] - max) for each element and store it in shared memory shmem ************ //
// at the same time evaluate the sum of the exponents; the sum will be stored in shmem[0]
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx < len) {
const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo, len);
z[offset] = nd4j::math::nd4j_exp<T, T>(x[offset] - max);
shmem[threadIdx.x] = (threadIdx.x != 0) ? z[offset] : (z[offset] + temp); // take into account sum element evaluated on previous iteration and stored in temp
}
else
shmem[threadIdx.x] = 0;
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2) {
if(threadIdx.x < s)
shmem[threadIdx.x] += shmem[threadIdx.x + s];
__syncthreads();
}
temp = shmem[0]; // save sum calculated at current iteration
}
// ************ evaluate log(z[offset] / sum) ************ //
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx >= len) continue;
const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo, len);
z[offset] = nd4j::math::nd4j_log<T,T>(z[offset] / shmem[0]);
}
}
///////////////////////////////////////////////////////////////////
template <typename T>
linkage void logSoftMaxForVectorCudaLauncher(const cudaStream_t* stream, const void *vx, const Nd4jLong *xzShapeInfo, void *vz) {
logSoftMaxForVectorCuda<T><<<1, MAX_NUM_THREADS, MAX_NUM_THREADS * sizeof(T) + 512, *stream>>>(vx, xzShapeInfo, vz);
}
//////////////////////////////////////////////////////////////////////////
void logSoftmax(nd4j::LaunchContext * context, const NDArray& input, NDArray& output, const int dimension) {
if(!input.isActualOnDeviceSide()) input.syncToDevice();
const int rank = input.rankOf();
if(input.isVector()) {
if(rank == 1 || input.sizeAt(dimension) != 1) {
BUILD_SINGLE_SELECTOR(input.dataType(), logSoftMaxForVectorCudaLauncher, (context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.getSpecialBuffer()), FLOAT_TYPES);
input.tickReadDevice();
}
else
output = 0.;
}
else {
auto maxAlongDim = const_cast<NDArray&>(input).reduceAlongDims(reduce::Max, {dimension}, true);
(input - maxAlongDim).applyTransform(transform::Exp, &output); // output contains exponents temporarily
auto sumAlongDim = output.reduceAlongDims(reduce::Sum, {dimension}, true);
output /= sumAlongDim;
output.applyTransform(transform::Log);
input.tickReadDevice();
}
PointersManager manager(context, "helpers::logSoftmax");
manager.synchronize();
output.tickWriteDevice();
}
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ linkage void softMaxDerivForVectorCuda(const void *vx, const Nd4jLong *xzShapeInfo, void *vz) {
// logic of this kernel is based on the assumption that gridDim == 1 (a single thread block)
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
__shared__ Nd4jLong len;
__shared__ int numOfIters;
__shared__ T* shmem;
if (threadIdx.x == 0) {
extern __shared__ char shared[];
shmem = reinterpret_cast<T*>(shared);
len = shape::length(xzShapeInfo);
numOfIters = (len + blockDim.x - 1) / blockDim.x; // ceil (len / blockDim.x)
}
__syncthreads();
T temp = -DataTypeUtils::max<T>(); // set start value to compare with at first iteration, FIXME: what if T is unsigned ??
// ************ evaluate max element in input array x ************ //
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx < len) {
const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo, len);
shmem[threadIdx.x] = (threadIdx.x != 0) ? x[offset] : nd4j::math::nd4j_max<T>(x[offset], temp); // take into account max element evaluated on previous iteration and stored in temp
}
else
shmem[threadIdx.x] = -DataTypeUtils::max<T>(); // FIXME: what if T is unsigned ??
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2) {
if(threadIdx.x < s)
shmem[threadIdx.x] = nd4j::math::nd4j_max<T>(shmem[threadIdx.x], shmem[threadIdx.x + s]);
__syncthreads();
}
temp = shmem[0]; // save max value calculated at current iteration
}
const T max = temp;
temp = 0;
// ************ evaluate exp(x[offset] - max) for each element and store it in shared memory shmem ************ //
// at the same time evaluate the sum of the exponents; the sum will be stored in shmem[0]
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx < len) {
const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo, len);
z[offset] = nd4j::math::nd4j_exp<T, T>(x[offset] - max);
shmem[threadIdx.x] = (threadIdx.x != 0) ? z[offset] : (z[offset] + temp); // take into account sum element evaluated on previous iteration and stored in temp
}
else
shmem[threadIdx.x] = 0;
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2) {
if(threadIdx.x < s)
shmem[threadIdx.x] += shmem[threadIdx.x + s];
__syncthreads();
}
temp = shmem[0]; // save sum calculated at current iteration
}
// ************ evaluate (z[offset] / sum) and derivative z[offset] = z[offset] * (1 - z[offset]) ************ //
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx >= len) continue;
const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo, len);
z[offset] /= shmem[0];
z[offset] *= (1.f - z[offset]); // derivative
}
}
///////////////////////////////////////////////////////////////////
template <typename T>
linkage void softMaxDerivForVectorCudaLauncher(const cudaStream_t* stream, const void *vx, const Nd4jLong *xzShapeInfo, void *vz) {
softMaxDerivForVectorCuda<T><<<1, MAX_NUM_THREADS, MAX_NUM_THREADS * sizeof(T) + 512, *stream>>>(vx, xzShapeInfo, vz);
}
///////////////////////////////////////////////////////////////////
void softmaxDerivative(nd4j::LaunchContext * context, const NDArray& input, NDArray& output, const int dimension) {
if(!input.isActualOnDeviceSide()) input.syncToDevice();
const int rank = input.rankOf();
int temp;
if(shape::isCommonVector(input.getShapeInfo(), temp)) {
BUILD_SINGLE_SELECTOR(input.dataType(), softMaxDerivForVectorCudaLauncher, (context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.getSpecialBuffer()), FLOAT_TYPES);
input.tickReadDevice();
}
else {
auto maxAlongDim = const_cast<NDArray&>(input).reduceAlongDims(reduce::Max, {dimension}, true);
(input - maxAlongDim).applyTransform(transform::Exp, &output); // output contains exponents temporarily
auto sumAlongDim = output.reduceAlongDims(reduce::Sum, {dimension}, true);
output /= sumAlongDim;
output *= (1.f - output); // derivative
input.tickReadDevice();
}
PointersManager manager(context, "helpers::softmaxDerivative");
manager.synchronize();
output.tickWriteDevice();
}
///////////////////////////////////////////////////////////////////
template<typename X, typename Y, typename Z>
__global__ linkage void preluBPCuda(const void *vIn, const Nd4jLong *inShapeInfo,
const void *vAlpha, const Nd4jLong *alphaShapeInfo,
const void *vdLdO, const Nd4jLong *dLdOShapeInfo,
void *vdLdI, const Nd4jLong *dLdIShapeInfo,
void *vdLdA, const Nd4jLong *dLdAShapeInfo) {
const auto in = reinterpret_cast<const X*>(vIn);
const auto alpha = reinterpret_cast<const Y*>(vAlpha);
const auto dLdO = reinterpret_cast<const Z*>(vdLdO);
auto dLdI = reinterpret_cast<Z*>(vdLdI);
auto dLdA = reinterpret_cast<Z*>(vdLdA);
__shared__ Nd4jLong alphaLen;
if (threadIdx.x == 0)
alphaLen = shape::length(alphaShapeInfo);
__syncthreads();
const auto i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= alphaLen) return;
Nd4jLong inputIdxs[MAX_RANK*2];
int numIdxs = shape::outerArrayOffsets(inputIdxs, i, inShapeInfo, alphaShapeInfo);
Nd4jLong dLdOIdxs[MAX_RANK*2];
shape::outerArrayOffsets(dLdOIdxs, i, dLdOShapeInfo, alphaShapeInfo);
Nd4jLong dLdIIdxs[MAX_RANK*2];
shape::outerArrayOffsets(dLdIIdxs, i, dLdIShapeInfo, alphaShapeInfo);
const auto alphaOffset = shape::getIndexOffset(i, alphaShapeInfo, alphaLen);
const auto dLdAOffset = shape::getIndexOffset(i, dLdAShapeInfo, alphaLen);
for(Nd4jLong j = 0; j < numIdxs; ++j) {
const auto inInd = inputIdxs[j];
const auto dLdOInd = dLdOIdxs[j];
const auto dLdIInd = dLdIIdxs[j];
if(in[inInd] < 0) {
dLdI[dLdIInd] = dLdO[dLdOInd] * alpha[alphaOffset];
auto prevVal = dLdA[dLdAOffset];
prevVal = prevVal + dLdO[dLdOInd] * in[inInd];
dLdA[dLdAOffset] = prevVal;
}
else
dLdI[dLdIInd] = dLdO[dLdOInd];
}
}
template<typename X, typename Y, typename Z>
__host__ linkage void preluBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream, const void *vIn, const Nd4jLong *inShapeInfo, const void *vAlpha, const Nd4jLong *alphaShapeInfo, const void *vdLdO, const Nd4jLong *dLdOShapeInfo, void *vdLdI, const Nd4jLong *dLdIShapeInfo, void *vdLdA, const Nd4jLong *dLdAShapeInfo) {
preluBPCuda<X, Y, Z><<<blocksPerGrid, threadsPerBlock, 1024, *stream>>>(vIn, inShapeInfo, vAlpha, alphaShapeInfo, vdLdO, dLdOShapeInfo, vdLdI, dLdIShapeInfo, vdLdA, dLdAShapeInfo);
}
//////////////////////////////////////////////////////////////////////////
void preluBP(nd4j::LaunchContext * context, const NDArray& input, const NDArray& alpha, const NDArray& dLdO, NDArray& dLdI, NDArray& dLdA) {
if(!input.isActualOnDeviceSide()) input.syncToDevice();
if(!alpha.isActualOnDeviceSide()) alpha.syncToDevice();
if(!dLdO.isActualOnDeviceSide()) dLdO.syncToDevice();
const auto xType = input.dataType();
const auto yType = alpha.dataType();
const auto zType = dLdO.dataType();
int threadsPerBlock = MAX_NUM_THREADS;
int blocksPerGrid = (alpha.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
BUILD_TRIPLE_SELECTOR(xType, yType, zType, preluBPCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), alpha.getSpecialBuffer(), alpha.getSpecialShapeInfo(), dLdO.getSpecialBuffer(), dLdO.getSpecialShapeInfo(), dLdI.getSpecialBuffer(), dLdI.getSpecialShapeInfo(), dLdA.getSpecialBuffer(), dLdA.getSpecialShapeInfo()), LIBND4J_TYPES, FLOAT_TYPES, FLOAT_TYPES);
input.tickReadHost();
alpha.tickReadHost();
dLdO.tickReadHost();
dLdI.tickWriteDevice();
dLdA.tickWriteDevice();
}
template <typename T>
linkage void thresholdRelu_(NDArray const& input, double threshold, NDArray& output) {
auto routine = LAMBDA_T(_x, threshold) {
return _x > (T)threshold ? _x: (T)0.f;
};
const_cast<NDArray&>(input).applyLambda(routine, &output);
}
void thresholdRelu(nd4j::LaunchContext * context, NDArray const& input, double threshold, NDArray& output) {
BUILD_SINGLE_SELECTOR(input.dataType(), thresholdRelu_, (input, threshold, output), FLOAT_TYPES);
}
template <typename T>
linkage void thresholdReluDerivative_(NDArray* input, double theta, NDArray* dLdO, NDArray* output) {
}
void thresholdReluDerivative(nd4j::LaunchContext * context, NDArray* input, double threshold, NDArray* dLdO, NDArray* output) {
BUILD_SINGLE_SELECTOR(input->dataType(), thresholdReluDerivative_, (input, threshold, dLdO, output), FLOAT_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void thresholdReluDerivative_, (NDArray* input, double threshold, NDArray* dLdO, NDArray* output), FLOAT_TYPES);
BUILD_DOUBLE_TEMPLATE(template void preluCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream, const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz), LIBND4J_TYPES, FLOAT_TYPES);
BUILD_TRIPLE_TEMPLATE(template void preluBPCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream, const void *vIn, const Nd4jLong *inShapeInfo, const void *vAlpha, const Nd4jLong *alphaShapeInfo, const void *vdLdO, const Nd4jLong *dLdOShapeInfo, void *vdLdI, const Nd4jLong *dLdIShapeInfo, void *vdLdA, const Nd4jLong *dLdAShapeInfo), LIBND4J_TYPES, FLOAT_TYPES, FLOAT_TYPES);
BUILD_SINGLE_TEMPLATE(template void softMaxForVectorCudaLauncher, (const cudaStream_t* stream, const void *vx, const Nd4jLong *xzShapeInfo, void *vz), FLOAT_TYPES);
BUILD_SINGLE_TEMPLATE(template void softMaxDerivForVectorCudaLauncher, (const cudaStream_t* stream, const void *vx, const Nd4jLong *xzShapeInfo, void *vz), FLOAT_TYPES);
}
}
}
|
ff770f06b92237c3d5878bbee2d9fd123488355d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
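// Pointer-chase microbenchmark: init_ptr_chase links the buffer into a circular
// chain of pointers spaced `stride` bytes apart, and ldr_to_use_latency walks
// the chain with fully dependent loads so the time per iteration approximates
// the load-to-use latency of the memory level the buffer fits in.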
__global__ void init_ptr_chase(size_t *p, int buf_size, int stride) {
int stride_num = buf_size / stride;
if (stride_num <= 1) {
return;
}
for (int i = 0; i < stride_num - 1; ++i) {
p[i * stride / sizeof(size_t)] = (size_t)(p + (i + 1) * stride / sizeof(size_t));
//printf("%p, %p\n", &(p[i * stride / sizeof(size_t)]), p[i * stride / sizeof(size_t)]);
//std::cerr << &(p[i * stride / sizeof(size_t)]) << " " << p[i * stride / sizeof(size_t)] << std::endl;
}
p[(stride_num - 1) * stride / sizeof(size_t)] = (size_t)p;
//printf("%p, %p\n", &(p[(stride_num - 1) * stride / sizeof(size_t)]), p[(stride_num - 1) * stride / sizeof(size_t)]);
}
__global__ void ldr_to_use_latency(int *out, size_t *in) {
size_t access_num = 1024 * 1024 * 64;
size_t *p = in;
for (size_t i = 0; i < access_num; ++i) {
size_t tmp = *p;
p = (size_t*)tmp;
}
*out = (int)*p;
}
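// --- Illustrative host-side driver (a sketch added for clarity; it is not part
// of the original benchmark, and the buffer size, stride and single-thread
// launch below are assumptions). Guarded so it does not affect existing builds. ---
#ifdef PTR_CHASE_EXAMPLE_MAIN
int main() {
    const int buf_size = 64 * 1024 * 1024;   // 64 MiB working set
    const int stride = 128;                  // distance between chained pointers, in bytes
    size_t *d_buf = NULL;
    int *d_out = NULL;
    hipMalloc((void**)&d_buf, buf_size);
    hipMalloc((void**)&d_out, sizeof(int));
    // Build the chain once, then chase it with a single thread so every load depends on the previous one.
    hipLaunchKernelGGL(init_ptr_chase, dim3(1), dim3(1), 0, 0, d_buf, buf_size, stride);
    hipLaunchKernelGGL(ldr_to_use_latency, dim3(1), dim3(1), 0, 0, d_out, d_buf);
    hipDeviceSynchronize();
    hipFree(d_buf);
    hipFree(d_out);
    return 0;
}
#endif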
| ff770f06b92237c3d5878bbee2d9fd123488355d.cu | #include <stdio.h>
__global__ void init_ptr_chase(size_t *p, int buf_size, int stride) {
int stride_num = buf_size / stride;
if (stride_num <= 1) {
return;
}
for (int i = 0; i < stride_num - 1; ++i) {
p[i * stride / sizeof(size_t)] = (size_t)(p + (i + 1) * stride / sizeof(size_t));
//printf("%p, %p\n", &(p[i * stride / sizeof(size_t)]), p[i * stride / sizeof(size_t)]);
//std::cerr << &(p[i * stride / sizeof(size_t)]) << " " << p[i * stride / sizeof(size_t)] << std::endl;
}
p[(stride_num - 1) * stride / sizeof(size_t)] = (size_t)p;
//printf("%p, %p\n", &(p[(stride_num - 1) * stride / sizeof(size_t)]), p[(stride_num - 1) * stride / sizeof(size_t)]);
}
__global__ void ldr_to_use_latency(int *out, size_t *in) {
size_t access_num = 1024 * 1024 * 64;
size_t *p = in;
for (size_t i = 0; i < access_num; ++i) {
size_t tmp = *p;
p = (size_t*)tmp;
}
*out = (int)*p;
}
|
e36e995d6ccee2f0fdd3ef5a9c5c8b4787a3260b.hip | // !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=[8,8] --gridDim=[1,1]
#include <hip/hip_runtime.h>
#define _2D_ACCESS(A, y, x, X_DIM) A[(y)*(X_DIM)+(x)]
//////////////////////////////////////////////////////////////////////////////
//// THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF
//// ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO
//// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A
//// PARTICULAR PURPOSE.
////
//// Copyright (c) Microsoft Corporation. All rights reserved
//////////////////////////////////////////////////////////////////////////////
//----------------------------------------------------------------------------
// File: TransitiveClosure.cpp
//
// Contains the implementation of algorithms which explore connectivity between
// nodes in a graph and determine the shortest path.
// This is based on paper http://www.seas.upenn.edu/~kiderj/research/papers/APSP-gh08-fin-T.pdf
//----------------------------------------------------------------------------
// Defines to help with AMP->OpenCL translation
#define X_DIMENSION 0
#define Y_DIMENSION 1
// Constants - specifies tile size
#define TILE_SIZE (1 << 3)
#define num_vertices (1 << 6)
// State of connection
#define UNCONNECTED 0
#define DIRECTLY_CONNECTED 1
#define INDIRECTLY_CONNECTED 2
//----------------------------------------------------------------------------
// Stage1 - determine connectivity between vertices within a TILE - primary
//----------------------------------------------------------------------------
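// Stage 1 of a blocked (tiled) Floyd-Warshall pass: a single TILE_SIZE x TILE_SIZE
// thread block loads the diagonal ("primary") tile for this pass into shared
// memory, relaxes it over k = 0 .. TILE_SIZE-1, and writes it back. The first
// barrier below is wrapped in #ifndef MUTATION, which appears to be a hook for
// barrier-removal bug-injection experiments (see the BUGINJECT marker).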
__global__ void transitive_closure_stage1_kernel(unsigned int* graph, int passnum)
{
// Load primary block into shared memory (primary_block_buffer)
__shared__ unsigned int primary_block_buffer[TILE_SIZE][TILE_SIZE];
// TODO: check that in OpenCL the order is 0=x, 1=y, 2=z (in AMP it is reversed)
int idxY = passnum * TILE_SIZE + threadIdx.y;
int idxX = passnum * TILE_SIZE + threadIdx.x;
primary_block_buffer[threadIdx.y][threadIdx.x] = _2D_ACCESS(graph, idxY, idxX, num_vertices);
#ifndef MUTATION
/* BUGINJECT: REMOVE_BARRIER, DOWN */
__syncthreads();
#endif
// Now perform the actual Floyd-Warshall algorithm on this block
for (unsigned int k = 0;
k < TILE_SIZE; ++k)
{
if ( primary_block_buffer[threadIdx.y][threadIdx.x] == UNCONNECTED)
{
if ( (primary_block_buffer[threadIdx.y][k] != UNCONNECTED) && (primary_block_buffer[k][threadIdx.x] != UNCONNECTED) )
{
primary_block_buffer[threadIdx.y][threadIdx.x] = passnum*TILE_SIZE + k + INDIRECTLY_CONNECTED;
}
}
__syncthreads();
}
_2D_ACCESS(graph, idxY, idxX, num_vertices) = primary_block_buffer[threadIdx.y][threadIdx.x];
}
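// Illustrative launch for one pass, matching the --blockDim=[8,8] --gridDim=[1,1]
// annotation at the top of this file (a sketch; the host driver is not part of this file):
//   dim3 block(TILE_SIZE, TILE_SIZE);
//   dim3 grid(1, 1);
//   hipLaunchKernelGGL(transitive_closure_stage1_kernel, grid, block, 0, 0, d_graph, passnum);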
| e36e995d6ccee2f0fdd3ef5a9c5c8b4787a3260b.cu | //pass
//--blockDim=[8,8] --gridDim=[1,1]
#include <cuda.h>
#define _2D_ACCESS(A, y, x, X_DIM) A[(y)*(X_DIM)+(x)]
//////////////////////////////////////////////////////////////////////////////
//// THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF
//// ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO
//// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A
//// PARTICULAR PURPOSE.
////
//// Copyright (c) Microsoft Corporation. All rights reserved
//////////////////////////////////////////////////////////////////////////////
//----------------------------------------------------------------------------
// File: TransitiveClosure.cpp
//
// Contains the implementation of algorithms which explore connectivity between
// nodes in a graph and determine the shortest path.
// This is based on paper http://www.seas.upenn.edu/~kiderj/research/papers/APSP-gh08-fin-T.pdf
//----------------------------------------------------------------------------
// Defines to help with AMP->OpenCL translation
#define X_DIMENSION 0
#define Y_DIMENSION 1
// Constants - specifies tile size
#define TILE_SIZE (1 << 3)
#define num_vertices (1 << 6)
// State of connection
#define UNCONNECTED 0
#define DIRECTLY_CONNECTED 1
#define INDIRECTLY_CONNECTED 2
//----------------------------------------------------------------------------
// Stage1 - determine connectivity between vertices within a TILE - primary
//----------------------------------------------------------------------------
__global__ void transitive_closure_stage1_kernel(unsigned int* graph, int passnum)
{
// Load primary block into shared memory (primary_block_buffer)
__shared__ unsigned int primary_block_buffer[TILE_SIZE][TILE_SIZE];
// TODO: check that in OpenCL the order is 0=x, 1=y, 2=z (in AMP it is reversed)
int idxY = passnum * TILE_SIZE + threadIdx.y;
int idxX = passnum * TILE_SIZE + threadIdx.x;
primary_block_buffer[threadIdx.y][threadIdx.x] = _2D_ACCESS(graph, idxY, idxX, num_vertices);
#ifndef MUTATION
/* BUGINJECT: REMOVE_BARRIER, DOWN */
__syncthreads();
#endif
// Now perform the actual Floyd-Warshall algorithm on this block
for (unsigned int k = 0;
k < TILE_SIZE; ++k)
{
if ( primary_block_buffer[threadIdx.y][threadIdx.x] == UNCONNECTED)
{
if ( (primary_block_buffer[threadIdx.y][k] != UNCONNECTED) && (primary_block_buffer[k][threadIdx.x] != UNCONNECTED) )
{
primary_block_buffer[threadIdx.y][threadIdx.x] = passnum*TILE_SIZE + k + INDIRECTLY_CONNECTED;
}
}
__syncthreads();
}
_2D_ACCESS(graph, idxY, idxX, num_vertices) = primary_block_buffer[threadIdx.y][threadIdx.x];
}
|
d099ebae1b872ed36c4b64fb1b1778d5ec83ce10.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <string.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include <unistd.h>
#include <time.h>
#include <assert.h>
//#include <matrix_mul_kernel.cu>
#include <edge_cuda.h>
#include <sys/time.h>
#include <sys/mman.h>
#include <time.h>
#include <util.h>
#include <ipv4.h>
#include <ipv6.h>
#include <arpa/inet.h>
#include <common.h>
#define CEIL(x, y) ( (x)/(y) + ( (x)%(y) ? 1 : 0 ) )
#define MAX(x, y) ( (x)>(y) ? (x) : (y) )
#define IPV6_REG_NUM 16
/**< [xia-router0 - xge0,1,2,3], [xia-router1 - xge0,1,2,3] */
LL src_mac_arr[2][4] = {{0x36d3bd211b00, 0x37d3bd211b00, 0xa8d6a3211b00, 0xa9d6a3211b00},
{0x44d7a3211b00, 0x45d7a3211b00, 0x0ad7a3211b00, 0x0bd7a3211b00}};
/**< [xia-router2 - xge0,1,4,5], [xia-router2 - xge2,3,6,7] */
LL dst_mac_arr[2][4] = {{0x6c10bb211b00, 0x6d10bb211b00, 0xc8a610ca0568, 0xc9a610ca0568},
{0x64d2bd211b00, 0x65d2bd211b00, 0xa2a610ca0568, 0xa3a610ca0568}};
uint64_t rss_seed = 0xdeadbeef;
int nStreams = 32;
hipStream_t* streams = NULL;
unsigned gTimerEventPeriod = 1000;
unsigned gTimerEventPeriodBatch = 200;
bool swizzle = false;
int n_batches = 1;
int n_requests_per_batch = 32;
bool save_regs = true;
int schedule_batch_size = 64;
#define CEIL(x, y) ( (x)/(y) + ( (x)%(y) ? 1 : 0 ) )
#define BLOCK_SIZE 32
#define MAX_THREADS_PER_BLOCK 1024
#define DEFAULT_MATRIX_SIZE 128
struct kernel_args {
unsigned buffer_size;
unsigned batch_size;
unsigned n_batches;
void* h_packet_buffer;
void* h_response_buffer;
void* g_packet_buffer;
void* g_response_buffer;
};
struct MemcParam
{
struct pkt_hdr_normal* packet_buf;
int* gpu_tbl24;
int* gpu_tbl8;
unsigned n;
int* reg_buffer;
bool save_regs;
};
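// Field layout note: on a typical 64-bit target the offsets of packet_buf,
// gpu_tbl24, gpu_tbl8, n, reg_buffer and save_regs are 0, 8, 16, 24, 32 and 40,
// which is what the cudaSetupEventArgument calls in IpForwardEDGE register.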
struct MemcParamSwizzle
{
struct pkt_hdr_batch* packet_buf;
int* gpu_tbl24;
int* gpu_tbl8;
unsigned n;
};
enum RunConfig {
BASE_IPV6=0,
EVENT_IPV6,
TIMER_IPV6,
EVENT_TIMER_BG_IPV6,
EVENT_TIMER_BATCH_BG_IPV6,
BG_TASK
};
enum ScheduleType {
SINGLE=0,
TIMER,
BATCH
};
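// SINGLE schedules each event once via cudaScheduleEvent, TIMER arms a periodic
// timer event (gTimerEventPeriod), and BATCH uses cudaScheduleEventTimerBatch to
// launch timer-driven batches of schedule_batch_size events.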
enum BgTaskType {
CONV=0,
MATRIX_MUL,
BACKPROP,
BFS
};
int bg_task_type = MATRIX_MUL;
#define CU_CHECK_ERR(err) \
if ( err != hipSuccess ) { \
printf("CUDA Error: %s\n", hipGetErrorString(hipGetLastError())); \
abort(); \
}
void randomInit(float* data, int size);
void configureParamMem(MemcParam* paramMem, size_t totalBufferSize, size_t batchSize, size_t maxBatches, struct kernel_args* args);
void configureParamMemSwizzle(MemcParamSwizzle* paramMem, size_t totalBufferSize, size_t batchSize, size_t maxBatches, struct kernel_args* args);
void PrintMatrices(float* h_A, float* h_B, float* h_C, int dimAx, int dimAy, int dimBx, int dimBy, int dimCx, int dimCy);
int run_conv_kernel(int argc, char **argv, bool block, bool warmup);
int IpForwardEDGE(int argc, char** argv, bool RunBackgroundTask, ScheduleType scheduleType, bool swizzle);
int IpForwardBase(int argc, char** argv, bool swizzle);
void generate_dummy_packet(struct pkt_hdr* pkt, unsigned gen_type);
unsigned init_normal_requests(struct kernel_args* args, bool alloc_response, int g_batch_size, int g_num_batches);
unsigned init_swizzle_requests(struct kernel_args* args, bool alloc_response, int g_batch_size, int g_num_batches);
void normal_packet(struct pkt_hdr_normal* pkt_hdr_normal_ptr, struct pkt_hdr* pkt, unsigned pkt_ind);
int in_cksum(unsigned char *buf, unsigned nbytes, int sum) ;
void swizzle_packet(struct pkt_hdr_batch* pkt_hdr_batch_ptr, struct pkt_hdr* pkt, unsigned pkt_ind);
static u_int32_t wrapsum (u_int32_t sum) ;
struct rte_lpm *ipv4_init();
void randomInit(float* data, int size);
int MatrixMulBase(int argc, char** argv, bool block);
unsigned init_ipv6_normal_requests(struct kernel_args* args, struct ipv6_prefix* prefix_arr, unsigned prefix_ind, int g_batch_size, int g_num_batches);
void ipv6_normal_packet(struct ipv6_pkt_hdr_normal* pkt_hdr_normal_ptr, struct ipv6_pkt_hdr* pkt, unsigned pkt_ind);
void ipv6_generate_dummy_packet(struct ipv6_pkt_hdr* pkt, struct ipv6_prefix* pfa);
static u_int32_t wrapsum (u_int32_t sum) ;
struct rte_lpm6 *ipv6_init();
//kernels
__global__ void ipv6_fwd_kernel(ipv6_pkt_hdr_normal* packet_batch, uint16_t* tbl24, uint16_t* tbl8, unsigned n_pkts, int* reg_buffer, bool save_regs);
__global__ void ipv6_fwd_kernel_save_regs(ipv6_pkt_hdr_normal* packet_batch, uint16_t* tbl24, uint16_t* tbl8, unsigned n_pkts, int* reg_buffer, bool save_regs);
extern "C" __global__ void matrixMul( float* C, float* A, float* B, int wA, int wB);
void _filterActs(float *images, int images_cols, int images_rows, float *filters, int filters_cols,
int filters_rows, float *targets, int targets_cols, int targets_rows,
int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
int numImgColors, int numGroups, float scaleTargets, float scaleOutput, int conv, hipStream_t stream, bool warmup);
void run_backprop(int argc, char **argv, bool block);
extern "C" int setup(int argc, char** argv);
int bfs_main(int argc, char** argv);
void run_bfs(int argc, char **argv, bool block) {
printf("MARIA inside run_bfs\n");
bfs_main(argc, argv);
}
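// Ceiling division: rounds the quotient toward +infinity; the sign test keeps it
// correct for negative operands as well.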
int div_up(int n, int d) {
return n / d + (((n < 0) ^ (d > 0)) && (n % d));
}
int main(int argc, char** argv) {
std::cout << "=== EDGE BEGIN ===" << std::endl;
int ret = 0;
RunConfig testToRun = EVENT_IPV6;
int opt;
opterr = 0;
int count = 0;
while( (opt = getopt(argc, argv, "t:s:g:q:z:p:c:b:n:g:i:") ) != -1 ) {
switch(opt) {
case 't':
count += 2;
testToRun = (RunConfig)atoi(optarg);
break;
case 'p':
count += 2;
gTimerEventPeriod = atoi(optarg);
break;
case 'i':
count += 2;
gTimerEventPeriodBatch = atoi(optarg);
break;
case 's':
count += 2;
swizzle = atoi(optarg);
break;
case 'n':
count += 2;
n_batches = atoi(optarg);
break;
case 'b':
count += 2;
n_requests_per_batch = atoi(optarg);
break;
case 'g':
count += 2;
bg_task_type = atoi(optarg);
break;
default:
std::cout << "Error: Unknown parameter: " << opt << std::endl;
abort();
}
}
streams = new hipStream_t[nStreams];
for( unsigned i=0; i<nStreams; ++i ) {
CU_CHECK_ERR( hipStreamCreate(&streams[i]) );
}
char** modArgv = &argv[count];
int modArgc = argc-count;
switch( testToRun ) {
case BASE_IPV6:
ret = IpForwardBase(modArgc, modArgv, swizzle);
break;
case EVENT_IPV6:
ret = IpForwardEDGE(modArgc, modArgv, false, SINGLE, swizzle);
break;
case TIMER_IPV6:
ret = IpForwardEDGE(modArgc, modArgv, false, TIMER, swizzle);
break;
case EVENT_TIMER_BG_IPV6:
ret = IpForwardEDGE(modArgc, modArgv, true, TIMER, swizzle);
break;
case EVENT_TIMER_BATCH_BG_IPV6:
ret = IpForwardEDGE(modArgc, modArgv, true, BATCH, swizzle);
break;
case BG_TASK:
printf("Running only background task: %d \n", bg_task_type);
if (bg_task_type == CONV) {
printf("Running background task: conv\n");
run_conv_kernel(argc, argv, true, false);
}
if (bg_task_type == MATRIX_MUL) {
MatrixMulBase(argc, argv, true);
}
if (bg_task_type == BACKPROP) {
run_backprop(argc, argv, true);
}
if (bg_task_type == BFS) {
run_bfs(argc, argv, true);
}
ret = 0;
break;
default:
std::cout << "Error: Undefined test configuration # (" << testToRun << ")" << std::endl;
break;
}
if( ret ) {
std::cout << "Error running test " << testToRun << " - Error=" << ret << std::endl;
}
std::cout << "=== EDGE END ===" << std::endl;
return ret;
}
int IpForwardBase(int argc, char** argv, bool swizzle) {
printf("IpForward EDGE Base test. Swizzle = %d\n", swizzle);
struct kernel_args k_args;
struct rte_lpm6 *lpm;
int num_prefixes = IPV6_NUM_RAND_PREFIXES;
int prefix_mem_size = num_prefixes * sizeof(struct ipv6_prefix);
struct ipv6_prefix *prefix_arr = (struct ipv6_prefix*)malloc(prefix_mem_size);
int mem_size = sizeof(*lpm) + (sizeof(lpm->tbl8[0]) *RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * IPV6_NUM_TBL8);
int rules_size = sizeof(struct rte_lpm6_rule) * 100000;
/* Allocate memory to store the LPM data structures. Zero out counters. */
lpm = (struct rte_lpm6 *) lpm6_hrd_malloc_socket(RTE_LPM6_SHM_KEY,
mem_size, 0);
int prefix_arr_i = rand() % IPV6_NUM_RAND_PREFIXES;
printf("Mem init trick - do ipv6_init in CPU\n");
CU_CHECK_ERR( edgeExtraipv6(1, (void*)lpm, IPV6_XIA_R2_PORT_MASK, (void*)prefix_arr, 1,n_requests_per_batch, n_batches ) );
init_ipv6_normal_requests(&k_args, prefix_arr, prefix_arr_i,n_requests_per_batch, n_batches);
/**< rte_lpm_tbl24_entry ~ rte_lpm_tbl8_entry ~ uint16_t */
int entry_sz = sizeof(struct rte_lpm6_tbl_entry);
int tbl24_bytes = RTE_LPM6_TBL24_NUM_ENTRIES * entry_sz;
int tbl8_bytes = (IPV6_NUM_TBL8 * RTE_LPM6_TBL8_GROUP_NUM_ENTRIES) * entry_sz;
int* gpu_tbl24 = 0;
int* gpu_tbl8 = 0;
/**< Alloc and copy tbl24 and tbl8 arrays to GPU memory */
printf("\tGPU master: alloc tbl24 (size = %lf MB) on device\n", (float)tbl24_bytes / 1e6);
CU_CHECK_ERR(hipMalloc(&gpu_tbl24, tbl24_bytes));
CU_CHECK_ERR(hipMemcpy(gpu_tbl24, lpm->tbl24, tbl24_bytes, hipMemcpyHostToDevice));
printf("\tGPU master: alloc tbl8 (size = %lf MB) on device\n", (float)tbl8_bytes / 1e6);
CU_CHECK_ERR(hipMalloc(&gpu_tbl8, tbl8_bytes));
CU_CHECK_ERR(hipMemcpy(gpu_tbl8, lpm->tbl8, tbl8_bytes, hipMemcpyHostToDevice));
CU_CHECK_ERR(hipMemcpy(k_args.g_packet_buffer, k_args.h_packet_buffer, k_args.buffer_size, hipMemcpyHostToDevice));
//launch kernel
dim3 block(n_requests_per_batch, 1, 1);
dim3 grid(n_batches, 1, 1);
unsigned n_packets = k_args.n_batches * k_args.batch_size;
hipLaunchKernelGGL(( ipv6_fwd_kernel), dim3(grid), dim3(block), 0, 0, (ipv6_pkt_hdr_normal*)k_args.g_packet_buffer, (uint16_t *)gpu_tbl24, (uint16_t *)gpu_tbl8, n_packets, NULL, false);
hipDeviceSynchronize();
return 0;
}
int IpForwardEDGE(int argc, char** argv, bool RunBackgroundTask, ScheduleType scheduleType, bool swizzle) {
printf("IpForward EDGE Test RunBackgroundTask: %d, ScheduleType: %d, Swizzle: %d\n", RunBackgroundTask, scheduleType, swizzle);
int MaxEventsNum = n_batches;
unsigned single_buffer_alloc_size = (n_requests_per_batch * sizeof(struct ipv6_pkt_hdr_normal));
struct kernel_args *k_args = (struct kernel_args *)malloc(MaxEventsNum*sizeof(struct kernel_args));
struct rte_lpm6 *lpm;
//struct ipv6_prefix *prefix_arr;
int num_prefixes = IPV6_NUM_RAND_PREFIXES;
int prefix_mem_size = num_prefixes * sizeof(struct ipv6_prefix);
struct ipv6_prefix *prefix_arr = (struct ipv6_prefix*)malloc(prefix_mem_size);
int mem_size = sizeof(*lpm) + (sizeof(lpm->tbl8[0]) *RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * IPV6_NUM_TBL8);
int rules_size = sizeof(struct rte_lpm6_rule) * 100000;
/* Allocate memory to store the LPM data structures. Zero out counters. */
lpm = (struct rte_lpm6 *) lpm6_hrd_malloc_socket(RTE_LPM6_SHM_KEY,
mem_size, 0);
//lpm = ipv6_init(IPV6_XIA_R2_PORT_MASK, &prefix_arr, 1);
int prefix_arr_i = rand() % IPV6_NUM_RAND_PREFIXES;
printf("Mem init trick - do ipv6_init in CPU\n");
CU_CHECK_ERR( edgeExtraipv6(1, (void*)lpm, IPV6_XIA_R2_PORT_MASK, (void*)prefix_arr, 1,n_requests_per_batch, n_batches ) );
// initialize host memory
for( unsigned batch=0; batch<MaxEventsNum; ++batch ) {
unsigned buffer_alloc_size = init_ipv6_normal_requests(&k_args[batch], prefix_arr, prefix_arr_i,n_requests_per_batch, n_batches);
//printf("Generated input packets for batch %d. buffer_alloc_size = %lld\n", single_buffer_alloc_size);
}
int entry_sz = sizeof(struct rte_lpm6_tbl_entry);
//int entry_sz = sizeof(struct rte_lpm_tbl24_entry);
int tbl24_bytes = RTE_LPM6_TBL24_NUM_ENTRIES * entry_sz;
int tbl8_bytes = (IPV6_NUM_TBL8 * RTE_LPM6_TBL8_GROUP_NUM_ENTRIES) * entry_sz;
int* gpu_tbl24 = 0;
int* gpu_tbl8 = 0;
/**< Alloc and copy tbl24 and tbl8 arrays to GPU memory */
printf("\tGPU master: alloc tbl24 (size = %lf MB) on device\n", (float)tbl24_bytes / 1e6);
CU_CHECK_ERR(hipMalloc(&gpu_tbl24, tbl24_bytes));
CU_CHECK_ERR(hipMemcpy(gpu_tbl24, lpm->tbl24, tbl24_bytes, hipMemcpyHostToDevice));
printf("\tGPU master: alloc tbl8 (size = %lf MB) on device\n", (float)tbl8_bytes / 1e6);
CU_CHECK_ERR(hipMalloc(&gpu_tbl8, tbl8_bytes));
CU_CHECK_ERR(hipMemcpy(gpu_tbl8, lpm->tbl8, tbl8_bytes, hipMemcpyHostToDevice));
//------------------------------------------//
// setup execution parameters
dim3 block(min(n_requests_per_batch, MAX_THREADS_PER_BLOCK), 1, 1);
dim3 grid(div_up(n_requests_per_batch, MAX_THREADS_PER_BLOCK), 1, 1);
// Register the event kernel
int eventId;
eventId = cudaRegisterEvent((void*)ipv6_fwd_kernel, (void*)ipv6_fwd_kernel_save_regs, grid, block, 0);
//Setup the arguments
CU_CHECK_ERR( cudaSetupEventArgument(sizeof(struct ipv6_pkt_hdr_normal*), 0) ); //packet buffer
CU_CHECK_ERR( cudaSetupEventArgument(sizeof(uint16_t*), 8) ); //gpu_tbl24
CU_CHECK_ERR( cudaSetupEventArgument(sizeof(uint16_t*), 16) ); //gpu_tbl8
CU_CHECK_ERR( cudaSetupEventArgument(sizeof(unsigned), 24) ); //n_packets
CU_CHECK_ERR( cudaSetupEventArgument(sizeof(int*), 32) ); //reg_buffer
CU_CHECK_ERR( cudaSetupEventArgument(sizeof(bool), 40) ); //save_regs
// Configure the parameter memory
unsigned paramSize = sizeof(MemcParam);
MemcParam* paramMem = (MemcParam*)cudaConfigureEventParam(eventId, paramSize, MaxEventsNum, false);
printf("MARIA DEBUG allocated param mem = %lld \n", paramMem);
configureParamMem(paramMem, single_buffer_alloc_size, n_requests_per_batch, MaxEventsNum, k_args);
//copy from host to gpu
MemcParam* curParam = paramMem;
for( unsigned batch=0; batch<MaxEventsNum; ++batch ) {
CU_CHECK_ERR(hipMemcpy(curParam->packet_buf, k_args[batch].h_packet_buffer, single_buffer_alloc_size, hipMemcpyHostToDevice));
curParam->gpu_tbl24 = gpu_tbl24;
curParam->gpu_tbl8 = gpu_tbl8;
curParam++;
}
//////////////////////HACK
paramMem = (MemcParam*)cudaConfigureEventParam(eventId, paramSize, MaxEventsNum, true);
////////////////////////
printf("Scheduling EDGE event\n");
if (scheduleType == TIMER) {
//Schedule the event kernel to run on a timer
CU_CHECK_ERR( cudaScheduleTimerEvent(eventId, gTimerEventPeriod) );
} else if (scheduleType==SINGLE) { //event
for( unsigned batch=0; batch<MaxEventsNum; ++batch ) {
CU_CHECK_ERR( cudaScheduleEvent(eventId) );
};
} else { //batch
CU_CHECK_ERR( cudaScheduleEventTimerBatch(eventId, gTimerEventPeriod, schedule_batch_size, gTimerEventPeriodBatch) );
};
if (RunBackgroundTask) {
if (bg_task_type == CONV) {
printf("Running background task: conv\n");
run_conv_kernel(argc, argv, false, false);
}
if (bg_task_type == MATRIX_MUL) {
MatrixMulBase(argc, argv, false);
}
if (bg_task_type == BACKPROP) {
run_backprop(argc, argv, true);
}
if (bg_task_type == BFS) {
run_bfs(argc, argv, true);
}
}
CU_CHECK_ERR( hipDeviceSynchronize() );
std::cout << "Success!" << std::endl;
return 0;
}
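/**< Fill one MemcParam entry per batch: point it at that batch's device packet
 *   buffer, allocate a per-batch register save buffer (reg_buffer), and record the
 *   batch size. The gpu_tbl24/gpu_tbl8 pointers are patched in by the caller. */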
void configureParamMem(MemcParam* paramMem, size_t totalBufferSize, size_t batchSize, size_t maxBatches, struct kernel_args* args)
{
MemcParam* curParam = paramMem;
for( unsigned batch=0; batch<maxBatches; ++batch ) {
//CU_CHECK_ERR( hipMalloc((void**)&curParam->packet_buf, totalBufferSize) );
curParam->packet_buf = (struct pkt_hdr_normal *)args[batch].g_packet_buffer;
int reg_buffer_size = 32 * IPV4_REG_NUM * 512;
CU_CHECK_ERR(hipMalloc(&curParam->reg_buffer, reg_buffer_size));
curParam->n = batchSize;
curParam->save_regs = true;
curParam++;
}
}
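/**< Swizzled-layout variant of configureParamMem: only wires up the batched
 *   (structure-of-arrays) packet buffer and the batch size; no register save
 *   buffer is allocated here. */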
void configureParamMemSwizzle(MemcParamSwizzle* paramMem, size_t totalBufferSize, size_t batchSize, size_t maxBatches, struct kernel_args* args)
{
MemcParamSwizzle* curParam = paramMem;
for( unsigned batch=0; batch<maxBatches; ++batch ) {
//CU_CHECK_ERR( hipMalloc((void**)&curParam->packet_buf, totalBufferSize) );
curParam->packet_buf = (struct pkt_hdr_batch*)args[batch].g_packet_buffer;
curParam->n = batchSize;
//curParam->save_regs = save_regs;
curParam++;
}
}
void randomInit(float* data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
//////////////////////////////////////////////////////IP-FORWARDING related functions /////////////////////////////////////////////
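/**< Build g_num_batches batches of g_batch_size dummy IPv4 packets in the flat
 *   array-of-structures (pkt_hdr_normal) layout. Allocates matching host and
 *   device buffers (plus optional response buffers), records them in args, and
 *   returns the buffer size in bytes. */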
unsigned init_normal_requests(struct kernel_args* args, bool alloc_response, int g_batch_size, int g_num_batches)
{
unsigned total_num_requests = g_batch_size * g_num_batches;
unsigned buffer_alloc_size = (g_num_batches * g_batch_size * sizeof(struct pkt_hdr_normal));
struct pkt_hdr_normal* packet_buffer = NULL;
struct pkt_hdr_normal* response_buffer = NULL;
struct pkt_hdr_normal* gpu_packet_buffer = NULL;
struct pkt_hdr_normal* gpu_response_buffer = NULL;
CU_CHECK_ERR(hipMalloc(&gpu_packet_buffer, buffer_alloc_size));
packet_buffer = (pkt_hdr_normal*)malloc(buffer_alloc_size);
if (alloc_response) {
CU_CHECK_ERR(hipMalloc(&gpu_response_buffer, buffer_alloc_size));
response_buffer = (pkt_hdr_normal*)malloc(buffer_alloc_size);
}
struct pkt_hdr pkt;
for (unsigned i=0; i<g_num_batches; ++i) {
for (unsigned j=0; j<g_batch_size; ++j) {
// Load in the actual packet
generate_dummy_packet(&pkt, 1);
unsigned ind = i*g_batch_size + j;
normal_packet(packet_buffer, &pkt, ind);
}
}
assert(args);
args->buffer_size = buffer_alloc_size;
args->batch_size = g_batch_size;
args->n_batches = g_num_batches;
args->h_packet_buffer = (void*)packet_buffer;
args->h_response_buffer = (void*)response_buffer;
args->g_packet_buffer = gpu_packet_buffer;
args->g_response_buffer = gpu_response_buffer;
return buffer_alloc_size;
}
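/**< Same as init_normal_requests, but packets are stored in the swizzled
 *   (structure-of-arrays) pkt_hdr_batch layout: one pkt_hdr_batch per batch, with
 *   each header field batched across packets for coalesced GPU access. */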
unsigned init_swizzle_requests(struct kernel_args* args, bool alloc_response, int g_batch_size, int g_num_batches)
{
int res = hipSuccess;
unsigned total_num_requests = g_batch_size * g_num_batches;
unsigned buffer_alloc_size = (g_num_batches * sizeof(struct pkt_hdr_batch));
struct pkt_hdr_batch* packet_buffer = NULL;
struct pkt_hdr_batch* response_buffer = NULL;
struct pkt_hdr_batch* gpu_packet_buffer = 0;
struct pkt_hdr_batch* gpu_response_buffer = 0;
CU_CHECK_ERR(hipMalloc(&gpu_packet_buffer, buffer_alloc_size));
packet_buffer = (pkt_hdr_batch*)malloc(buffer_alloc_size);
if (alloc_response) {
CU_CHECK_ERR(hipMalloc(&gpu_response_buffer, buffer_alloc_size));
response_buffer = (pkt_hdr_batch*)malloc(buffer_alloc_size);
}
unsigned pkt_to_print = 312;
bool verbose = false;
struct pkt_hdr pkt;
for (unsigned i=0; i<g_num_batches; ++i) {
for (unsigned j=0; j<g_batch_size; ++j) {
// Load in the actual packet
generate_dummy_packet(&pkt, 1);
if (verbose && j == pkt_to_print) {
//print_pkt_hdr(&pkt);
}
swizzle_packet(&packet_buffer[i], &pkt, j);
}
}
if (verbose) {
//print_swizzled_packet(&packet_buffer[0], pkt_to_print);
}
assert(args);
args->buffer_size = buffer_alloc_size;
args->batch_size = g_batch_size;
args->n_batches = g_num_batches;
args->h_packet_buffer = (void*)packet_buffer;
args->h_response_buffer = (void*)response_buffer;
args->g_packet_buffer = gpu_packet_buffer;
args->g_response_buffer = gpu_response_buffer;
return buffer_alloc_size;
}
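/**< Scatter one parsed packet header into slot pkt_ind of the swizzled batch:
 *   each Ethernet/IP/UDP field goes into its own per-field array, widened to
 *   32 bits. */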
void swizzle_packet(struct pkt_hdr_batch* pkt_hdr_batch_ptr, struct pkt_hdr* pkt, unsigned pkt_ind)
{
memcpy((u_int8_t*)&pkt_hdr_batch_ptr->ether_dhost_1[pkt_ind], pkt->eh.ether_dhost, 4);
memcpy((u_int8_t*)&pkt_hdr_batch_ptr->ether_dhost_2[pkt_ind], pkt->eh.ether_dhost + 4, 2);
memcpy((u_int8_t*)&pkt_hdr_batch_ptr->ether_shost_1[pkt_ind], pkt->eh.ether_shost, 4);
memcpy((u_int8_t*)&pkt_hdr_batch_ptr->ether_shost_2[pkt_ind], pkt->eh.ether_shost + 4, 2);
pkt_hdr_batch_ptr->ether_type[pkt_ind] = (u_int32_t)pkt->eh.ether_type;
pkt_hdr_batch_ptr->ip_version[pkt_ind] = (u_int32_t)pkt->iph.version;
pkt_hdr_batch_ptr->ip_tos[pkt_ind] = (u_int32_t)pkt->iph.tos;
pkt_hdr_batch_ptr->ip_tot_len[pkt_ind] = (u_int32_t)pkt->iph.tot_len;
pkt_hdr_batch_ptr->ip_id[pkt_ind] = (u_int32_t)pkt->iph.id;
pkt_hdr_batch_ptr->ip_frag_off[pkt_ind] = (u_int32_t)pkt->iph.frag_off;
pkt_hdr_batch_ptr->ip_ttl[pkt_ind] = (u_int32_t)pkt->iph.ttl;
pkt_hdr_batch_ptr->ip_protocol[pkt_ind] = (u_int32_t)pkt->iph.protocol;
pkt_hdr_batch_ptr->ip_check[pkt_ind] = (u_int32_t)pkt->iph.check;
pkt_hdr_batch_ptr->ip_saddr[pkt_ind] = (u_int32_t)pkt->iph.saddr;
pkt_hdr_batch_ptr->ip_daddr[pkt_ind] = (u_int32_t)pkt->iph.daddr;
pkt_hdr_batch_ptr->udp_source[pkt_ind] = (u_int32_t)pkt->uh.source;
pkt_hdr_batch_ptr->udp_dest[pkt_ind] = (u_int32_t)pkt->uh.dest;
pkt_hdr_batch_ptr->udp_len[pkt_ind] = (u_int32_t)pkt->uh.len;
pkt_hdr_batch_ptr->udp_check[pkt_ind] = (u_int32_t)pkt->uh.check;
}
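/**< IPv6 counterpart of init_normal_requests: generates dummy IPv6 packets whose
 *   addresses cycle through the random prefix array (so LPM lookups hit installed
 *   prefixes), stored in the flat ipv6_pkt_hdr_normal layout. */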
unsigned init_ipv6_normal_requests(struct kernel_args* args, struct ipv6_prefix* prefix_arr, unsigned prefix_ind, int g_batch_size, int g_num_batches)
{
unsigned total_num_requests = g_batch_size * g_num_batches;
unsigned buffer_alloc_size = (g_num_batches * g_batch_size * sizeof(struct ipv6_pkt_hdr_normal));
struct ipv6_pkt_hdr_normal* packet_buffer = NULL;
struct ipv6_pkt_hdr_normal* gpu_packet_buffer = NULL;
CU_CHECK_ERR(hipMalloc(&gpu_packet_buffer, buffer_alloc_size));
packet_buffer = (ipv6_pkt_hdr_normal*)malloc(buffer_alloc_size);
struct ipv6_pkt_hdr pkt;
for (unsigned i=0; i<g_num_batches; ++i) {
for (unsigned j=0; j<g_batch_size; ++j) {
// Load in the actual packet
ipv6_generate_dummy_packet(&pkt, &prefix_arr[prefix_ind]);
prefix_ind = (prefix_ind+1) % IPV6_NUM_RAND_PREFIXES;
unsigned ind = i*g_batch_size + j;
ipv6_normal_packet(packet_buffer, &pkt, ind);
}
}
assert(args);
args->buffer_size = buffer_alloc_size;
args->batch_size = g_batch_size;
args->n_batches = g_num_batches;
args->h_packet_buffer = (void*)packet_buffer;
args->h_response_buffer = NULL;
args->g_packet_buffer = gpu_packet_buffer;
args->g_response_buffer = 0;
return buffer_alloc_size;
}
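/**< Copy one IPv6 packet header into slot pkt_ind of the flat buffer, splitting
 *   the 16-byte source/destination addresses into four 32-bit words each. */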
void ipv6_normal_packet(struct ipv6_pkt_hdr_normal* pkt_hdr_normal_ptr, struct ipv6_pkt_hdr* pkt, unsigned pkt_ind)
{
// ETH
memcpy((u_int8_t*)&pkt_hdr_normal_ptr[pkt_ind].ether_dhost_1, pkt->eh.ether_dhost, 4);
memcpy((u_int8_t*)&pkt_hdr_normal_ptr[pkt_ind].ether_dhost_2, pkt->eh.ether_dhost + 4, 2);
memcpy((u_int8_t*)&pkt_hdr_normal_ptr[pkt_ind].ether_shost_1, pkt->eh.ether_shost, 4);
memcpy((u_int8_t*)&pkt_hdr_normal_ptr[pkt_ind].ether_shost_2, pkt->eh.ether_shost + 4, 2);
pkt_hdr_normal_ptr[pkt_ind].ether_type = (u_int32_t)pkt->eh.ether_type;
// IPH
pkt_hdr_normal_ptr[pkt_ind].ip_vtc_flow = (u_int32_t)pkt->iph.vtc_flow;
pkt_hdr_normal_ptr[pkt_ind].ip_payload_len = (u_int32_t)pkt->iph.payload_len;
pkt_hdr_normal_ptr[pkt_ind].ip_proto = (u_int32_t)pkt->iph.proto;
pkt_hdr_normal_ptr[pkt_ind].ip_hop_limits = (u_int32_t)pkt->iph.hop_limits;
memcpy(&pkt_hdr_normal_ptr[pkt_ind].ip_saddr1, &pkt->iph.src_addr[0], 4);
memcpy(&pkt_hdr_normal_ptr[pkt_ind].ip_saddr2, &pkt->iph.src_addr[4], 4);
memcpy(&pkt_hdr_normal_ptr[pkt_ind].ip_saddr3, &pkt->iph.src_addr[8], 4);
memcpy(&pkt_hdr_normal_ptr[pkt_ind].ip_saddr4, &pkt->iph.src_addr[12], 4);
memcpy(&pkt_hdr_normal_ptr[pkt_ind].ip_daddr1, &pkt->iph.dst_addr[0], 4);
memcpy(&pkt_hdr_normal_ptr[pkt_ind].ip_daddr2, &pkt->iph.dst_addr[4], 4);
memcpy(&pkt_hdr_normal_ptr[pkt_ind].ip_daddr3, &pkt->iph.dst_addr[8], 4);
memcpy(&pkt_hdr_normal_ptr[pkt_ind].ip_daddr4, &pkt->iph.dst_addr[12], 4);
// UDPH
pkt_hdr_normal_ptr[pkt_ind].udp_source = (u_int32_t)pkt->uh.source;
pkt_hdr_normal_ptr[pkt_ind].udp_dest = (u_int32_t)pkt->uh.dest;
pkt_hdr_normal_ptr[pkt_ind].udp_len = (u_int32_t)pkt->uh.len;
pkt_hdr_normal_ptr[pkt_ind].udp_check = (u_int32_t)pkt->uh.check;
}
unsigned g_pkt_id=0;
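/**< Fill *pkt with a synthetic Ethernet/IPv4/UDP header. gen_type 0 uses fixed
 *   MAC/IP addresses (192.168.0.2 -> 192.168.1.4); gen_type 1 draws MACs from
 *   src_mac_arr/dst_mac_arr and random IPs from fastrand(&rss_seed). */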
void generate_dummy_packet(struct pkt_hdr* pkt, unsigned gen_type)
{
unsigned pkt_size = sizeof(struct pkt_hdr);
if (gen_type == 0) {
u_int32_t src_ip = 0xC0A80002 /* from 192.168.0.2 */;
u_int32_t dst_ip = 0xC0A80104 /* 192.168.1.4 */;
// Ethernet
pkt->eh.ether_type = htons(0x0800);
pkt->eh.ether_shost[0] = 0x68;
pkt->eh.ether_shost[1] = 0x05;
pkt->eh.ether_shost[2] = 0xCA;
pkt->eh.ether_shost[3] = 0x13;
pkt->eh.ether_shost[4] = 0xCE;
pkt->eh.ether_shost[5] = 0x79;
pkt->eh.ether_dhost[0] = 0x68;
pkt->eh.ether_dhost[1] = 0x05;
pkt->eh.ether_dhost[2] = 0xCA;
pkt->eh.ether_dhost[3] = 0x1B;
pkt->eh.ether_dhost[4] = 0x1E;
pkt->eh.ether_dhost[5] = 0x66;
// IP
//pkt->iph.ihl = 5;
pkt->iph.version = 4;
pkt->iph.tos = 0;
pkt->iph.tot_len = htons(pkt_size - sizeof(ether_header));
pkt->iph.id = htons(g_pkt_id++);
pkt->iph.ttl = 64;
pkt->iph.frag_off = htons(0);
pkt->iph.protocol = IPPROTO_UDP;
pkt->iph.daddr = htonl(dst_ip);
pkt->iph.saddr = htonl(src_ip);
pkt->iph.check = wrapsum(in_cksum((unsigned char *)&pkt->iph, sizeof(struct ip_header), 0));
// UDP
pkt->uh.source = htons(9191);
pkt->uh.dest = htons(9960);
pkt->uh.len = htons(pkt_size - sizeof(ether_header) - sizeof(ip_header));
pkt->uh.check = 0; /* It must be 0 to compute the checksum */
//i = sizeof(struct ether_header) + sizeof(struct ip_header) + sizeof(struct udp_header);
/*udp_header->check = wrapsum(in_cksum((unsigned char *)udp_header, sizeof(struct udp_header),
in_cksum((unsigned char *)&buffer[i], send_len-i,
in_cksum((unsigned char *)&ip_header->saddr,
2*sizeof(ip_header->saddr),
IPPROTO_UDP + ntohs(udp_header->len)))));*/
} else if (gen_type == 1) {
set_mac(&pkt->eh.ether_shost[0], src_mac_arr[0][0]);
set_mac(&pkt->eh.ether_dhost[0], dst_mac_arr[0][0]);
pkt->eh.ether_type = htons(0x0800);
pkt->iph.version = 0x40 | 0x05;
pkt->iph.tos = 0;
pkt->iph.tot_len = htons(pkt_size - sizeof(ether_header));
pkt->iph.id = htons(g_pkt_id++);
pkt->iph.ttl = 64;
pkt->iph.frag_off = htons(0);
pkt->iph.protocol = IPPROTO_UDP;
pkt->iph.saddr = htonl(fastrand(&rss_seed));
pkt->iph.daddr = htonl(fastrand(&rss_seed));
pkt->iph.check = wrapsum(in_cksum((unsigned char *)&pkt->iph, sizeof(struct ip_header), 0));
// UDP
pkt->uh.source = htons(9191);
pkt->uh.dest = htons(9960);
pkt->uh.len = htons(pkt_size - sizeof(ether_header) - sizeof(ip_header));
pkt->uh.check = 0; /* It must be 0 to compute the checksum */
//if (g_pkt_id < 4) {
// print_pkt_hdr(pkt);
//}
} else {
//cout << "Error: Unknown gen_type = " << gen_type << endl;
abort();
}
return;
}
//unsigned g_pkt_id=0;
void ipv6_generate_dummy_packet(struct ipv6_pkt_hdr* pkt, struct ipv6_prefix* pfa)
{
unsigned pkt_size = sizeof(struct ipv6_pkt_hdr);
set_mac(&pkt->eh.ether_shost[0], src_mac_arr[0][0]);
set_mac(&pkt->eh.ether_dhost[0], dst_mac_arr[0][0]);
pkt->eh.ether_type = htons(0x0800);
pkt->iph.vtc_flow = 0;
pkt->iph.payload_len = 2 + sizeof(int) + sizeof(LL);
pkt->iph.proto = IPPROTO_IPV6;
pkt->iph.hop_limits = 64;
memcpy(pkt->iph.src_addr, pfa->bytes, IPV6_ADDR_LEN);
memcpy(pkt->iph.dst_addr, pfa->bytes, IPV6_ADDR_LEN);
// UDP
pkt->uh.source = htons(9191);
pkt->uh.dest = htons(9960);
pkt->uh.len = htons(pkt_size - sizeof(ether_header) - sizeof(struct ipv6_hdr));
pkt->uh.check = 0; /* It must be 0 to compute the checksum */
}
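/* One's-complement sum with end-around carry, as used for the IPv4 header
 * checksum: bytes are summed 16 bits at a time and any overflow is folded back,
 * e.g. 0xFFFF + 0x0001 = 0x10000 folds to 0x0001. A trailing odd byte is treated
 * as the high byte of a final 16-bit word. */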
int in_cksum(unsigned char *buf, unsigned nbytes, int sum)
{
uint i;
/* Checksum all the pairs of bytes first... */
for (i = 0; i < (nbytes & ~1U); i += 2) {
sum += (u_int16_t) ntohs(*((u_int16_t *)(buf + i)));
/* Add carry. */
if(sum > 0xFFFF)
sum -= 0xFFFF;
}
/* If there's a single byte left over, checksum it, too. Network
byte order is big-endian, so the remaining byte is the high byte. */
if(i < nbytes) {
sum += buf [i] << 8;
/* Add carry. */
if(sum > 0xFFFF)
sum -= 0xFFFF;
}
return sum;
}
static u_int32_t wrapsum (u_int32_t sum)
{
sum = ~sum & 0xFFFF;
return htons(sum);
}
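/**< Copy one parsed IPv4 packet header into slot pkt_ind of the flat
 *   (array-of-structures) buffer, widening every field to 32 bits. */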
void normal_packet(struct pkt_hdr_normal* pkt_hdr_normal_ptr, struct pkt_hdr* pkt, unsigned pkt_ind)
{
memcpy((u_int8_t*)&pkt_hdr_normal_ptr[pkt_ind].ether_dhost_1, pkt->eh.ether_dhost, 4);
memcpy((u_int8_t*)&pkt_hdr_normal_ptr[pkt_ind].ether_dhost_2, pkt->eh.ether_dhost + 4, 2);
memcpy((u_int8_t*)&pkt_hdr_normal_ptr[pkt_ind].ether_shost_1, pkt->eh.ether_shost, 4);
memcpy((u_int8_t*)&pkt_hdr_normal_ptr[pkt_ind].ether_shost_2, pkt->eh.ether_shost + 4, 2);
pkt_hdr_normal_ptr[pkt_ind].ether_type = (u_int32_t)pkt->eh.ether_type;
pkt_hdr_normal_ptr[pkt_ind].ip_version = (u_int32_t)pkt->iph.version;
pkt_hdr_normal_ptr[pkt_ind].ip_tos = (u_int32_t)pkt->iph.tos;
pkt_hdr_normal_ptr[pkt_ind].ip_tot_len = (u_int32_t)pkt->iph.tot_len;
pkt_hdr_normal_ptr[pkt_ind].ip_id = (u_int32_t)pkt->iph.id;
pkt_hdr_normal_ptr[pkt_ind].ip_frag_off = (u_int32_t)pkt->iph.frag_off;
pkt_hdr_normal_ptr[pkt_ind].ip_ttl = (u_int32_t)pkt->iph.ttl;
pkt_hdr_normal_ptr[pkt_ind].ip_protocol = (u_int32_t)pkt->iph.protocol;
pkt_hdr_normal_ptr[pkt_ind].ip_check = (u_int32_t)pkt->iph.check;
pkt_hdr_normal_ptr[pkt_ind].ip_saddr = (u_int32_t)pkt->iph.saddr;
pkt_hdr_normal_ptr[pkt_ind].ip_daddr = (u_int32_t)pkt->iph.daddr;
pkt_hdr_normal_ptr[pkt_ind].udp_source = (u_int32_t)pkt->uh.source;
pkt_hdr_normal_ptr[pkt_ind].udp_dest = (u_int32_t)pkt->uh.dest;
pkt_hdr_normal_ptr[pkt_ind].udp_len = (u_int32_t)pkt->uh.len;
pkt_hdr_normal_ptr[pkt_ind].udp_check = (u_int32_t)pkt->uh.check;
}
/////////////////////////////////////BACKPROP//////////////////////////////////////////////
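/* Thin wrapper around the backprop benchmark: simply forwards argc/argv to
 * setup(); the `block` flag is currently ignored here. */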
void run_backprop(int argc, char **argv, bool block) {
printf("MARIA inside run_backprop\n");
setup(argc, argv);
}
////////////////////////////////////////CONVOLUTION//////////////////////////////////////////////////
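/* Stand-alone convolution launch used as the background task. Defaults to a
 * 200 x 1 x 28 x 28 input with 16 5x5 filters (no padding, stride 1); an optional
 * 7-argument command line overrides batch size, channels, image size, filter
 * count, filter size, pad and stride. */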
int run_conv_kernel(int argc, char** argv, bool block, bool warmup)
{
float *h_images;
float *h_filters;
float *h_targets;
float *d_images;
float *d_filters;
float *d_targets;
hipStream_t stream;
CU_CHECK_ERR( hipStreamCreate( &stream ) );
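// Note: the copies and kernel launch below are issued on the global streams[0];
// the locally created `stream` appears to be unused in this path.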
printf("Starting convolution kernel\n");
// 100_1_28_16_5_0_1
// Testing data to try and match convnet
//int batch_size = 25;
//int n_images = batch_size;
//int n_img_colors = 32;
//int image_size = 16;
//int n_filters = 32;
//int filter_size = 5;
//int pad = 2;
//int stride = 1;
//int numGroups = 1;
//int modulesX = 1 + CEIL((2*pad + image_size - filter_size), stride);
////int modulesX = 1 + ceil( (double)(2*pad + image_size - filter_size) / (double)stride );
//int n_modules = modulesX * modulesX;
int batch_size = 200;
int n_images = batch_size;
int n_img_colors = 1;
int image_size = 28;
int n_filters = 16;
int filter_size = 5;
int pad = 0;
int stride = 1;
int numGroups = 1;
int modulesX = 1 + CEIL((2*pad + image_size - filter_size), stride);
int n_modules = modulesX * modulesX;
if(argc == 8){
printf("Using command line parameters\n");
// Batch_size | channels | image_size | num_filters | filter_size | pad | stride |
batch_size = atoi(argv[1]);
n_images = batch_size;
n_img_colors = atoi(argv[2]);
image_size = atoi(argv[3]);
n_filters = atoi(argv[4]);
filter_size = atoi(argv[5]);
pad = atoi(argv[6]);
stride = atoi(argv[7]);
modulesX = 1 + CEIL((2*pad + image_size - filter_size), stride);
//modulesX = 1 + ceil( (double)(2*pad + image_size - filter_size) / (double)stride );
n_modules = modulesX * modulesX;
}else{
printf("Using default parameters\n");
//printf("ERROR: Should not use default for parameter sweeping\n");
//abort();
}
// Cuda malloc/memcpy stuff
int images_alloc_sz = n_images * (image_size*image_size*n_img_colors);
int filters_alloc_sz = n_filters * (filter_size*filter_size*n_img_colors);
int target_alloc_sz = n_images * (n_filters*n_modules);
h_images = (float *)malloc(images_alloc_sz*sizeof(float));
h_filters = (float *)malloc(filters_alloc_sz*sizeof(float));
h_targets = (float *)malloc(target_alloc_sz*sizeof(float));
hipMalloc((void **)&d_images, images_alloc_sz*sizeof(float));
hipMalloc((void **)&d_filters, filters_alloc_sz*sizeof(float));
hipMalloc((void **)&d_targets, target_alloc_sz*sizeof(float));
// Populate GPU memory
hipMemcpyAsync(d_images, h_images, images_alloc_sz*sizeof(float), hipMemcpyHostToDevice, streams[0]);
hipMemcpyAsync(d_filters, h_filters, filters_alloc_sz*sizeof(float), hipMemcpyHostToDevice, streams[0]);
_filterActs(d_images, n_images, image_size*image_size*n_img_colors, d_filters, n_filters,
filter_size*filter_size*n_img_colors,
d_targets, n_images, n_filters*n_modules,
image_size, modulesX, modulesX, -1*pad, stride,
n_img_colors, numGroups, 0, 1, 1, streams[0], warmup);
hipMemcpyAsync(h_targets, d_targets, target_alloc_sz*sizeof(float), hipMemcpyDeviceToHost, streams[0]);
if( block ) {
CU_CHECK_ERR( hipDeviceSynchronize() );
free(h_images);
free(h_filters);
free(h_targets);
CU_CHECK_ERR( hipFree(d_images) );
CU_CHECK_ERR( hipFree(d_filters) );
CU_CHECK_ERR( hipFree(d_targets) );
}
printf("Complete...\n");
return 0;
}
/**< Forward declarations of the convolution (filterActs) kernels defined below */
template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int numColors, bool scale, bool checkImgBounds>
__global__ void filterActs_YxX_color(float* images, float* filters, float* targets,
const int numImages, const int numFilters,
const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
const int moduleStride, const int numModulesY, const int numModulesX, const int imgStride,
const float scaleTargets, const float scaleOutputs, const int conv);
template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int colorCache, bool scale, bool checkImgBounds>
__global__ void filterActs_YxX_sparse(float* images, float* filters, float* targets,
const int numImages, const int numFilters,
const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
const int moduleStride,
const int numModulesY, const int numModulesX, const int imgStride, const int numImgColors,
const int numGroups,
const float scaleTargets, const float scaleOutputs,
const int conv);
__global__ void emptyKernel()
{
}
/*******************************************************************************/
/*******************************************************************************/
/****************************** GPU Globals ************************************/
/*******************************************************************************/
/*******************************************************************************/
// NOTE: This requires key lengths to be in increments of 4 bytes
__device__ int fast_memcmp(const void *key1, const void *key2, int num){
const unsigned *p1 = (const unsigned* )key1;
const unsigned *p2 = (const unsigned* )key2;
int main_loop = num / sizeof(int);
for(unsigned i=0; i<main_loop; i++){
if(*(p1+i) != *(p2+i)){
return 0;
}
}
return 1;
}
/***********************************************/
/***********************************************/
/***********************************************/
#ifndef DIVUP
#define DIVUP(x, y) (((x) + (y) - 1) / (y))
#endif
////// PREFERS SHARED in config (hipFuncCachePreferShared)
#define CEIL(x, y) ( (x)/(y) + ( (x)%(y) ? 1 : 0 ) )
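/* cuda-convnet-style filterActs kernel for <= 3 input colors. Each thread block
 * handles one output module and a tile of B_Y*filtersPerThread filters by
 * B_X*imgsPerThread images, staging filter and image pixels through shared
 * memory B_Y rows at a time. */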
template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int numColors, bool scale, bool checkImgBounds>
__global__ void filterActs_YxX_color(float* images, float* filters, float* targets,
const int numImages, const int numFilters,
const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
const int moduleStride, const int numModulesY, const int numModulesX, const int imgStride,
const float scaleTargets, const float scaleOutputs, const int conv) {
__shared__ float shFilters[B_Y*numColors][B_Y * filtersPerThread]; // pre-load B_Y pixels from B_Y*filtersPerThread filters
__shared__ float shImages[B_Y*numColors][B_X * imgsPerThread]; // pre-load B_Y pixels from B_X*imgsPerThread images
const int imgPixels = imgSizeY * imgSizeX;
const int filterPixels = filterSize * filterSize;
const int blocksPerModule = numFilters / (B_Y*filtersPerThread);
const int moduleIdx = blockIdx.y / blocksPerModule;
const int blockFilterIdx = blockIdx.y % blocksPerModule;
const int tidx = threadIdx.y * B_X + threadIdx.x;
const int imgLoadModPosY = (moduleIdx / numModulesX) * moduleStride;
const int imgLoadModPosX = (moduleIdx % numModulesX) * moduleStride;
const int shFilterLoadY = tidx / (B_Y * filtersPerThread);
const int shFilterLoadX = tidx % (B_Y * filtersPerThread);
const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x;
images += myImgIdx;
filters += filtersPerThread * B_Y * blockFilterIdx
+ shFilterLoadY * numFilters + shFilterLoadX;
if (!conv) {
filters += moduleIdx * numColors * filterPixels * numFilters;
}
targets += moduleIdx * numImages
+ (blockFilterIdx * B_Y * filtersPerThread + threadIdx.y) * numImages * numModulesY * numModulesX
+ myImgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for(int g = 0; g < imgsPerThread; g++) {
prod[f][g] = 0;
}
}
for (int p = 0; p < filterPixels; p += B_Y) {
/*
* Load B_Y pixels from B_Y*filtersPerThread filters
*/
if (shFilterLoadY < B_Y) {
#pragma unroll
for (int p2 = 0; p2 < B_Y; p2 += B_X/filtersPerThread) {
if (p + p2 + shFilterLoadY < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shFilters[shFilterLoadY + p2 + c * B_Y][shFilterLoadX] = filters[(c * filterPixels + p + p2) * numFilters];
}
} else {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shFilters[shFilterLoadY + p2 + c * B_Y][shFilterLoadX] = 0;
}
}
}
}
/*
* Load B_Y pixels from B_X*imgsPerThread images
*/
const int pixIdx = p + threadIdx.y;
if (pixIdx < filterPixels) {
const int x = paddingStart + imgLoadModPosX + pixIdx % filterSize;
const int y = paddingStart + imgLoadModPosY + pixIdx / filterSize;
if (y >= 0 && y< imgSizeY && x >= 0 && x < imgSizeX) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = images[imgStride * (c * imgPixels + y * imgSizeX + x) + i * B_X];
}
} else {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = 0;
}
}
}
} else { // Padding
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = 0;
}
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < B_Y*numColors; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for(int g = 0; g < imgsPerThread; g++) {
prod[f][g] += shImages[i][g * B_X + threadIdx.x] * shFilters[i][threadIdx.y + f * B_Y];
}
}
}
__syncthreads();
}
if (scale) {
#pragma unroll
for (int g = 0; g < imgsPerThread; g++) {
if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[g * B_X + f * B_Y * numImages * numModulesY * numModulesX] = scaleTargets * targets[g * B_X + f * B_Y * numImages * numModulesY * numModulesX] + scaleOutputs * prod[f][g];
}
}
}
} else {
#pragma unroll
for (int g = 0; g < imgsPerThread; g++) {
if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[g * B_X + f * B_Y * numImages * numModulesY * numModulesX] = scaleOutputs * prod[f][g];
}
}
}
}
}
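/* General-color variant: handles an arbitrary number of image colors (processed
 * in chunks of colorCache) and multiple filter groups; otherwise follows the same
 * shared-memory tiling scheme as filterActs_YxX_color. */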
template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int colorCache, bool scale, bool checkImgBounds>
__global__ void filterActs_YxX_sparse(float* images, float* filters, float* targets,
const int numImages, const int numFilters,
const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
const int moduleStride,
const int numModulesY, const int numModulesX, const int imgStride, const int numImgColors,
const int numGroups,
const float scaleTargets, const float scaleOutputs,
const int conv) {
__shared__ float shFilters[B_Y*colorCache][B_Y * filtersPerThread]; // pre-load B_Y pixels from B_Y*filtersPerThread filters
__shared__ float shImages[B_Y*colorCache][B_X * imgsPerThread]; // pre-load B_Y pixels from B_X*imgsPerThread images
const int imgPixels = imgSizeY * imgSizeX;
const int filterPixels = filterSize * filterSize;
const int numFilterColors = numImgColors / numGroups;
const int blocksPerModule = numFilters / (B_Y*filtersPerThread);
const int moduleIdx = blockIdx.y / blocksPerModule;
const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule);
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numModules = numModulesX * numModulesY;
const int blockColorIdx = numFilterColors * blockGroupIdx;
const int tidx = threadIdx.y * B_X + threadIdx.x;
const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride;
const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride;
const int shFilterLoadY = tidx / (B_Y * filtersPerThread);
const int shFilterLoadX = tidx % (B_Y * filtersPerThread);
const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x;
images += blockColorIdx * imgPixels * imgStride + myImgIdx;
filters +=blockFilterIdx
+ shFilterLoadY * numFilters + shFilterLoadX;
if (!conv) {
filters += moduleIdx * numFilterColors * filterPixels * numFilters;
}
targets += moduleIdx * numImages
+ (blockFilterIdx + threadIdx.y) * numImages * numModules
+ myImgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for(int g = 0; g < imgsPerThread; g++) {
prod[f][g] = 0;
}
}
// __shared__ int imgPos[]
for (int oc = 0; oc < numFilterColors; oc += colorCache) { // oc stands for outer color (loop)
for (int p = 0; p < filterPixels; p += B_Y) {
/*
* Load B_Y pixels from B_Y*filtersPerThread filters
*/
if (shFilterLoadY < B_Y) {
#pragma unroll
for (int p2 = 0; p2 < B_Y; p2 += B_X/filtersPerThread) {
if (p + p2 + shFilterLoadY < filterPixels) {
#pragma unroll
for (int c = 0; c < colorCache; c++) {
shFilters[shFilterLoadY + p2 + c * B_Y][shFilterLoadX] = filters[((oc+c) * filterPixels + p + p2) * numFilters];
}
} else {
#pragma unroll
for (int c = 0; c < colorCache; c++) {
shFilters[shFilterLoadY + p2 + c * B_Y][shFilterLoadX] = 0;
}
}
}
}
/*
* Load B_Y pixels from B_X*imgsPerThread images
*/
const int pixIdx = p + threadIdx.y;
if (pixIdx < filterPixels) {
const int x = imgLoadModPosX + pixIdx % filterSize;
const int y = imgLoadModPosY + pixIdx / filterSize;
if (y >= 0 && y < imgSizeY && x >= 0 && x < imgSizeX) {
float* m = &images[imgStride * (oc * imgPixels + y * imgSizeX + x)];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < colorCache; c++) {
shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = m[c * imgStride * imgPixels + i * B_X];
}
} else {
#pragma unroll
for (int c = 0; c < colorCache; c++) {
shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = 0;
}
}
}
} else { // Padding
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for (int c = 0; c < colorCache; c++) {
shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = 0;
}
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < B_Y*colorCache; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for(int g = 0; g < imgsPerThread; g++) {
prod[f][g] += shImages[i][g * B_X + threadIdx.x] * shFilters[i][threadIdx.y + f * B_Y];
}
}
}
__syncthreads();
}
}
if (scale) {
#pragma unroll
for (int g = 0; g < imgsPerThread; g++) {
if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[g * B_X + f * B_Y * numImages * numModules] = scaleTargets * targets[g * B_X + f * B_Y * numImages * numModules] + scaleOutputs * prod[f][g];
}
}
}
} else {
#pragma unroll
for (int g = 0; g < imgsPerThread; g++) {
if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[g * B_X + f * B_Y * numImages * numModules] = scaleOutputs * prod[f][g];
}
}
}
}
}
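/* Host-side dispatcher: derives geometry from the flat matrix dimensions, picks
 * imgsPerThread (4/2/1) from the image-count divisibility, then selects the
 * matching template instantiation of filterActs_YxX_color (<= 3 colors) or
 * filterActs_YxX_sparse (general case) based on color count, output scaling and
 * image-bounds checking. */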
void _filterActs(float *images, int images_cols, int images_rows, float *filters, int filters_cols,
int filters_rows, float *targets, int targets_cols, int targets_rows,
int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
int numImgColors, int numGroups, float scaleTargets, float scaleOutput, int conv, hipStream_t stream,
bool warmup) {
int numFilterColors = numImgColors / numGroups;
int numFilters = filters_cols;
int numModules = numModulesY * numModulesX;
int numImages = images_cols;
int imgPixels = images_rows/numImgColors;
int imgSizeX = imgPixels / imgSizeY;
int filterModuleMult = conv ? 1 : numModules;
assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0)));
assert(numGroups == 1 || numFilterColors % 2 == 0);
assert(numFilters % (16 * numGroups) == 0);
assert(numImgColors % numGroups == 0);
assert(images_rows == imgPixels * numImgColors);
assert(imgSizeY * imgSizeX == imgPixels);
int numFiltersPerGroup = numFilters / numGroups;
int imgStride = images_cols; // ???? //images.getStride(); // images does not need to be a contiguous matrix
int filterPixels = filters_rows / (filterModuleMult * numFilterColors);
int filterSize = int(sqrt(filterPixels));
assert(filterSize * filterSize == filterPixels);
assert(filters_rows == filterModuleMult * numFilterColors * filterPixels);
// These routines don't handle the case when only part of the image is visited in the convolution
assert(paddingStart <= 0);
assert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX);
assert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
assert(moduleStride <= filterSize);
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
dim3 blocks = numFiltersPerGroup % 32 == 0 ? dim3(DIVUP(numImages, 32 * imgsPerThread), (numModules * numFilters) / (4 * 8))
: dim3(DIVUP(numImages, 32 * imgsPerThread), (numModules * numFilters) / (4 * 4));
if( warmup ) {
blocks = dim3(4, 16);
}
dim3 threads(32, 4);
bool checkImgBounds = numImages % (32*imgsPerThread) != 0;
printf("blocks(%d, %d, %d), threads(%d, %d, %d)\n", blocks.x, blocks.y, blocks.z, threads.x, threads.y, threads.z);
/*
if (scaleTargets == 0) {
targets.resize(numFilters * numModules, numImages);
} else {
assert(targets.getNumRows() == numFilters * numModules);
assert(targets.getNumCols() == numImages);
}
*/
assert(targets_rows == numFilters * numModules);
assert(targets_cols == numImages);
printf("\n\n");
printf("filters.getNumCols = %d, filters.getnumrows = %d, images.getNumCols = %d, images.getNumRows = %d, targets.getNumcols = %d, targets.getNumrows = %d\n\n",
filters_cols, filters_rows, images_cols, images_rows, targets_cols, targets_rows);
printf("\n\n\n====== Kernel Parameters ======\n\n");
printf("images = %p\n"
"filters = %p\n"
"targets = %p\n"
"numImages = %d\n"
"numFilters = %d\n"
"imgSizeY = %d\n"
"imgSizeX = %d\n"
"filterSize = %d\n"
"paddingStart = %d\n"
"moduleStride = %d\n"
"numModulesY = %d\n"
"numModulesX = %d\n"
"imgStride = %d\n"
"scaleTargts = %lf\n"
"scaleOutputs = %lf\n"
"conv = %d\n"
"numImgColors = %d\n"
"imgsPerThread = %d\n"
"numGroups = %d\n"
"checkImgBounds = %d\n"
"numFiltersPerGroup = %d\n"
"blocks = %d, %d, %d\n"
"threads = %d, %d, %d\n"
"\n===================================\n",
images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart,
moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv,
numImgColors, imgsPerThread, numGroups, checkImgBounds, numFiltersPerGroup, blocks.x, blocks.y, blocks.z, threads.x, threads.y, threads.z);
#if 0
dim3 tmpBlocks(4, 64, 1);
//filterActs_YxX_color < 4, 32, 1, 4, 1, false, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
hipLaunchKernelGGL(( filterActs_YxX_color< 4, 32, 1, 4, 1, false, true > ), dim3(tmpBlocks), dim3(threads), 0, stream, images, filters, targets, numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
#endif
if (imgsPerThread == 4) {
if (numImgColors <= 3) {
assert(numGroups == 1); // It has to be based on above definitions, but just to be sure.
if (scaleTargets == 0) { // don't scale
if (numImgColors == 1) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
////hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 1, false, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 1, false, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 1, false, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 1, false, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 2) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 2, false, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 2, false, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 2, false, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 2, false, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 3) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 3, false, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 3, false, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 3, false, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 3, false, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
} else { // do scale
if (numImgColors == 1) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 1, true, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 1, true, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 1, true, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 1, true, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 2) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 2, true, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 2, true, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 2, true, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 2, true, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 3) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 3, true, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 3, true, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 3, true, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 3, true, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
}
} else {
if (scaleTargets == 0) { // don't scale
if (checkImgBounds) {
if (numFiltersPerGroup % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 4, 8, 2, false, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 4, 4, 2, false, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
} else {
if (numFiltersPerGroup % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 4, 8, 2, false, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 4, 4, 2, false, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
} else { // do scale
if (checkImgBounds) {
if (numFiltersPerGroup % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 4, 8, 2, true, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 4, 4, 2, true, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
} else {
if (numFiltersPerGroup % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 4, 8, 2, true, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 4, 4, 2, true, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
}
}
} else if (imgsPerThread == 2) {
if (numImgColors <= 3) {
assert(numGroups == 1); // It has to be based on above definitions, but just to be sure.
if (scaleTargets == 0) { // don't scale
if (numImgColors == 1) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 1, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 1, false, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 1, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 1, false, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 1, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 1, false, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 1, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 1, false, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 2) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 2, false, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 2, false, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 2, false, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 2, false, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 3) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 3, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 3, false, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 3, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 3, false, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 3, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 3, false, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 3, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 3, false, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
} else { // do scale
if (numImgColors == 1) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 1, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 1, true, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 1, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 1, true, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 1, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 1, true, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 1, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 1, true, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 2) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 2, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 2, true, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 2, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 2, true, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 2, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 2, true, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 2, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 2, true, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 3) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 3, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 3, true, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 3, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 3, true, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 3, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 8, 3, true, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 3, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 2, 4, 3, true, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
}
} else {
if (scaleTargets == 0) { // don't scale
if (checkImgBounds) {
if (numFiltersPerGroup % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 2, 8, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 2, 8, 2, false, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 2, 4, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 2, 4, 2, false, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
} else {
if (numFiltersPerGroup % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 2, 8, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 2, 8, 2, false, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 2, 4, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 2, 4, 2, false, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
} else { // do scale
if (checkImgBounds) {
if (numFiltersPerGroup % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 2, 8, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 2, 8, 2, true, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 2, 4, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 2, 4, 2, true, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
} else {
if (numFiltersPerGroup % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 2, 8, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 2, 8, 2, true, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 2, 4, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 2, 4, 2, true, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
}
}
} else {
if (numImgColors <= 3) {
        assert(numGroups == 1); // This must hold given the definitions above, but check just to be sure.
if (scaleTargets == 0) { // don't scale
if (numImgColors == 1) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 1, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 1, false, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
printf("\n\n\n\ I AM HERE \n\n\n");
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 1, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 1, false, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 1, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 1, false, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 1, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 1, false, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 2) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 2, false, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 2, false, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 2, false, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 2, false, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 3) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 3, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 3, false, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
printf("\n\n\n\nBING HERE\n\n\n\n");
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 3, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 3, false, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 3, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 3, false, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 3, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 3, false, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
} else { // do scale
if (numImgColors == 1) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 1, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 1, true, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 1, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 1, true, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 1, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 1, true, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 1, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 1, true, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 2) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 2, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 2, true, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 2, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 2, true, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 2, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 2, true, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 2, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 2, true, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 3) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 3, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 3, true, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 3, true, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 3, true, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 3, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 8, 3, true, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 3, true, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 1, 4, 3, true, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
}
} else {
if (scaleTargets == 0) { // don't scale
if (checkImgBounds) {
if (numFiltersPerGroup % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 1, 8, 2, false, true >, hipFuncCachePreferShared);
printf("\n\n\n\n\n BING BING BING \n\n\n\n\n");
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 1, 8, 2, false, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 1, 4, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 1, 4, 2, false, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
} else {
if (numFiltersPerGroup % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 1, 8, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 1, 8, 2, false, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 1, 4, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 1, 4, 2, false, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
} else { // do scale
if (checkImgBounds) {
if (numFiltersPerGroup % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 1, 8, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 1, 8, 2, true, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 1, 4, 2, false, true >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 1, 4, 2, true, true >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
} else {
if (numFiltersPerGroup % 32 == 0) {
//hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 1, 8, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 1, 8, 2, true, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
//hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 1, 4, 2, false, false >, hipFuncCachePreferShared);
hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 1, 4, 2, true, false >) , dim3(blocks), dim3(threads), 0, stream, images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
}
}
}
}
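// Note on the dispatch above: each branch instantiates a filterActs_YxX_color or
// filterActs_YxX_sparse template specialization. Following the cuda-convnet convention,
// the template arguments appear to encode <blockDim.y, blockDim.x, imgsPerThread,
// filtersPerThread, numColors (or colorCache), scale, checkImgBounds>, so the runtime
// checks on scaleTargets, numImgColors, numFilters % 32 and checkImgBounds simply pick
// the matching compile-time variant.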
__device__ long long src_mac_arr_d[8] = {0x6c10bb211b00, 0x6d10bb211b00, 0x64d2bd211b00, 0x65d2bd211b00,
0xc8a610ca0568, 0xc9a610ca0568, 0xa2a610ca0568, 0xa3a610ca0568};
__device__ long long dst_mac_arr_d[8] = {0x36d3bd211b00, 0x37d3bd211b00, 0x44d7a3211b00, 0x45d7a3211b00,
0xa8d6a3211b00, 0xa9d6a3211b00, 0x0ad7a3211b00, 0x0bd7a3211b00};
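// ipv6_port_lookup below performs a DPDK rte_lpm6-style longest-prefix-match walk over
// the packet's source-address words (ip_saddr1..4): the top bits of the first word index
// tbl24, and while the matched entry has RTE_LPM6_VALID_EXT_ENTRY_BITMASK set the walk
// continues one byte at a time through chained tbl8 groups of
// RTE_LPM6_TBL8_GROUP_NUM_ENTRIES entries, until an entry without the extension flag
// (the next-hop/port entry) is returned.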
__device__ uint32_t ipv6_port_lookup(uint16_t* tbl24, uint16_t* tbl8, ipv6_pkt_hdr_normal* pkt)
{
int status;
uint8_t first_byte;
uint32_t tbl24_index, tbl8_index, tbl_entry;
first_byte = 3;
uint32_t addr = pkt->ip_saddr1;
tbl24_index = (addr >> 8);
tbl_entry = tbl24[tbl24_index];
uint32_t offset = 0;
do {
if ((tbl_entry & RTE_LPM6_VALID_EXT_ENTRY_BITMASK) == RTE_LPM6_VALID_EXT_ENTRY_BITMASK) {
if (first_byte == 4) {
addr = pkt->ip_saddr2;
offset = 24;
} else if (first_byte == 8) {
addr = pkt->ip_saddr3;
offset = 24;
} else if (first_byte == 12) {
addr = pkt->ip_saddr4;
offset = 24;
}
uint8_t x = (uint8_t)((addr >> offset) & 0xFF);
tbl8_index = x + ((tbl_entry & RTE_LPM6_TBL8_BITMASK) * RTE_LPM6_TBL8_GROUP_NUM_ENTRIES);
tbl_entry = tbl8[tbl8_index];
first_byte++;
offset -= 8;
status = 1;
} else {
status = 0;
}
} while (status == 1);
return tbl_entry;
}
__global__ void ipv6_fwd_kernel(ipv6_pkt_hdr_normal* packet_batch, uint16_t* tbl24, uint16_t* tbl8, unsigned n_pkts, int* reg_buffer, bool save_regs)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid < n_pkts) {
ipv6_pkt_hdr_normal* pkt = &packet_batch[gid];
uint32_t tbl_entry = ipv6_port_lookup(tbl24, tbl8, pkt);
packet_batch[gid].ether_dhost_1 = (uint32_t)(dst_mac_arr_d[tbl_entry] >> 16);
packet_batch[gid].ether_dhost_2 = (uint32_t)(dst_mac_arr_d[tbl_entry] & 0xFFFF);
packet_batch[gid].ether_shost_1 = (uint32_t)(src_mac_arr_d[tbl_entry] >> 16);
packet_batch[gid].ether_shost_2 = (uint32_t)(src_mac_arr_d[tbl_entry] & 0xFFFF);
}
}
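// ipv6_fwd_kernel_save_regs is the same forwarding kernel with an optional register
// save/restore phase: when save_regs is set, each thread writes IPV6_REG_NUM words to
// reg_buffer before the lookup and reads them back afterwards. The stored values are just
// the thread index, so this only models the memory traffic of spilling and restoring a
// thread's register state (presumably around an EDGE event/preemption), not real contents.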
__global__ void ipv6_fwd_kernel_save_regs(ipv6_pkt_hdr_normal* packet_batch, uint16_t* tbl24, uint16_t* tbl8, unsigned n_pkts, int* reg_buffer, bool save_regs)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned tid = threadIdx.x;
if (save_regs) {
//save regs
for (int i=0; i<IPV6_REG_NUM; i++) {
//save_register(reg_buffer);
reg_buffer[tid * IPV6_REG_NUM + i] = tid;
}
}
if (gid < n_pkts) {
ipv6_pkt_hdr_normal* pkt = &packet_batch[gid];
uint32_t tbl_entry = ipv6_port_lookup(tbl24, tbl8, pkt);
packet_batch[gid].ether_dhost_1 = (uint32_t)(dst_mac_arr_d[tbl_entry] >> 16);
packet_batch[gid].ether_dhost_2 = (uint32_t)(dst_mac_arr_d[tbl_entry] & 0xFFFF);
packet_batch[gid].ether_shost_1 = (uint32_t)(src_mac_arr_d[tbl_entry] >> 16);
packet_batch[gid].ether_shost_2 = (uint32_t)(src_mac_arr_d[tbl_entry] & 0xFFFF);
}
if (save_regs) {
//save regs
for (int i=0; i<IPV6_REG_NUM; i++) {
tid = reg_buffer[tid * IPV6_REG_NUM + i];
}
}
}
int MatrixMulBase(int argc, char** argv, bool block) {
printf("MatrixMul EDGE Base test\n");
size_t dimAx = 512;
size_t dimAy = 512;
size_t dimBx = 512;
size_t dimBy = 512;
if(argc == 8){
printf("Using command line parameters\n");
        // dimAx | dimAy | dimBx | dimBy
dimAx = atoi(argv[1]);
dimAy = atoi(argv[2]);
dimBx = atoi(argv[3]);
dimBy = atoi(argv[4]);
}else{
printf("Using default parameters\n");
}
size_t dimCx = dimAx;
size_t dimCy = dimBy;
//allocate host mem
unsigned int size_A = dimAx*dimAy;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*)malloc(mem_size_A);
unsigned int size_B = dimBx*dimBy;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*)malloc(mem_size_B);
unsigned int size_C = dimCx*dimCy;
unsigned int mem_size_C = sizeof(float) * size_C;
float* h_C = (float*) malloc(mem_size_C);
printf("initializing host memory\n");
//randomInit(h_A, size_A);
//randomInit(h_B, size_B);
// setup execution parameters
dim3 threads(16, 16, 1);
dim3 grid(MAX(1, dimCx/threads.x), MAX(1, dimCy/threads.y), 1);
float* d_A;
float* d_B;
float* d_C;
printf("Allocating the matrices in GPU mem\n");
CU_CHECK_ERR( hipMalloc((void**)&d_A, dimAx*dimAy*sizeof(float)) );
CU_CHECK_ERR( hipMalloc((void**)&d_B, dimBx*dimBy*sizeof(float)) );
CU_CHECK_ERR( hipMalloc((void**)&d_C, dimAx*dimBy*sizeof(float)) );
printf("Copying the matrices to GPU mem\n");
CU_CHECK_ERR( hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice) );
CU_CHECK_ERR( hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice) );
hipLaunchKernelGGL(( matrixMul), dim3(grid), dim3(threads), 0, 0, d_C, d_A, d_B, dimAx, dimBx);
if (block) {
hipDeviceSynchronize();
        CU_CHECK_ERR( hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost) );
//PrintMatrices(h_A, h_B, h_C, dimAx, dimAy, dimBx, dimBy, dimCx, dimCy);
}
printf("Complete\n");
return 0;
}
#define XBLOCK_SIZE 16
#define YBLOCK_SIZE 16
//#include "matrixMul.h"
#define CHECK_BANK_CONFLICTS 0
#if CHECK_BANK_CONFLICTS
#define AS(i, j) cutilBankChecker(((float*)&As[0][0]), (BLOCK_SIZE * i + j))
#define BS(i, j) cutilBankChecker(((float*)&Bs[0][0]), (BLOCK_SIZE * i + j))
#else
#define AS(i, j) As[i][j]
#define BS(i, j) Bs[i][j]
#endif
////////////////////////////////////////////////////////////////////////////////
//! Matrix multiplication on the device: C = A * B
//! wA is A's width and wB is B's width
////////////////////////////////////////////////////////////////////////////////
__global__ void matrixMul( float* C, float* A, float* B, int wA, int wB) {
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * XBLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = XBLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = YBLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = YBLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[XBLOCK_SIZE][YBLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[YBLOCK_SIZE][XBLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
AS(ty, tx) = A[a + wA * ty + tx];
BS(ty, tx) = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
for (int k = 0; k < YBLOCK_SIZE; ++k)
Csub += AS(ty, k) * BS(k, tx);
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * XBLOCK_SIZE * by + YBLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
// ==== d099ebae1b872ed36c4b64fb1b1778d5ec83ce10.cu (original CUDA source) ====
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <string.h>
#include <cuda.h>
#include <sys/time.h>
#include <unistd.h>
#include <time.h>
#include <assert.h>
//#include <matrix_mul_kernel.cu>
#include <edge_cuda.h>
#include <sys/time.h>
#include <sys/mman.h>
#include <time.h>
#include <util.h>
#include <ipv4.h>
#include <ipv6.h>
#include <arpa/inet.h>
#include <common.h>
#define CEIL(x, y) ( (x)/(y) + ( (x)%(y) ? 1 : 0 ) )
#define MAX(x, y) ( (x)>(y) ? (x) : (y) )
#define IPV6_REG_NUM 16
/**< [xia-router0 - xge0,1,2,3], [xia-router1 - xge0,1,2,3] */
LL src_mac_arr[2][4] = {{0x36d3bd211b00, 0x37d3bd211b00, 0xa8d6a3211b00, 0xa9d6a3211b00},
{0x44d7a3211b00, 0x45d7a3211b00, 0x0ad7a3211b00, 0x0bd7a3211b00}};
/**< [xia-router2 - xge0,1,4,5], [xia-router2 - xge2,3,6,7] */
LL dst_mac_arr[2][4] = {{0x6c10bb211b00, 0x6d10bb211b00, 0xc8a610ca0568, 0xc9a610ca0568},
{0x64d2bd211b00, 0x65d2bd211b00, 0xa2a610ca0568, 0xa3a610ca0568}};
uint64_t rss_seed = 0xdeadbeef;
int nStreams = 32;
cudaStream_t* streams = NULL;
unsigned gTimerEventPeriod = 1000;
unsigned gTimerEventPeriodBatch = 200;
bool swizzle = false;
int n_batches = 1;
int n_requests_per_batch = 32;
bool save_regs = true;
int schedule_batch_size = 64;
#define CEIL(x, y) ( (x)/(y) + ( (x)%(y) ? 1 : 0 ) )
#define BLOCK_SIZE 32
#define MAX_THREADS_PER_BLOCK 1024
#define DEFAULT_MATRIX_SIZE 128
struct kernel_args {
unsigned buffer_size;
unsigned batch_size;
unsigned n_batches;
void* h_packet_buffer;
void* h_response_buffer;
void* g_packet_buffer;
void* g_response_buffer;
};
struct MemcParam
{
struct pkt_hdr_normal* packet_buf;
int* gpu_tbl24;
int* gpu_tbl8;
unsigned n;
int* reg_buffer;
bool save_regs;
};
struct MemcParamSwizzle
{
struct pkt_hdr_batch* packet_buf;
int* gpu_tbl24;
int* gpu_tbl8;
unsigned n;
};
enum RunConfig {
BASE_IPV6=0,
EVENT_IPV6,
TIMER_IPV6,
EVENT_TIMER_BG_IPV6,
EVENT_TIMER_BATCH_BG_IPV6,
BG_TASK
};
enum ScheduleType {
SINGLE=0,
TIMER,
BATCH
};
enum BgTaskType {
CONV=0,
MATRIX_MUL,
BACKPROP,
BFS
};
int bg_task_type = MATRIX_MUL;
// Check the result of a CUDA runtime call; on failure, print the error string and abort.
// Typical usage: CU_CHECK_ERR( cudaMalloc((void**)&ptr, bytes) );
#define CU_CHECK_ERR(err) \
do { \
if ( (err) != cudaSuccess ) { \
printf("CUDA Error: %s\n", cudaGetErrorString(cudaGetLastError())); \
abort(); \
} \
} while (0)
void randomInit(float* data, int size);
void configureParamMem(MemcParam* paramMem, size_t totalBufferSize, size_t batchSize, size_t maxBatches, struct kernel_args* args);
void configureParamMemSwizzle(MemcParamSwizzle* paramMem, size_t totalBufferSize, size_t batchSize, size_t maxBatches, struct kernel_args* args);
void PrintMatrices(float* h_A, float* h_B, float* h_C, int dimAx, int dimAy, int dimBx, int dimBy, int dimCx, int dimCy);
int run_conv_kernel(int argc, char **argv, bool block, bool warmup);
int IpForwardEDGE(int argc, char** argv, bool RunBackgroundTask, ScheduleType scheduleType, bool swizzle);
int IpForwardBase(int argc, char** argv, bool swizzle);
void generate_dummy_packet(struct pkt_hdr* pkt, unsigned gen_type);
unsigned init_normal_requests(struct kernel_args* args, bool alloc_response, int g_batch_size, int g_num_batches);
unsigned init_swizzle_requests(struct kernel_args* args, bool alloc_response, int g_batch_size, int g_num_batches);
void normal_packet(struct pkt_hdr_normal* pkt_hdr_normal_ptr, struct pkt_hdr* pkt, unsigned pkt_ind);
int in_cksum(unsigned char *buf, unsigned nbytes, int sum) ;
void swizzle_packet(struct pkt_hdr_batch* pkt_hdr_batch_ptr, struct pkt_hdr* pkt, unsigned pkt_ind);
static u_int32_t wrapsum (u_int32_t sum) ;
struct rte_lpm *ipv4_init();
void randomInit(float* data, int size);
int MatrixMulBase(int argc, char** argv, bool block);
unsigned init_ipv6_normal_requests(struct kernel_args* args, struct ipv6_prefix* prefix_arr, unsigned prefix_ind, int g_batch_size, int g_num_batches);
void ipv6_normal_packet(struct ipv6_pkt_hdr_normal* pkt_hdr_normal_ptr, struct ipv6_pkt_hdr* pkt, unsigned pkt_ind);
void ipv6_generate_dummy_packet(struct ipv6_pkt_hdr* pkt, struct ipv6_prefix* pfa);
static u_int32_t wrapsum (u_int32_t sum) ;
struct rte_lpm6 *ipv6_init();
//kernels
__global__ void ipv6_fwd_kernel(ipv6_pkt_hdr_normal* packet_batch, uint16_t* tbl24, uint16_t* tbl8, unsigned n_pkts, int* reg_buffer, bool save_regs);
__global__ void ipv6_fwd_kernel_save_regs(ipv6_pkt_hdr_normal* packet_batch, uint16_t* tbl24, uint16_t* tbl8, unsigned n_pkts, int* reg_buffer, bool save_regs);
extern "C" __global__ void matrixMul( float* C, float* A, float* B, int wA, int wB);
void _filterActs(float *images, int images_cols, int images_rows, float *filters, int filters_cols,
int filters_rows, float *targets, int targets_cols, int targets_rows,
int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
int numImgColors, int numGroups, float scaleTargets, float scaleOutput, int conv, cudaStream_t stream, bool warmup);
void run_backprop(int argc, char **argv, bool block);
extern "C" int setup(int argc, char** argv);
int bfs_main(int argc, char** argv);
void run_bfs(int argc, char **argv, bool block) {
printf("MARIA inside run_bfs\n");
bfs_main(argc, argv);
}
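// Integer ceiling division, e.g. div_up(1025, 1024) == 2; used below to size the kernel
// grid so that every request in a batch gets a thread.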
int div_up(int n, int d) {
return n / d + (((n < 0) ^ (d > 0)) && (n % d));
}
int main(int argc, char** argv) {
std::cout << "=== EDGE BEGIN ===" << std::endl;
int ret = 0;
RunConfig testToRun = EVENT_IPV6;
int opt;
opterr = 0;
int count = 0;
while( (opt = getopt(argc, argv, "t:s:g:q:z:p:c:b:n:i:") ) != -1 ) {
switch(opt) {
case 't':
count += 2;
testToRun = (RunConfig)atoi(optarg);
break;
case 'p':
count += 2;
gTimerEventPeriod = atoi(optarg);
break;
case 'i':
count += 2;
gTimerEventPeriodBatch = atoi(optarg);
break;
case 's':
count += 2;
swizzle = atoi(optarg);
break;
case 'n':
count += 2;
n_batches = atoi(optarg);
break;
case 'b':
count += 2;
n_requests_per_batch = atoi(optarg);
break;
case 'g':
count += 2;
bg_task_type = atoi(optarg);
break;
default:
std::cout << "Error: Unknown parameter: " << opt << std::endl;
abort();
}
}
streams = new cudaStream_t[nStreams];
for( unsigned i=0; i<nStreams; ++i ) {
CU_CHECK_ERR( cudaStreamCreate(&streams[i]) );
}
char** modArgv = &argv[count];
int modArgc = argc-count;
switch( testToRun ) {
case BASE_IPV6:
ret = IpForwardBase(modArgc, modArgv, swizzle);
break;
case EVENT_IPV6:
ret = IpForwardEDGE(modArgc, modArgv, false, SINGLE, swizzle);
break;
case TIMER_IPV6:
ret = IpForwardEDGE(modArgc, modArgv, false, TIMER, swizzle);
break;
case EVENT_TIMER_BG_IPV6:
ret = IpForwardEDGE(modArgc, modArgv, true, TIMER, swizzle);
break;
case EVENT_TIMER_BATCH_BG_IPV6:
ret = IpForwardEDGE(modArgc, modArgv, true, BATCH, swizzle);
break;
case BG_TASK:
printf("Running only background task: %d \n", bg_task_type);
if (bg_task_type == CONV) {
printf("Running background task: conv\n");
run_conv_kernel(argc, argv, true, false);
}
if (bg_task_type == MATRIX_MUL) {
MatrixMulBase(argc, argv, true);
}
if (bg_task_type == BACKPROP) {
run_backprop(argc, argv, true);
}
if (bg_task_type == BFS) {
run_bfs(argc, argv, true);
}
ret = 0;
break;
default:
std::cout << "Error: Undefined test configuration # (" << testToRun << ")" << std::endl;
break;
}
if( ret ) {
std::cout << "Error running test " << testToRun << " - Error=" << ret << std::endl;
}
std::cout << "=== EDGE END ===" << std::endl;
return ret;
}
int IpForwardBase(int argc, char** argv, bool swizzle) {
printf("IpForward EDGE Base test. Swizzle = %d\n", swizzle);
struct kernel_args k_args;
struct rte_lpm6 *lpm;
int num_prefixes = IPV6_NUM_RAND_PREFIXES;
int prefix_mem_size = num_prefixes * sizeof(struct ipv6_prefix);
struct ipv6_prefix *prefix_arr = (struct ipv6_prefix*)malloc(prefix_mem_size);
int mem_size = sizeof(*lpm) + (sizeof(lpm->tbl8[0]) *RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * IPV6_NUM_TBL8);
int rules_size = sizeof(struct rte_lpm6_rule) * 100000;
/* Allocate memory to store the LPM data structures. Zero out counters. */
lpm = (struct rte_lpm6 *) lpm6_hrd_malloc_socket(RTE_LPM6_SHM_KEY,
mem_size, 0);
int prefix_arr_i = rand() % IPV6_NUM_RAND_PREFIXES;
printf("Mem init trick - do ipv6_init in CPU\n");
CU_CHECK_ERR( edgeExtraipv6(1, (void*)lpm, IPV6_XIA_R2_PORT_MASK, (void*)prefix_arr, 1,n_requests_per_batch, n_batches ) );
init_ipv6_normal_requests(&k_args, prefix_arr, prefix_arr_i,n_requests_per_batch, n_batches);
/**< rte_lpm_tbl24_entry ~ rte_lpm_tbl8_entry ~ uint16_t */
int entry_sz = sizeof(struct rte_lpm6_tbl_entry);
int tbl24_bytes = RTE_LPM6_TBL24_NUM_ENTRIES * entry_sz;
int tbl8_bytes = (IPV6_NUM_TBL8 * RTE_LPM6_TBL8_GROUP_NUM_ENTRIES) * entry_sz;
int* gpu_tbl24 = 0;
int* gpu_tbl8 = 0;
/**< Alloc and copy tbl24 and tbl8 arrays to GPU memory */
printf("\tGPU master: alloc tbl24 (size = %lf MB) on device\n", (float)tbl24_bytes / 1e6);
CU_CHECK_ERR(cudaMalloc(&gpu_tbl24, tbl24_bytes));
CU_CHECK_ERR(cudaMemcpy(gpu_tbl24, lpm->tbl24, tbl24_bytes, cudaMemcpyHostToDevice));
printf("\tGPU master: alloc tbl8 (size = %lf MB) on device\n", (float)tbl8_bytes / 1e6);
CU_CHECK_ERR(cudaMalloc(&gpu_tbl8, tbl8_bytes));
CU_CHECK_ERR(cudaMemcpy(gpu_tbl8, lpm->tbl8, tbl8_bytes, cudaMemcpyHostToDevice));
CU_CHECK_ERR(cudaMemcpy(k_args.g_packet_buffer, k_args.h_packet_buffer, k_args.buffer_size, cudaMemcpyHostToDevice));
//launch kernel
dim3 block(n_requests_per_batch, 1, 1);
dim3 grid(n_batches, 1, 1);
unsigned n_packets = k_args.n_batches * k_args.batch_size;
ipv6_fwd_kernel<<<grid, block>>>((ipv6_pkt_hdr_normal*)k_args.g_packet_buffer, (uint16_t *)gpu_tbl24, (uint16_t *)gpu_tbl8, n_packets, NULL, false);
cudaDeviceSynchronize();
return 0;
}
int IpForwardEDGE(int argc, char** argv, bool RunBackgroundTask, ScheduleType scheduleType, bool swizzle) {
printf("IpForward EDGE Test RunBackgroundTask: %d, ScheduleType: %d, Swizzle: %d\n", RunBackgroundTask, scheduleType, swizzle);
int MaxEventsNum = n_batches;
unsigned single_buffer_alloc_size = (n_requests_per_batch * sizeof(struct ipv6_pkt_hdr_normal));
struct kernel_args *k_args = (struct kernel_args *)malloc(MaxEventsNum*sizeof(struct kernel_args));
struct rte_lpm6 *lpm;
//struct ipv6_prefix *prefix_arr;
int num_prefixes = IPV6_NUM_RAND_PREFIXES;
int prefix_mem_size = num_prefixes * sizeof(struct ipv6_prefix);
struct ipv6_prefix *prefix_arr = (struct ipv6_prefix*)malloc(prefix_mem_size);
int mem_size = sizeof(*lpm) + (sizeof(lpm->tbl8[0]) *RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * IPV6_NUM_TBL8);
int rules_size = sizeof(struct rte_lpm6_rule) * 100000;
/* Allocate memory to store the LPM data structures. Zero out counters. */
lpm = (struct rte_lpm6 *) lpm6_hrd_malloc_socket(RTE_LPM6_SHM_KEY,
mem_size, 0);
//lpm = ipv6_init(IPV6_XIA_R2_PORT_MASK, &prefix_arr, 1);
int prefix_arr_i = rand() % IPV6_NUM_RAND_PREFIXES;
printf("Mem init trick - do ipv6_init in CPU\n");
CU_CHECK_ERR( edgeExtraipv6(1, (void*)lpm, IPV6_XIA_R2_PORT_MASK, (void*)prefix_arr, 1,n_requests_per_batch, n_batches ) );
// initialize host memory
for( unsigned batch=0; batch<MaxEventsNum; ++batch ) {
unsigned buffer_alloc_size = init_ipv6_normal_requests(&k_args[batch], prefix_arr, prefix_arr_i,n_requests_per_batch, n_batches);
//printf("Generated input packets for batch %d. buffer_alloc_size = %lld\n", single_buffer_alloc_size);
}
int entry_sz = sizeof(struct rte_lpm6_tbl_entry);
//int entry_sz = sizeof(struct rte_lpm_tbl24_entry);
int tbl24_bytes = RTE_LPM6_TBL24_NUM_ENTRIES * entry_sz;
int tbl8_bytes = (IPV6_NUM_TBL8 * RTE_LPM6_TBL8_GROUP_NUM_ENTRIES) * entry_sz;
int* gpu_tbl24 = 0;
int* gpu_tbl8 = 0;
/**< Alloc and copy tbl24 and tbl8 arrays to GPU memory */
printf("\tGPU master: alloc tbl24 (size = %lf MB) on device\n", (float)tbl24_bytes / 1e6);
CU_CHECK_ERR(cudaMalloc(&gpu_tbl24, tbl24_bytes));
CU_CHECK_ERR(cudaMemcpy(gpu_tbl24, lpm->tbl24, tbl24_bytes, cudaMemcpyHostToDevice));
printf("\tGPU master: alloc tbl8 (size = %lf MB) on device\n", (float)tbl8_bytes / 1e6);
CU_CHECK_ERR(cudaMalloc(&gpu_tbl8, tbl8_bytes));
CU_CHECK_ERR(cudaMemcpy(gpu_tbl8, lpm->tbl8, tbl8_bytes, cudaMemcpyHostToDevice));
//------------------------------------------//
// setup execution parameters
dim3 block(min(n_requests_per_batch, MAX_THREADS_PER_BLOCK), 1, 1);
dim3 grid(div_up(n_requests_per_batch, MAX_THREADS_PER_BLOCK), 1, 1);
// Register the event kernel
int eventId;
eventId = cudaRegisterEvent((void*)ipv6_fwd_kernel, (void*)ipv6_fwd_kernel_save_regs, grid, block, 0);
//Setup the arguments
CU_CHECK_ERR( cudaSetupEventArgument(sizeof(struct ipv6_pkt_hdr_normal*), 0) ); //packet buffer
CU_CHECK_ERR( cudaSetupEventArgument(sizeof(uint16_t*), 8) ); //gpu_tbl24
CU_CHECK_ERR( cudaSetupEventArgument(sizeof(uint16_t*), 16) ); //gpu_tbl8
CU_CHECK_ERR( cudaSetupEventArgument(sizeof(unsigned), 24) ); //n_packets
CU_CHECK_ERR( cudaSetupEventArgument(sizeof(int*), 32) ); //reg_buffer
CU_CHECK_ERR( cudaSetupEventArgument(sizeof(bool), 40) ); //save_regs
// Configure the parameter memory
unsigned paramSize = sizeof(MemcParam);
MemcParam* paramMem = (MemcParam*)cudaConfigureEventParam(eventId, paramSize, MaxEventsNum, false);
printf("MARIA DEBUG allocated param mem = %lld \n", paramMem);
configureParamMem(paramMem, single_buffer_alloc_size, n_requests_per_batch, MaxEventsNum, k_args);
//copy from host to gpu
MemcParam* curParam = paramMem;
for( unsigned batch=0; batch<MaxEventsNum; ++batch ) {
CU_CHECK_ERR(cudaMemcpy(curParam->packet_buf, k_args[batch].h_packet_buffer, single_buffer_alloc_size, cudaMemcpyHostToDevice));
curParam->gpu_tbl24 = gpu_tbl24;
curParam->gpu_tbl8 = gpu_tbl8;
curParam++;
}
//////////////////////HACK
paramMem = (MemcParam*)cudaConfigureEventParam(eventId, paramSize, MaxEventsNum, true);
////////////////////////
printf("Scheduling EDGE event\n");
if (scheduleType == TIMER) {
//Schedule the event kernel to run on a timer
CU_CHECK_ERR( cudaScheduleTimerEvent(eventId, gTimerEventPeriod) );
} else if (scheduleType==SINGLE) { //event
for( unsigned batch=0; batch<MaxEventsNum; ++batch ) {
CU_CHECK_ERR( cudaScheduleEvent(eventId) );
};
} else { //batch
CU_CHECK_ERR( cudaScheduleEventTimerBatch(eventId, gTimerEventPeriod, schedule_batch_size, gTimerEventPeriodBatch) );
};
if (RunBackgroundTask) {
if (bg_task_type == CONV) {
printf("Running background task: conv\n");
run_conv_kernel(argc, argv, false, false);
}
if (bg_task_type == MATRIX_MUL) {
MatrixMulBase(argc, argv, false);
}
if (bg_task_type == BACKPROP) {
run_backprop(argc, argv, true);
}
if (bg_task_type == BFS) {
run_bfs(argc, argv, true);
}
}
CU_CHECK_ERR( cudaDeviceSynchronize() );
std::cout << "Success!" << std::endl;
return 0;
}
void configureParamMem(MemcParam* paramMem, size_t totalBufferSize, size_t batchSize, size_t maxBatches, struct kernel_args* args)
{
MemcParam* curParam = paramMem;
for( unsigned batch=0; batch<maxBatches; ++batch ) {
//CU_CHECK_ERR( cudaMalloc((void**)&curParam->packet_buf, totalBufferSize) );
curParam->packet_buf = (struct pkt_hdr_normal *)args[batch].g_packet_buffer;
int reg_buffer_size = 32 * IPV4_REG_NUM * 512;
CU_CHECK_ERR(cudaMalloc(&curParam->reg_buffer, reg_buffer_size));
curParam->n = batchSize;
curParam->save_regs = true;
curParam++;
}
}
void configureParamMemSwizzle(MemcParamSwizzle* paramMem, size_t totalBufferSize, size_t batchSize, size_t maxBatches, struct kernel_args* args)
{
MemcParamSwizzle* curParam = paramMem;
for( unsigned batch=0; batch<maxBatches; ++batch ) {
//CU_CHECK_ERR( cudaMalloc((void**)&curParam->packet_buf, totalBufferSize) );
curParam->packet_buf = (struct pkt_hdr_batch*)args[batch].g_packet_buffer;
curParam->n = batchSize;
//curParam->save_regs = save_regs;
curParam++;
}
}
void randomInit(float* data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
//////////////////////////////////////////////////////IP-FORWARDING related functions /////////////////////////////////////////////
unsigned init_normal_requests(struct kernel_args* args, bool alloc_response, int g_batch_size, int g_num_batches)
{
unsigned total_num_requests = g_batch_size * g_num_batches;
unsigned buffer_alloc_size = (g_num_batches * g_batch_size * sizeof(struct pkt_hdr_normal));
struct pkt_hdr_normal* packet_buffer = NULL;
struct pkt_hdr_normal* response_buffer = NULL;
struct pkt_hdr_normal* gpu_packet_buffer = NULL;
struct pkt_hdr_normal* gpu_response_buffer = NULL;
CU_CHECK_ERR(cudaMalloc(&gpu_packet_buffer, buffer_alloc_size));
packet_buffer = (pkt_hdr_normal*)malloc(buffer_alloc_size);
if (alloc_response) {
CU_CHECK_ERR(cudaMalloc(&gpu_response_buffer, buffer_alloc_size));
response_buffer = (pkt_hdr_normal*)malloc(buffer_alloc_size);
}
struct pkt_hdr pkt;
for (unsigned i=0; i<g_num_batches; ++i) {
for (unsigned j=0; j<g_batch_size; ++j) {
// Load in the actual packet
generate_dummy_packet(&pkt, 1);
unsigned ind = i*g_batch_size + j;
normal_packet(packet_buffer, &pkt, ind);
}
}
assert(args);
args->buffer_size = buffer_alloc_size;
args->batch_size = g_batch_size;
args->n_batches = g_num_batches;
args->h_packet_buffer = (void*)packet_buffer;
args->h_response_buffer = (void*)response_buffer;
args->g_packet_buffer = gpu_packet_buffer;
args->g_response_buffer = gpu_response_buffer;
return buffer_alloc_size;
}
unsigned init_swizzle_requests(struct kernel_args* args, bool alloc_response, int g_batch_size, int g_num_batches)
{
int res = CUDA_SUCCESS;
unsigned total_num_requests = g_batch_size * g_num_batches;
unsigned buffer_alloc_size = (g_num_batches * sizeof(struct pkt_hdr_batch));
struct pkt_hdr_batch* packet_buffer = NULL;
struct pkt_hdr_batch* response_buffer = NULL;
struct pkt_hdr_normal* gpu_packet_buffer = 0;
struct pkt_hdr_normal* gpu_response_buffer = 0;
CU_CHECK_ERR(cudaMalloc(&gpu_packet_buffer, buffer_alloc_size));
packet_buffer = (pkt_hdr_batch*)malloc(buffer_alloc_size);
if (alloc_response) {
CU_CHECK_ERR(cudaMalloc(&gpu_response_buffer, buffer_alloc_size));
response_buffer = (pkt_hdr_batch*)malloc(buffer_alloc_size);
}
unsigned pkt_to_print = 312;
bool verbose = false;
struct pkt_hdr pkt;
for (unsigned i=0; i<g_num_batches; ++i) {
for (unsigned j=0; j<g_batch_size; ++j) {
// Load in the actual packet
generate_dummy_packet(&pkt, 1);
if (verbose && j == pkt_to_print) {
//print_pkt_hdr(&pkt);
}
swizzle_packet(&packet_buffer[i], &pkt, j);
}
}
if (verbose) {
//print_swizzled_packet(&packet_buffer[0], pkt_to_print);
}
assert(args);
args->buffer_size = buffer_alloc_size;
args->batch_size = g_batch_size;
args->n_batches = g_num_batches;
args->h_packet_buffer = (void*)packet_buffer;
args->h_response_buffer = (void*)response_buffer;
args->g_packet_buffer = gpu_packet_buffer;
args->g_response_buffer = gpu_response_buffer;
return buffer_alloc_size;
}
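// swizzle_packet scatters one packet's header fields from the array-of-structures layout
// into the structure-of-arrays pkt_hdr_batch layout (field-major, indexed by pkt_ind).
// The point of the "swizzle" mode appears to be memory coalescing: consecutive GPU threads
// then read consecutive elements of the same field array instead of strided struct members.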
void swizzle_packet(struct pkt_hdr_batch* pkt_hdr_batch_ptr, struct pkt_hdr* pkt, unsigned pkt_ind)
{
memcpy((u_int8_t*)&pkt_hdr_batch_ptr->ether_dhost_1[pkt_ind], pkt->eh.ether_dhost, 4);
memcpy((u_int8_t*)&pkt_hdr_batch_ptr->ether_dhost_2[pkt_ind], pkt->eh.ether_dhost + 4, 2);
memcpy((u_int8_t*)&pkt_hdr_batch_ptr->ether_shost_1[pkt_ind], pkt->eh.ether_shost, 4);
memcpy((u_int8_t*)&pkt_hdr_batch_ptr->ether_shost_2[pkt_ind], pkt->eh.ether_shost + 4, 2);
pkt_hdr_batch_ptr->ether_type[pkt_ind] = (u_int32_t)pkt->eh.ether_type;
pkt_hdr_batch_ptr->ip_version[pkt_ind] = (u_int32_t)pkt->iph.version;
pkt_hdr_batch_ptr->ip_tos[pkt_ind] = (u_int32_t)pkt->iph.tos;
pkt_hdr_batch_ptr->ip_tot_len[pkt_ind] = (u_int32_t)pkt->iph.tot_len;
pkt_hdr_batch_ptr->ip_id[pkt_ind] = (u_int32_t)pkt->iph.id;
pkt_hdr_batch_ptr->ip_frag_off[pkt_ind] = (u_int32_t)pkt->iph.frag_off;
pkt_hdr_batch_ptr->ip_ttl[pkt_ind] = (u_int32_t)pkt->iph.ttl;
pkt_hdr_batch_ptr->ip_protocol[pkt_ind] = (u_int32_t)pkt->iph.protocol;
pkt_hdr_batch_ptr->ip_check[pkt_ind] = (u_int32_t)pkt->iph.check;
pkt_hdr_batch_ptr->ip_saddr[pkt_ind] = (u_int32_t)pkt->iph.saddr;
pkt_hdr_batch_ptr->ip_daddr[pkt_ind] = (u_int32_t)pkt->iph.daddr;
pkt_hdr_batch_ptr->udp_source[pkt_ind] = (u_int32_t)pkt->uh.source;
pkt_hdr_batch_ptr->udp_dest[pkt_ind] = (u_int32_t)pkt->uh.dest;
pkt_hdr_batch_ptr->udp_len[pkt_ind] = (u_int32_t)pkt->uh.len;
pkt_hdr_batch_ptr->udp_check[pkt_ind] = (u_int32_t)pkt->uh.check;
}
unsigned init_ipv6_normal_requests(struct kernel_args* args, struct ipv6_prefix* prefix_arr, unsigned prefix_ind, int g_batch_size, int g_num_batches)
{
unsigned total_num_requests = g_batch_size * g_num_batches;
unsigned buffer_alloc_size = (g_num_batches * g_batch_size * sizeof(struct ipv6_pkt_hdr_normal));
struct ipv6_pkt_hdr_normal* packet_buffer = NULL;
struct ipv6_pkt_hdr_normal* gpu_packet_buffer = NULL;
CU_CHECK_ERR( cudaMalloc(&gpu_packet_buffer, buffer_alloc_size) );
packet_buffer = (ipv6_pkt_hdr_normal*)malloc(buffer_alloc_size);
struct ipv6_pkt_hdr pkt;
for (unsigned i=0; i<g_num_batches; ++i) {
for (unsigned j=0; j<g_batch_size; ++j) {
// Load in the actual packet
ipv6_generate_dummy_packet(&pkt, &prefix_arr[prefix_ind]);
prefix_ind = (prefix_ind+1) % IPV6_NUM_RAND_PREFIXES;
unsigned ind = i*g_batch_size + j;
ipv6_normal_packet(packet_buffer, &pkt, ind);
}
}
assert(args);
args->buffer_size = buffer_alloc_size;
args->batch_size = g_batch_size;
args->n_batches = g_num_batches;
args->h_packet_buffer = (void*)packet_buffer;
args->h_response_buffer = NULL;
args->g_packet_buffer = gpu_packet_buffer;
args->g_response_buffer = 0;
return buffer_alloc_size;
}
void ipv6_normal_packet(struct ipv6_pkt_hdr_normal* pkt_hdr_normal_ptr, struct ipv6_pkt_hdr* pkt, unsigned pkt_ind)
{
// ETH
memcpy((u_int8_t*)&pkt_hdr_normal_ptr[pkt_ind].ether_dhost_1, pkt->eh.ether_dhost, 4);
memcpy((u_int8_t*)&pkt_hdr_normal_ptr[pkt_ind].ether_dhost_2, pkt->eh.ether_dhost + 4, 2);
memcpy((u_int8_t*)&pkt_hdr_normal_ptr[pkt_ind].ether_shost_1, pkt->eh.ether_shost, 4);
memcpy((u_int8_t*)&pkt_hdr_normal_ptr[pkt_ind].ether_shost_2, pkt->eh.ether_shost + 4, 2);
pkt_hdr_normal_ptr[pkt_ind].ether_type = (u_int32_t)pkt->eh.ether_type;
// IPH
pkt_hdr_normal_ptr[pkt_ind].ip_vtc_flow = (u_int32_t)pkt->iph.vtc_flow;
pkt_hdr_normal_ptr[pkt_ind].ip_payload_len = (u_int32_t)pkt->iph.payload_len;
pkt_hdr_normal_ptr[pkt_ind].ip_proto = (u_int32_t)pkt->iph.proto;
pkt_hdr_normal_ptr[pkt_ind].ip_hop_limits = (u_int32_t)pkt->iph.hop_limits;
memcpy(&pkt_hdr_normal_ptr[pkt_ind].ip_saddr1, &pkt->iph.src_addr[0], 4);
memcpy(&pkt_hdr_normal_ptr[pkt_ind].ip_saddr2, &pkt->iph.src_addr[4], 4);
memcpy(&pkt_hdr_normal_ptr[pkt_ind].ip_saddr3, &pkt->iph.src_addr[8], 4);
memcpy(&pkt_hdr_normal_ptr[pkt_ind].ip_saddr4, &pkt->iph.src_addr[12], 4);
memcpy(&pkt_hdr_normal_ptr[pkt_ind].ip_daddr1, &pkt->iph.dst_addr[0], 4);
memcpy(&pkt_hdr_normal_ptr[pkt_ind].ip_daddr2, &pkt->iph.dst_addr[4], 4);
memcpy(&pkt_hdr_normal_ptr[pkt_ind].ip_daddr3, &pkt->iph.dst_addr[8], 4);
memcpy(&pkt_hdr_normal_ptr[pkt_ind].ip_daddr4, &pkt->iph.dst_addr[12], 4);
// UDPH
pkt_hdr_normal_ptr[pkt_ind].udp_source = (u_int32_t)pkt->uh.source;
pkt_hdr_normal_ptr[pkt_ind].udp_dest = (u_int32_t)pkt->uh.dest;
pkt_hdr_normal_ptr[pkt_ind].udp_len = (u_int32_t)pkt->uh.len;
pkt_hdr_normal_ptr[pkt_ind].udp_check = (u_int32_t)pkt->uh.check;
}
unsigned g_pkt_id=0;
void generate_dummy_packet(struct pkt_hdr* pkt, unsigned gen_type)
{
unsigned pkt_size = sizeof(struct pkt_hdr);
if (gen_type == 0) {
u_int32_t src_ip = 0xC0A80002 /* from 192.168.0.2 */;
u_int32_t dst_ip = 0xC0A80104 /* 192.168.1.4 */;
// Ethernet
pkt->eh.ether_type = htons(0x0800);
pkt->eh.ether_shost[0] = 0x68;
pkt->eh.ether_shost[1] = 0x05;
pkt->eh.ether_shost[2] = 0xCA;
pkt->eh.ether_shost[3] = 0x13;
pkt->eh.ether_shost[4] = 0xCE;
pkt->eh.ether_shost[5] = 0x79;
pkt->eh.ether_dhost[0] = 0x68;
pkt->eh.ether_dhost[1] = 0x05;
pkt->eh.ether_dhost[2] = 0xCA;
pkt->eh.ether_dhost[3] = 0x1B;
pkt->eh.ether_dhost[4] = 0x1E;
pkt->eh.ether_dhost[5] = 0x66;
// IP
//pkt->iph.ihl = 5;
pkt->iph.version = 4;
pkt->iph.tos = 0;
pkt->iph.tot_len = htons(pkt_size - sizeof(ether_header));
pkt->iph.id = htons(g_pkt_id++);
pkt->iph.ttl = 64;
pkt->iph.frag_off = htons(0);
pkt->iph.protocol = IPPROTO_UDP;
pkt->iph.daddr = htonl(dst_ip);
pkt->iph.saddr = htonl(src_ip);
pkt->iph.check = wrapsum(in_cksum((unsigned char *)&pkt->iph, sizeof(struct ip_header), 0));
// UDP
pkt->uh.source = htons(9191);
pkt->uh.dest = htons(9960);
pkt->uh.len = htons(pkt_size - sizeof(ether_header) - sizeof(ip_header));
pkt->uh.check = 0; /* It must be 0 to compute the checksum */
//i = sizeof(struct ether_header) + sizeof(struct ip_header) + sizeof(struct udp_header);
/*udp_header->check = wrapsum(in_cksum((unsigned char *)udp_header, sizeof(struct udp_header),
in_cksum((unsigned char *)&buffer[i], send_len-i,
in_cksum((unsigned char *)&ip_header->saddr,
2*sizeof(ip_header->saddr),
IPPROTO_UDP + ntohs(udp_header->len)))));*/
} else if (gen_type == 1) {
set_mac(&pkt->eh.ether_shost[0], src_mac_arr[0][0]);
set_mac(&pkt->eh.ether_dhost[0], dst_mac_arr[0][0]);
pkt->eh.ether_type = htons(0x0800);
pkt->iph.version = 0x40 | 0x05;
pkt->iph.tos = 0;
pkt->iph.tot_len = htons(pkt_size - sizeof(ether_header));
pkt->iph.id = htons(g_pkt_id++);
pkt->iph.ttl = 64;
pkt->iph.frag_off = htons(0);
pkt->iph.protocol = IPPROTO_UDP;
pkt->iph.saddr = htonl(fastrand(&rss_seed));
pkt->iph.daddr = htonl(fastrand(&rss_seed));
pkt->iph.check = wrapsum(in_cksum((unsigned char *)&pkt->iph, sizeof(struct ip_header), 0));
// UDP
pkt->uh.source = htons(9191);
pkt->uh.dest = htons(9960);
pkt->uh.len = htons(pkt_size - sizeof(ether_header) - sizeof(ip_header));
pkt->uh.check = 0; /* It must be 0 to compute the checksum */
//if (g_pkt_id < 4) {
// print_pkt_hdr(pkt);
//}
} else {
//cout << "Error: Unknown gen_type = " << gen_type << endl;
abort();
}
return;
}
//unsigned g_pkt_id=0;
void ipv6_generate_dummy_packet(struct ipv6_pkt_hdr* pkt, struct ipv6_prefix* pfa)
{
unsigned pkt_size = sizeof(struct ipv6_pkt_hdr);
set_mac(&pkt->eh.ether_shost[0], src_mac_arr[0][0]);
set_mac(&pkt->eh.ether_dhost[0], dst_mac_arr[0][0]);
pkt->eh.ether_type = htons(0x0800);
pkt->iph.vtc_flow = 0;
pkt->iph.payload_len = 2 + sizeof(int) + sizeof(LL);
pkt->iph.proto = IPPROTO_IPV6;
pkt->iph.hop_limits = 64;
memcpy(pkt->iph.src_addr, pfa->bytes, IPV6_ADDR_LEN);
memcpy(pkt->iph.dst_addr, pfa->bytes, IPV6_ADDR_LEN);
// UDP
pkt->uh.source = htons(9191);
pkt->uh.dest = htons(9960);
pkt->uh.len = htons(pkt_size - sizeof(ether_header) - sizeof(struct ipv6_hdr));
pkt->uh.check = 0; /* It must be 0 to compute the checksum */
}
int in_cksum(unsigned char *buf, unsigned nbytes, int sum)
{
uint i;
/* Checksum all the pairs of bytes first... */
for (i = 0; i < (nbytes & ~1U); i += 2) {
sum += (u_int16_t) ntohs(*((u_int16_t *)(buf + i)));
/* Add carry. */
if(sum > 0xFFFF)
sum -= 0xFFFF;
}
/* If there's a single byte left over, checksum it, too. Network
byte order is big-endian, so the remaining byte is the high byte. */
if(i < nbytes) {
sum += buf [i] << 8;
/* Add carry. */
if(sum > 0xFFFF)
sum -= 0xFFFF;
}
return sum;
}
static u_int32_t wrapsum (u_int32_t sum)
{
sum = ~sum & 0xFFFF;
return htons(sum);
}
void normal_packet(struct pkt_hdr_normal* pkt_hdr_normal_ptr, struct pkt_hdr* pkt, unsigned pkt_ind)
{
memcpy((u_int8_t*)&pkt_hdr_normal_ptr[pkt_ind].ether_dhost_1, pkt->eh.ether_dhost, 4);
memcpy((u_int8_t*)&pkt_hdr_normal_ptr[pkt_ind].ether_dhost_2, pkt->eh.ether_dhost + 4, 2);
memcpy((u_int8_t*)&pkt_hdr_normal_ptr[pkt_ind].ether_shost_1, pkt->eh.ether_shost, 4);
memcpy((u_int8_t*)&pkt_hdr_normal_ptr[pkt_ind].ether_shost_2, pkt->eh.ether_shost + 4, 2);
pkt_hdr_normal_ptr[pkt_ind].ether_type = (u_int32_t)pkt->eh.ether_type;
pkt_hdr_normal_ptr[pkt_ind].ip_version = (u_int32_t)pkt->iph.version;
pkt_hdr_normal_ptr[pkt_ind].ip_tos = (u_int32_t)pkt->iph.tos;
pkt_hdr_normal_ptr[pkt_ind].ip_tot_len = (u_int32_t)pkt->iph.tot_len;
pkt_hdr_normal_ptr[pkt_ind].ip_id = (u_int32_t)pkt->iph.id;
pkt_hdr_normal_ptr[pkt_ind].ip_frag_off = (u_int32_t)pkt->iph.frag_off;
pkt_hdr_normal_ptr[pkt_ind].ip_ttl = (u_int32_t)pkt->iph.ttl;
pkt_hdr_normal_ptr[pkt_ind].ip_protocol = (u_int32_t)pkt->iph.protocol;
pkt_hdr_normal_ptr[pkt_ind].ip_check = (u_int32_t)pkt->iph.check;
pkt_hdr_normal_ptr[pkt_ind].ip_saddr = (u_int32_t)pkt->iph.saddr;
pkt_hdr_normal_ptr[pkt_ind].ip_daddr = (u_int32_t)pkt->iph.daddr;
pkt_hdr_normal_ptr[pkt_ind].udp_source = (u_int32_t)pkt->uh.source;
pkt_hdr_normal_ptr[pkt_ind].udp_dest = (u_int32_t)pkt->uh.dest;
pkt_hdr_normal_ptr[pkt_ind].udp_len = (u_int32_t)pkt->uh.len;
pkt_hdr_normal_ptr[pkt_ind].udp_check = (u_int32_t)pkt->uh.check;
}
/////////////////////////////////////BACKPROP//////////////////////////////////////////////
void run_backprop(int argc, char **argv, bool block) {
printf("MARIA inside run_backprop\n");
setup(argc, argv);
}
////////////////////////////////////////CONVOLUTION//////////////////////////////////////////////////
int run_conv_kernel(int argc, char** argv, bool block, bool warmup)
{
float *h_images;
float *h_filters;
float *h_targets;
float *d_images;
float *d_filters;
float *d_targets;
cudaStream_t stream;
CU_CHECK_ERR( cudaStreamCreate( &stream ) ); // Note: this local stream is never used below; all copies and the kernel launch are issued on the global streams[0].
printf("Starting convolution kernel\n");
// 100_1_28_16_5_0_1
// Testing data to try and match convnet
//int batch_size = 25;
//int n_images = batch_size;
//int n_img_colors = 32;
//int image_size = 16;
//int n_filters = 32;
//int filter_size = 5;
//int pad = 2;
//int stride = 1;
//int numGroups = 1;
//int modulesX = 1 + CEIL((2*pad + image_size - filter_size), stride);
////int modulesX = 1 + ceil( (double)(2*pad + image_size - filter_size) / (double)stride );
//int n_modules = modulesX * modulesX;
int batch_size = 200;
int n_images = batch_size;
int n_img_colors = 1;
int image_size = 28;
int n_filters = 16;
int filter_size = 5;
int pad = 0;
int stride = 1;
int numGroups = 1;
int modulesX = 1 + CEIL((2*pad + image_size - filter_size), stride);
int n_modules = modulesX * modulesX;
if(argc == 8){
printf("Using command line parameters\n");
// Batch_size | channels | image_size | num_filters | filter_size | pad | stride |
batch_size = atoi(argv[1]);
n_images = batch_size;
n_img_colors = atoi(argv[2]);
image_size = atoi(argv[3]);
n_filters = atoi(argv[4]);
filter_size = atoi(argv[5]);
pad = atoi(argv[6]);
stride = atoi(argv[7]);
modulesX = 1 + CEIL((2*pad + image_size - filter_size), stride);
//modulesX = 1 + ceil( (double)(2*pad + image_size - filter_size) / (double)stride );
n_modules = modulesX * modulesX;
}else{
printf("Using default parameters\n");
//printf("ERROR: Should not use default for parameter sweeping\n");
//abort();
}
// Allocate host/device buffers and copy the inputs to the GPU
int images_alloc_sz = n_images * (image_size*image_size*n_img_colors);
int filters_alloc_sz = n_filters * (filter_size*filter_size*n_img_colors);
int target_alloc_sz = n_images * (n_filters*n_modules);
h_images = (float *)malloc(images_alloc_sz*sizeof(float));
h_filters = (float *)malloc(filters_alloc_sz*sizeof(float));
h_targets = (float *)malloc(target_alloc_sz*sizeof(float));
CU_CHECK_ERR( cudaMalloc((void **)&d_images, images_alloc_sz*sizeof(float)) );
CU_CHECK_ERR( cudaMalloc((void **)&d_filters, filters_alloc_sz*sizeof(float)) );
CU_CHECK_ERR( cudaMalloc((void **)&d_targets, target_alloc_sz*sizeof(float)) );
// Populate GPU memory
cudaMemcpyAsync(d_images, h_images, images_alloc_sz*sizeof(float), cudaMemcpyHostToDevice, streams[0]);
cudaMemcpyAsync(d_filters, h_filters, filters_alloc_sz*sizeof(float), cudaMemcpyHostToDevice, streams[0]);
_filterActs(d_images, n_images, image_size*image_size*n_img_colors, d_filters, n_filters,
filter_size*filter_size*n_img_colors,
d_targets, n_images, n_filters*n_modules,
image_size, modulesX, modulesX, -1*pad, stride,
n_img_colors, numGroups, 0, 1, 1, streams[0], warmup);
cudaMemcpyAsync(h_targets, d_targets, target_alloc_sz*sizeof(float), cudaMemcpyDeviceToHost, streams[0]);
if( block ) {
CU_CHECK_ERR( cudaDeviceSynchronize() );
free(h_images);
free(h_filters);
free(h_targets);
CU_CHECK_ERR( cudaFree(d_images) );
CU_CHECK_ERR( cudaFree(d_filters) );
CU_CHECK_ERR( cudaFree(d_targets) );
}
printf("Complete...\n");
return 0;
}
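/*
 * Example invocation (assuming the program's main() forwards argc/argv to
 * run_conv_kernel unchanged): the arguments "100 1 28 16 5 0 1" reproduce the
 * 100_1_28_16_5_0_1 configuration noted above -- batch 100, 1 channel, 28x28
 * images, 16 filters of 5x5, pad 0, stride 1 -- giving
 * modulesX = 1 + CEIL(2*0 + 28 - 5, 1) = 24 and n_modules = 576.
 */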
/* Forward declarations of the convolution (filterActs) kernels defined further below */
template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int numColors, bool scale, bool checkImgBounds>
__global__ void filterActs_YxX_color(float* images, float* filters, float* targets,
const int numImages, const int numFilters,
const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
const int moduleStride, const int numModulesY, const int numModulesX, const int imgStride,
const float scaleTargets, const float scaleOutputs, const int conv);
template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int colorCache, bool scale, bool checkImgBounds>
__global__ void filterActs_YxX_sparse(float* images, float* filters, float* targets,
const int numImages, const int numFilters,
const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
const int moduleStride,
const int numModulesY, const int numModulesX, const int imgStride, const int numImgColors,
const int numGroups,
const float scaleTargets, const float scaleOutputs,
const int conv);
__global__ void emptyKernel()
{
}
/*******************************************************************************/
/*******************************************************************************/
/****************************** GPU Globals ************************************/
/*******************************************************************************/
/*******************************************************************************/
// NOTE: This requires key lengths to be in increments of 4 bytes
__device__ int fast_memcmp(const void *key1, const void *key2, int num){
    const unsigned *p1 = (const unsigned *)key1;
    const unsigned *p2 = (const unsigned *)key2;
    int main_loop = (int)(num / sizeof(int));
    for (int i = 0; i < main_loop; i++) {
        if (*(p1 + i) != *(p2 + i)) {
            return 0;
        }
    }
    return 1;
}
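/*
 * Usage sketch (ours, for illustration only): comparing two 16-byte lookup keys, a
 * length that satisfies the 4-byte-multiple requirement noted above. Keys whose length
 * is not a multiple of 4 would have their trailing bytes silently ignored.
 */
__device__ static inline int fast_memcmp_key16(const void *key1, const void *key2)
{
    return fast_memcmp(key1, key2, 16);
}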
/***********************************************/
/***********************************************/
/***********************************************/
#ifndef DIVUP
#define DIVUP(x, y) (((x) + (y) - 1) / (y))
#endif
////// PREFERS SHARED in config (cudaFuncCachePreferShared)
#define CEIL(x, y) ( (x)/(y) + ( (x)%(y) ? 1 : 0 ) )
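/*
 * Example: DIVUP and CEIL both round an integer division up and agree for
 * non-negative x and positive y. In _filterActs below, the grid's x dimension is
 * DIVUP(numImages, 32 * imgsPerThread); with the default batch of 200 images
 * (imgsPerThread == 1), that is DIVUP(200, 32) == 7 blocks.
 */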
template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int numColors, bool scale, bool checkImgBounds>
__global__ void filterActs_YxX_color(float* images, float* filters, float* targets,
const int numImages, const int numFilters,
const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
const int moduleStride, const int numModulesY, const int numModulesX, const int imgStride,
const float scaleTargets, const float scaleOutputs, const int conv) {
__shared__ float shFilters[B_Y*numColors][B_Y * filtersPerThread]; // pre-load B_Y pixels from B_Y*filtersPerThread filters
__shared__ float shImages[B_Y*numColors][B_X * imgsPerThread]; // pre-load B_Y pixels from B_X*imgsPerThread images
const int imgPixels = imgSizeY * imgSizeX;
const int filterPixels = filterSize * filterSize;
const int blocksPerModule = numFilters / (B_Y*filtersPerThread);
const int moduleIdx = blockIdx.y / blocksPerModule;
const int blockFilterIdx = blockIdx.y % blocksPerModule;
const int tidx = threadIdx.y * B_X + threadIdx.x;
const int imgLoadModPosY = (moduleIdx / numModulesX) * moduleStride;
const int imgLoadModPosX = (moduleIdx % numModulesX) * moduleStride;
const int shFilterLoadY = tidx / (B_Y * filtersPerThread);
const int shFilterLoadX = tidx % (B_Y * filtersPerThread);
const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x;
images += myImgIdx;
filters += filtersPerThread * B_Y * blockFilterIdx
+ shFilterLoadY * numFilters + shFilterLoadX;
if (!conv) {
filters += moduleIdx * numColors * filterPixels * numFilters;
}
targets += moduleIdx * numImages
+ (blockFilterIdx * B_Y * filtersPerThread + threadIdx.y) * numImages * numModulesY * numModulesX
+ myImgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for(int g = 0; g < imgsPerThread; g++) {
prod[f][g] = 0;
}
}
for (int p = 0; p < filterPixels; p += B_Y) {
/*
* Load B_Y pixels from B_Y*filtersPerThread filters
*/
if (shFilterLoadY < B_Y) {
#pragma unroll
for (int p2 = 0; p2 < B_Y; p2 += B_X/filtersPerThread) {
if (p + p2 + shFilterLoadY < filterPixels) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shFilters[shFilterLoadY + p2 + c * B_Y][shFilterLoadX] = filters[(c * filterPixels + p + p2) * numFilters];
}
} else {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shFilters[shFilterLoadY + p2 + c * B_Y][shFilterLoadX] = 0;
}
}
}
}
/*
* Load B_Y pixels from B_X*imgsPerThread images
*/
const int pixIdx = p + threadIdx.y;
if (pixIdx < filterPixels) {
const int x = paddingStart + imgLoadModPosX + pixIdx % filterSize;
const int y = paddingStart + imgLoadModPosY + pixIdx / filterSize;
if (y >= 0 && y < imgSizeY && x >= 0 && x < imgSizeX) {
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = images[imgStride * (c * imgPixels + y * imgSizeX + x) + i * B_X];
}
} else {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = 0;
}
}
}
} else { // Padding
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for (int c = 0; c < numColors; c++) {
shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = 0;
}
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < B_Y*numColors; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for(int g = 0; g < imgsPerThread; g++) {
prod[f][g] += shImages[i][g * B_X + threadIdx.x] * shFilters[i][threadIdx.y + f * B_Y];
}
}
}
__syncthreads();
}
if (scale) {
#pragma unroll
for (int g = 0; g < imgsPerThread; g++) {
if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[g * B_X + f * B_Y * numImages * numModulesY * numModulesX] = scaleTargets * targets[g * B_X + f * B_Y * numImages * numModulesY * numModulesX] + scaleOutputs * prod[f][g];
}
}
}
} else {
#pragma unroll
for (int g = 0; g < imgsPerThread; g++) {
if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[g * B_X + f * B_Y * numImages * numModulesY * numModulesX] = scaleOutputs * prod[f][g];
}
}
}
}
}
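/*
 * Launch-shape note (derived from the dispatch in _filterActs below, ignoring the
 * warmup override): threads are always dim3(B_X, B_Y) == (32, 4); blockIdx.x walks
 * groups of B_X*imgsPerThread images, and blockIdx.y enumerates (module, filter-block)
 * pairs, so gridDim.y == numModules * numFilters / (B_Y * filtersPerThread). For
 * instance, filterActs_YxX_color<4, 32, 1, 4, 1, false, false> expects
 * gridDim.y == numModules * numFilters / 16.
 */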
template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int colorCache, bool scale, bool checkImgBounds>
__global__ void filterActs_YxX_sparse(float* images, float* filters, float* targets,
const int numImages, const int numFilters,
const int imgSizeY, const int imgSizeX, const int filterSize, const int paddingStart,
const int moduleStride,
const int numModulesY, const int numModulesX, const int imgStride, const int numImgColors,
const int numGroups,
const float scaleTargets, const float scaleOutputs,
const int conv) {
__shared__ float shFilters[B_Y*colorCache][B_Y * filtersPerThread]; // pre-load B_Y pixels from B_Y*filtersPerThread filters
__shared__ float shImages[B_Y*colorCache][B_X * imgsPerThread]; // pre-load B_Y pixels from B_X*imgsPerThread images
const int imgPixels = imgSizeY * imgSizeX;
const int filterPixels = filterSize * filterSize;
const int numFilterColors = numImgColors / numGroups;
const int blocksPerModule = numFilters / (B_Y*filtersPerThread);
const int moduleIdx = blockIdx.y / blocksPerModule;
const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule);
const int numFiltersPerGroup = numFilters / numGroups;
const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup;
const int numModules = numModulesX * numModulesY;
const int blockColorIdx = numFilterColors * blockGroupIdx;
const int tidx = threadIdx.y * B_X + threadIdx.x;
const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride;
const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride;
const int shFilterLoadY = tidx / (B_Y * filtersPerThread);
const int shFilterLoadX = tidx % (B_Y * filtersPerThread);
const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x;
images += blockColorIdx * imgPixels * imgStride + myImgIdx;
filters += blockFilterIdx
+ shFilterLoadY * numFilters + shFilterLoadX;
if (!conv) {
filters += moduleIdx * numFilterColors * filterPixels * numFilters;
}
targets += moduleIdx * numImages
+ (blockFilterIdx + threadIdx.y) * numImages * numModules
+ myImgIdx;
float prod[filtersPerThread][imgsPerThread];
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for(int g = 0; g < imgsPerThread; g++) {
prod[f][g] = 0;
}
}
// __shared__ int imgPos[]
for (int oc = 0; oc < numFilterColors; oc += colorCache) { // oc stands for outer color (loop)
for (int p = 0; p < filterPixels; p += B_Y) {
/*
* Load B_Y pixels from B_Y*filtersPerThread filters
*/
if (shFilterLoadY < B_Y) {
#pragma unroll
for (int p2 = 0; p2 < B_Y; p2 += B_X/filtersPerThread) {
if (p + p2 + shFilterLoadY < filterPixels) {
#pragma unroll
for (int c = 0; c < colorCache; c++) {
shFilters[shFilterLoadY + p2 + c * B_Y][shFilterLoadX] = filters[((oc+c) * filterPixels + p + p2) * numFilters];
}
} else {
#pragma unroll
for (int c = 0; c < colorCache; c++) {
shFilters[shFilterLoadY + p2 + c * B_Y][shFilterLoadX] = 0;
}
}
}
}
/*
* Load B_Y pixels from B_X*imgsPerThread images
*/
const int pixIdx = p + threadIdx.y;
if (pixIdx < filterPixels) {
const int x = imgLoadModPosX + pixIdx % filterSize;
const int y = imgLoadModPosY + pixIdx / filterSize;
if (y >= 0 && y < imgSizeY && x >= 0 && x < imgSizeX) {
float* m = &images[imgStride * (oc * imgPixels + y * imgSizeX + x)];
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
if (!checkImgBounds || myImgIdx + i * B_X < numImages) {
#pragma unroll
for (int c = 0; c < colorCache; c++) {
shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = m[c * imgStride * imgPixels + i * B_X];
}
} else {
#pragma unroll
for (int c = 0; c < colorCache; c++) {
shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = 0;
}
}
}
} else { // Padding
#pragma unroll
for (int i = 0; i < imgsPerThread; i++) {
#pragma unroll
for (int c = 0; c < colorCache; c++) {
shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = 0;
}
}
}
}
__syncthreads();
#pragma unroll
for (int i = 0; i < B_Y*colorCache; i++) {
#pragma unroll
for(int f = 0; f < filtersPerThread; f++) {
#pragma unroll
for(int g = 0; g < imgsPerThread; g++) {
prod[f][g] += shImages[i][g * B_X + threadIdx.x] * shFilters[i][threadIdx.y + f * B_Y];
}
}
}
__syncthreads();
}
}
if (scale) {
#pragma unroll
for (int g = 0; g < imgsPerThread; g++) {
if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[g * B_X + f * B_Y * numImages * numModules] = scaleTargets * targets[g * B_X + f * B_Y * numImages * numModules] + scaleOutputs * prod[f][g];
}
}
}
} else {
#pragma unroll
for (int g = 0; g < imgsPerThread; g++) {
if (!checkImgBounds || myImgIdx + g * B_X < numImages) {
#pragma unroll
for (int f = 0; f < filtersPerThread; f++) {
targets[g * B_X + f * B_Y * numImages * numModules] = scaleOutputs * prod[f][g];
}
}
}
}
}
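/*
 * Note (derived from the dispatch in _filterActs below): every sparse instantiation
 * uses colorCache == 2, and the asserts in _filterActs guarantee numFilterColors is
 * even whenever this kernel is selected, so the outer oc loop above always covers
 * whole colorCache-sized chunks.
 */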
void _filterActs(float *images, int images_cols, int images_rows, float *filters, int filters_cols,
int filters_rows, float *targets, int targets_cols, int targets_rows,
int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride,
int numImgColors, int numGroups, float scaleTargets, float scaleOutput, int conv, cudaStream_t stream,
bool warmup) {
int numFilterColors = numImgColors / numGroups;
int numFilters = filters_cols;
int numModules = numModulesY * numModulesX;
int numImages = images_cols;
int imgPixels = images_rows/numImgColors;
int imgSizeX = imgPixels / imgSizeY;
int filterModuleMult = conv ? 1 : numModules;
assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0)));
assert(numGroups == 1 || numFilterColors % 2 == 0);
assert(numFilters % (16 * numGroups) == 0);
assert(numImgColors % numGroups == 0);
assert(images_rows == imgPixels * numImgColors);
assert(imgSizeY * imgSizeX == imgPixels);
int numFiltersPerGroup = numFilters / numGroups;
int imgStride = images_cols; // ???? //images.getStride(); // images does not need to be a contiguous matrix
int filterPixels = filters_rows / (filterModuleMult * numFilterColors);
int filterSize = int(sqrt(filterPixels));
assert(filterSize * filterSize == filterPixels);
assert(filters_rows == filterModuleMult * numFilterColors * filterPixels);
// These routines don't handle the case when only part of the image is visited in the convolution
assert(paddingStart <= 0);
assert(paddingStart + (numModulesX-1)*moduleStride + filterSize >= imgSizeX);
assert(paddingStart + (numModulesY-1)*moduleStride + filterSize >= imgSizeY);
assert(moduleStride <= filterSize);
int imgsPerThread = numImages % 128 == 0 ? 4 : numImages % 64 == 0 ? 2 : 1;
dim3 blocks = numFiltersPerGroup % 32 == 0 ? dim3(DIVUP(numImages, 32 * imgsPerThread), (numModules * numFilters) / (4 * 8))
: dim3(DIVUP(numImages, 32 * imgsPerThread), (numModules * numFilters) / (4 * 4));
if( warmup ) {
blocks = dim3(4, 16);
}
dim3 threads(32, 4);
bool checkImgBounds = numImages % (32*imgsPerThread) != 0;
printf("blocks(%d, %d, %d), threads(%d, %d, %d)\n", blocks.x, blocks.y, blocks.z, threads.x, threads.y, threads.z);
/*
if (scaleTargets == 0) {
targets.resize(numFilters * numModules, numImages);
} else {
assert(targets.getNumRows() == numFilters * numModules);
assert(targets.getNumCols() == numImages);
}
*/
assert(targets_rows == numFilters * numModules);
assert(targets_cols == numImages);
printf("\n\n");
printf("filters.getNumCols = %d, filters.getnumrows = %d, images.getNumCols = %d, images.getNumRows = %d, targets.getNumcols = %d, targets.getNumrows = %d\n\n",
filters_cols, filters_rows, images_cols, images_rows, targets_cols, targets_rows);
printf("\n\n\n====== Kernel Parameters ======\n\n");
printf("images = %p\n"
"filters = %p\n"
"targets = %p\n"
"numImages = %d\n"
"numFilters = %d\n"
"imgSizeY = %d\n"
"imgSizeX = %d\n"
"filterSize = %d\n"
"paddingStart = %d\n"
"moduleStride = %d\n"
"numModulesY = %d\n"
"numModulesX = %d\n"
"imgStride = %d\n"
"scaleTargts = %lf\n"
"scaleOutputs = %lf\n"
"conv = %d\n"
"numImgColors = %d\n"
"imgsPerThread = %d\n"
"numGroups = %d\n"
"checkImgBounds = %d\n"
"numFiltersPerGroup = %d\n"
"blocks = %d, %d, %d\n"
"threads = %d, %d, %d\n"
"\n===================================\n",
images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart,
moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv,
numImgColors, imgsPerThread, numGroups, checkImgBounds, numFiltersPerGroup, blocks.x, blocks.y, blocks.z, threads.x, threads.y, threads.z);
#if 0
dim3 tmpBlocks(4, 64, 1);
//filterActs_YxX_color < 4, 32, 1, 4, 1, false, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
filterActs_YxX_color<<<tmpBlocks, threads, 0, stream>>>(images, filters, targets, numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput,
#endif
if (imgsPerThread == 4) {
if (numImgColors <= 3) {
assert(numGroups == 1); // It has to be based on above definitions, but just to be sure.
if (scaleTargets == 0) { // don't scale
if (numImgColors == 1) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
////cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 1, false, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 1, false, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 1, false, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 1, false, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 2) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 2, false, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 2, false, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 2, false, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 2, false, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 3) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 3, false, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 3, false, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 3, false, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 3, false, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
} else { // do scale
if (numImgColors == 1) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 1, true, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 1, true, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 1, true, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 1, true, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 2) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 2, true, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 2, true, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 2, true, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 2, true, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 3) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 3, true, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 3, true, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 8, 3, true, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 4, 4, 3, true, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
}
} else {
if (scaleTargets == 0) { // don't scale
if (checkImgBounds) {
if (numFiltersPerGroup % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 4, 8, 2, false, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 4, 4, 2, false, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
} else {
if (numFiltersPerGroup % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 4, 8, 2, false, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 4, 4, 2, false, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
} else { // do scale
if (checkImgBounds) {
if (numFiltersPerGroup % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 4, 8, 2, true, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 4, 4, 2, true, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
} else {
if (numFiltersPerGroup % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 4, 8, 2, true, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 4, 4, 2, true, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
}
}
} else if (imgsPerThread == 2) {
if (numImgColors <= 3) {
assert(numGroups == 1); // It has to be based on above definitions, but just to be sure.
if (scaleTargets == 0) { // don't scale
if (numImgColors == 1) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 1, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 8, 1, false, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 1, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 4, 1, false, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 1, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 8, 1, false, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 1, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 4, 1, false, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 2) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 8, 2, false, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 4, 2, false, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 8, 2, false, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 4, 2, false, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 3) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 3, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 8, 3, false, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 3, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 4, 3, false, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 3, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 8, 3, false, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 3, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 4, 3, false, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
} else { // do scale
if (numImgColors == 1) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 1, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 8, 1, true, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 1, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 4, 1, true, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 1, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 8, 1, true, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 1, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 4, 1, true, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 2) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 2, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 8, 2, true, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 2, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 4, 2, true, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 2, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 8, 2, true, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 2, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 4, 2, true, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 3) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 3, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 8, 3, true, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 3, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 4, 3, true, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 8, 3, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 8, 3, true, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 2, 4, 3, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 2, 4, 3, true, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
}
} else {
if (scaleTargets == 0) { // don't scale
if (checkImgBounds) {
if (numFiltersPerGroup % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 2, 8, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 2, 8, 2, false, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 2, 4, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 2, 4, 2, false, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
} else {
if (numFiltersPerGroup % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 2, 8, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 2, 8, 2, false, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 2, 4, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 2, 4, 2, false, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
} else { // do scale
if (checkImgBounds) {
if (numFiltersPerGroup % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 2, 8, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 2, 8, 2, true, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 2, 4, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 2, 4, 2, true, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
} else {
if (numFiltersPerGroup % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 2, 8, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 2, 8, 2, true, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 2, 4, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 2, 4, 2, true, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
}
}
} else {
if (numImgColors <= 3) {
assert(numGroups == 1); // It has to be based on above definitions, but just to be sure.
if (scaleTargets == 0) { // don't scale
if (numImgColors == 1) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 1, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 1, false, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
printf("\n\n\n\ I AM HERE \n\n\n");
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 1, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 1, false, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 1, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 1, false, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 1, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 1, false, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 2) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 2, false, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 2, false, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 2, false, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 2, false, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 3) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 3, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 3, false, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
printf("\n\n\n\nBING HERE\n\n\n\n");
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 3, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 3, false, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 3, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 3, false, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 3, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 3, false, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
} else { // do scale
if (numImgColors == 1) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 1, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 1, true, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 1, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 1, true, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 1, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 1, true, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 1, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 1, true, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 2) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 2, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 2, true, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 2, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 2, true, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 2, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 2, true, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 2, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 2, true, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
} else if (numImgColors == 3) {
if (checkImgBounds) {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 3, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 3, true, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 3, true, true >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 3, true, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
} else {
if (numFilters % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 8, 3, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 8, 3, true, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 1, 4, 3, true, false >, cudaFuncCachePreferShared);
filterActs_YxX_color < 4, 32, 1, 4, 3, true, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, scaleTargets, scaleOutput, conv);
}
}
}
}
} else {
if (scaleTargets == 0) { // don't scale
if (checkImgBounds) {
if (numFiltersPerGroup % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 1, 8, 2, false, true >, cudaFuncCachePreferShared);
printf("\n\n\n\n\n BING BING BING \n\n\n\n\n");
filterActs_YxX_sparse < 4, 32, 1, 8, 2, false, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 1, 4, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 1, 4, 2, false, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
} else {
if (numFiltersPerGroup % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 1, 8, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 1, 8, 2, false, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 1, 4, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 1, 4, 2, false, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
} else { // do scale
if (checkImgBounds) {
if (numFiltersPerGroup % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 1, 8, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 1, 8, 2, true, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 1, 4, 2, false, true >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 1, 4, 2, true, true > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
} else {
if (numFiltersPerGroup % 32 == 0) {
//cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 1, 8, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 1, 8, 2, true, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
} else {
//cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 1, 4, 2, false, false >, cudaFuncCachePreferShared);
filterActs_YxX_sparse < 4, 32, 1, 4, 2, true, false > <<<blocks, threads, 0, stream>>>(images, filters, targets,
numImages, numFilters, imgSizeY, imgSizeX, filterSize, paddingStart, moduleStride, numModulesY, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv);
}
}
}
}
}
}
__device__ long long src_mac_arr_d[8] = {0x6c10bb211b00, 0x6d10bb211b00, 0x64d2bd211b00, 0x65d2bd211b00,
0xc8a610ca0568, 0xc9a610ca0568, 0xa2a610ca0568, 0xa3a610ca0568};
__device__ long long dst_mac_arr_d[8] = {0x36d3bd211b00, 0x37d3bd211b00, 0x44d7a3211b00, 0x45d7a3211b00,
0xa8d6a3211b00, 0xa9d6a3211b00, 0x0ad7a3211b00, 0x0bd7a3211b00};
__device__ uint32_t ipv6_port_lookup(uint16_t* tbl24, uint16_t* tbl8, ipv6_pkt_hdr_normal* pkt)
{
int status;
uint8_t first_byte;
uint32_t tbl24_index, tbl8_index, tbl_entry;
first_byte = 3;
uint32_t addr = pkt->ip_saddr1;
tbl24_index = (addr >> 8);
tbl_entry = tbl24[tbl24_index];
uint32_t offset = 0;
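// Longest-prefix-match walk: the top 24 bits of the source address index
// tbl24; while the returned entry has the valid-extension flag set, follow
// successive tbl8 groups, consuming one more address byte per iteration.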
do {
if ((tbl_entry & RTE_LPM6_VALID_EXT_ENTRY_BITMASK) == RTE_LPM6_VALID_EXT_ENTRY_BITMASK) {
if (first_byte == 4) {
addr = pkt->ip_saddr2;
offset = 24;
} else if (first_byte == 8) {
addr = pkt->ip_saddr3;
offset = 24;
} else if (first_byte == 12) {
addr = pkt->ip_saddr4;
offset = 24;
}
uint8_t x = (uint8_t)((addr >> offset) & 0xFF);
tbl8_index = x + ((tbl_entry & RTE_LPM6_TBL8_BITMASK) * RTE_LPM6_TBL8_GROUP_NUM_ENTRIES);
tbl_entry = tbl8[tbl8_index];
first_byte++;
offset -= 8;
status = 1;
} else {
status = 0;
}
} while (status == 1);
return tbl_entry;
}
__global__ void ipv6_fwd_kernel(ipv6_pkt_hdr_normal* packet_batch, uint16_t* tbl24, uint16_t* tbl8, unsigned n_pkts, int* reg_buffer, bool save_regs)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
if (gid < n_pkts) {
ipv6_pkt_hdr_normal* pkt = &packet_batch[gid];
uint32_t tbl_entry = ipv6_port_lookup(tbl24, tbl8, pkt);
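// rewrite the Ethernet source/destination MACs for the port selected by the lookup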
packet_batch[gid].ether_dhost_1 = (uint32_t)(dst_mac_arr_d[tbl_entry] >> 16);
packet_batch[gid].ether_dhost_2 = (uint32_t)(dst_mac_arr_d[tbl_entry] & 0xFFFF);
packet_batch[gid].ether_shost_1 = (uint32_t)(src_mac_arr_d[tbl_entry] >> 16);
packet_batch[gid].ether_shost_2 = (uint32_t)(src_mac_arr_d[tbl_entry] & 0xFFFF);
}
}
__global__ void ipv6_fwd_kernel_save_regs(ipv6_pkt_hdr_normal* packet_batch, uint16_t* tbl24, uint16_t* tbl8, unsigned n_pkts, int* reg_buffer, bool save_regs)
{
int gid = blockIdx.x * blockDim.x + threadIdx.x;
unsigned tid = threadIdx.x;
if (save_regs) {
//save regs
for (int i=0; i<IPV6_REG_NUM; i++) {
//save_register(reg_buffer);
reg_buffer[tid * IPV6_REG_NUM + i] = tid;
}
}
if (gid < n_pkts) {
ipv6_pkt_hdr_normal* pkt = &packet_batch[gid];
uint32_t tbl_entry = ipv6_port_lookup(tbl24, tbl8, pkt);
packet_batch[gid].ether_dhost_1 = (uint32_t)(dst_mac_arr_d[tbl_entry] >> 16);
packet_batch[gid].ether_dhost_2 = (uint32_t)(dst_mac_arr_d[tbl_entry] & 0xFFFF);
packet_batch[gid].ether_shost_1 = (uint32_t)(src_mac_arr_d[tbl_entry] >> 16);
packet_batch[gid].ether_shost_2 = (uint32_t)(src_mac_arr_d[tbl_entry] & 0xFFFF);
}
if (save_regs) {
//save regs
for (int i=0; i<IPV6_REG_NUM; i++) {
tid = reg_buffer[tid * IPV6_REG_NUM + i];
}
}
}
int MatrixMulBase(int argc, char** argv, bool block) {
printf("MatrixMul EDGE Base test\n");
size_t dimAx = 512;
size_t dimAy = 512;
size_t dimBx = 512;
size_t dimBy = 512;
if(argc == 8){
printf("Using command line parameters\n");
// dimAx | dimAy | dimBx | dimBy
dimAx = atoi(argv[1]);
dimAy = atoi(argv[2]);
dimBx = atoi(argv[3]);
dimBy = atoi(argv[4]);
}else{
printf("Using default parameters\n");
}
size_t dimCx = dimAx;
size_t dimCy = dimBy;
//allocate host mem
unsigned int size_A = dimAx*dimAy;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*)malloc(mem_size_A);
unsigned int size_B = dimBx*dimBy;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*)malloc(mem_size_B);
unsigned int size_C = dimCx*dimCy;
unsigned int mem_size_C = sizeof(float) * size_C;
float* h_C = (float*) malloc(mem_size_C);
printf("initializing host memory\n");
//randomInit(h_A, size_A);
//randomInit(h_B, size_B);
// setup execution parameters
dim3 threads(16, 16, 1);
dim3 grid(MAX(1, dimCx/threads.x), MAX(1, dimCy/threads.y), 1);
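// note: integer division truncates, so dimCx and dimCy are assumed to be
// multiples of the 16x16 block size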
float* d_A;
float* d_B;
float* d_C;
printf("Allocating the matrices in GPU mem\n");
CU_CHECK_ERR( cudaMalloc((void**)&d_A, dimAx*dimAy*sizeof(float)) );
CU_CHECK_ERR( cudaMalloc((void**)&d_B, dimBx*dimBy*sizeof(float)) );
CU_CHECK_ERR( cudaMalloc((void**)&d_C, dimAx*dimBy*sizeof(float)) );
printf("Copying the matrices to GPU mem\n");
CU_CHECK_ERR( cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice) );
CU_CHECK_ERR( cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice) );
matrixMul<<<grid, threads>>>(d_C, d_A, d_B, dimAx, dimBx);
if (block) {
cudaDeviceSynchronize();
CU_CHECK_ERR( cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost) );
//PrintMatrices(h_A, h_B, h_C, dimAx, dimAy, dimBx, dimBy, dimCx, dimCy);
}
printf("Complete\n");
return 0;
}
#define XBLOCK_SIZE 16
#define YBLOCK_SIZE 16
//#include "matrixMul.h"
#define CHECK_BANK_CONFLICTS 0
#if CHECK_BANK_CONFLICTS
#define AS(i, j) cutilBankChecker(((float*)&As[0][0]), (BLOCK_SIZE * i + j))
#define BS(i, j) cutilBankChecker(((float*)&Bs[0][0]), (BLOCK_SIZE * i + j))
#else
#define AS(i, j) As[i][j]
#define BS(i, j) Bs[i][j]
#endif
////////////////////////////////////////////////////////////////////////////////
//! Matrix multiplication on the device: C = A * B
//! wA is A's width and wB is B's width
////////////////////////////////////////////////////////////////////////////////
__global__ void matrixMul( float* C, float* A, float* B, int wA, int wB) {
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * XBLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = XBLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = YBLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = YBLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[XBLOCK_SIZE][YBLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[YBLOCK_SIZE][XBLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
AS(ty, tx) = A[a + wA * ty + tx];
BS(ty, tx) = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
for (int k = 0; k < YBLOCK_SIZE; ++k)
Csub += AS(ty, k) * BS(k, tx);
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * XBLOCK_SIZE * by + YBLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
|
523419224cc28a3ccaeed9d17f8d72ee23d1e1df.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/fluid/operators/math/blas.h"
#include "paddle/fluid/operators/math/fc.h"
namespace paddle {
namespace operators {
namespace math {
template <typename T, bool DoRelu>
__global__ void InplaceAddReluKernel(const T* bias, T* data, int M, int N) {
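// Grid-stride loop over the M rows; within a row, threads stride across the
// N columns in steps of blockDim.x, adding bias[j] and (optionally) applying
// ReLU in place.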
for (int i = blockIdx.x; i < M; i += gridDim.x) {
int index = i * N + threadIdx.x;
for (int j = threadIdx.x; j < N; j += blockDim.x) {
T tmp = data[index] + bias[j];
if (DoRelu) {
data[index] = (tmp > 0) ? tmp : 0;
} else {
data[index] = tmp;
}
index += blockDim.x;
}
}
}
template <typename T>
class FCFunctor<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& context, const int M,
const int N, const int K, const T* X, const T* W, T* Y,
const T* B = nullptr, bool relu = false,
bool padding_weights = false) {
PADDLE_ENFORCE_EQ(
padding_weights, false,
platform::errors::PermissionDenied(
"Weight padding in fc can not be used in GPU scope."));
auto blas = math::GetBlas<platform::CUDADeviceContext, T>(context);
blas.GEMM(false, false, M, N, K, static_cast<T>(1.0), X, K, W, N,
static_cast<T>(0.0), Y, N);
if (B == NULL) {
return;
}
const int kThreadsPerBlock = 1024;
int max_threads = context.GetMaxPhysicalThreadCount();
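// one thread per output column, rounded up to a warp multiple (32) and capped
// at kThreadsPerBlock; enough blocks to keep the device's physical threads busy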
int num_threads = ::min(kThreadsPerBlock, (((N + 31) >> 5) << 5));
int num_blocks = ::max(max_threads / num_threads, 1);
if (relu) {
hipLaunchKernelGGL(( InplaceAddReluKernel<
T, true>), dim3(num_blocks), dim3(num_threads), 0, context.stream(), B, Y, M,
N);
} else {
hipLaunchKernelGGL(( InplaceAddReluKernel<
T, false>), dim3(num_blocks), dim3(num_threads), 0, context.stream(), B, Y, M,
N);
}
}
};
template class FCFunctor<platform::CUDADeviceContext, float>;
template class FCFunctor<platform::CUDADeviceContext, double>;
} // namespace math
} // namespace operators
} // namespace paddle
| 523419224cc28a3ccaeed9d17f8d72ee23d1e1df.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include "paddle/fluid/operators/math/blas.h"
#include "paddle/fluid/operators/math/fc.h"
namespace paddle {
namespace operators {
namespace math {
template <typename T, bool DoRelu>
__global__ void InplaceAddReluKernel(const T* bias, T* data, int M, int N) {
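// Grid-stride loop over the M rows; within a row, threads stride across the
// N columns in steps of blockDim.x, adding bias[j] and (optionally) applying
// ReLU in place.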
for (int i = blockIdx.x; i < M; i += gridDim.x) {
int index = i * N + threadIdx.x;
for (int j = threadIdx.x; j < N; j += blockDim.x) {
T tmp = data[index] + bias[j];
if (DoRelu) {
data[index] = (tmp > 0) ? tmp : 0;
} else {
data[index] = tmp;
}
index += blockDim.x;
}
}
}
template <typename T>
class FCFunctor<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& context, const int M,
const int N, const int K, const T* X, const T* W, T* Y,
const T* B = nullptr, bool relu = false,
bool padding_weights = false) {
PADDLE_ENFORCE_EQ(
padding_weights, false,
platform::errors::PermissionDenied(
"Weight padding in fc can not be used in GPU scope."));
auto blas = math::GetBlas<platform::CUDADeviceContext, T>(context);
blas.GEMM(false, false, M, N, K, static_cast<T>(1.0), X, K, W, N,
static_cast<T>(0.0), Y, N);
if (B == NULL) {
return;
}
const int kThreadsPerBlock = 1024;
int max_threads = context.GetMaxPhysicalThreadCount();
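// one thread per output column, rounded up to a warp multiple (32) and capped
// at kThreadsPerBlock; enough blocks to keep the device's physical threads busy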
int num_threads = std::min(kThreadsPerBlock, (((N + 31) >> 5) << 5));
int num_blocks = std::max(max_threads / num_threads, 1);
if (relu) {
InplaceAddReluKernel<
T, true><<<num_blocks, num_threads, 0, context.stream()>>>(B, Y, M,
N);
} else {
InplaceAddReluKernel<
T, false><<<num_blocks, num_threads, 0, context.stream()>>>(B, Y, M,
N);
}
}
};
template class FCFunctor<platform::CUDADeviceContext, float>;
template class FCFunctor<platform::CUDADeviceContext, double>;
} // namespace math
} // namespace operators
} // namespace paddle
|
41ecbf25fe248e6f83b08c99a41c31ff0c8b62fa.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cstdlib>
#include <ctime>
#include <cstdio>
#include <vector>
#include <chrono>
#include <assert.h>
#include <hipsparse.h>
#include <rocblas.h>
using namespace std;
const int n = 10000;
const int n_feature = 128;
const double p = .01;
const int max_e = (int)(n * n * p * 3);
size_t f_size = n * n_feature * sizeof(float);
float f1[n * n_feature], f2[n * n_feature];
float *d_f1, *d_f2;
int indices[max_e], values[max_e];
int *d_indices, *d_values;
float *d_a;
int tote(0);
hipblasHandle_t cublasH = NULL;
hipsparseHandle_t cusparseH = NULL;
hipStream_t stream = NULL;
hipsparseMatDescr_t descrA = NULL;
void prepareData() {
srand(time(0));
const int k = 1e6;
tote = 0;
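// Build a random sparse adjacency matrix in CSR form: indices[] holds the row
// pointers, values[] the column indices, and a[] (below) the non-zero values.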
for (int i = 0; i < n; ++i) {
indices[i] = tote;
for (int j = 0; j < n; ++j) {
if (rand() % k < p * k) {
values[tote++] = j;
}
}
for (int j = 0; j < n_feature; ++j) {
f1[i * n_feature + j] = rand() % k / (float)k;
f2[i * n_feature + j] = 0;
}
}
indices[n] = tote;
printf("Graph edges %d\n", tote);
hipMalloc(&d_indices, (n + 1)* sizeof(int));
hipMemcpy(d_indices, indices, (n + 1)* sizeof(int), hipMemcpyHostToDevice);
hipMalloc(&d_values, tote * sizeof(int));
hipMemcpy(d_values, values, tote * sizeof(int), hipMemcpyHostToDevice);
float *a = new float[tote];
for (int i = 0; i < tote; ++i) {
a[i] = 1.;
}
hipMalloc(&d_a, tote * sizeof(float));
hipMemcpy(d_a, a, tote * sizeof(float), hipMemcpyHostToDevice);
hipMalloc(&d_f1, f_size);
hipMemcpy(d_f1, f1, f_size, hipMemcpyHostToDevice);
hipMalloc(&d_f2, f_size);
hipMemset(d_f2, 0, f_size);
hipStreamCreateWithFlags(&stream, hipStreamNonBlocking);
auto cublasStat = hipblasCreate(&cublasH);
assert(HIPBLAS_STATUS_SUCCESS == cublasStat);
cublasStat = hipblasSetStream(cublasH, stream);
assert(HIPBLAS_STATUS_SUCCESS == cublasStat);
auto cusparseStat = hipsparseCreate(&cusparseH);
assert(HIPSPARSE_STATUS_SUCCESS == cusparseStat);
cusparseStat = hipsparseSetStream(cusparseH, stream);
assert(HIPSPARSE_STATUS_SUCCESS == cusparseStat);
/* step 2: configuration of matrix A */
cusparseStat = hipsparseCreateMatDescr(&descrA);
assert(HIPSPARSE_STATUS_SUCCESS == cusparseStat);
hipsparseSetMatIndexBase(descrA,HIPSPARSE_INDEX_BASE_ZERO);
hipsparseSetMatType(descrA, HIPSPARSE_MATRIX_TYPE_GENERAL );
hipDeviceSynchronize();
}
inline double getDuration(std::chrono::time_point<std::chrono::system_clock> a,
std::chrono::time_point<std::chrono::system_clock> b) {
return std::chrono::duration<double>(b - a).count();
}
#define timestamp(__var__) auto __var__ = std::chrono::system_clock::now();
double runOnce() {
float alpha = 1.;
float beta = 0.;
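// time 10 back-to-back CSR SpMM calls: F2 = alpha * A * F1 + beta * F2, with
// A (n x n, tote non-zeros) sparse and F1/F2 (n x n_feature) dense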
timestamp(t1);
for (int i = 0; i < 10; ++i) {
hipsparseScsrmm(cusparseH, HIPSPARSE_OPERATION_NON_TRANSPOSE,
n, n_feature, n, tote, &alpha,
descrA, d_a, d_indices, d_values,
d_f1, n, &beta, d_f2, n);
}
hipStreamSynchronize(stream);
timestamp(t2);
hipMemcpy(f2, d_f2, f_size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
float sum(0);
for (int i = 0; i < n; ++i) {
for (int k = 0; k < n_feature; ++k) {
sum += f2[i * n_feature + k];
}
}
fprintf(stderr, "F20 %f Check sum %f\n", f2[0], sum);
return getDuration(t1, t2);
}
int main() {
prepareData();
double total_time = 0;
int times = 10;
fprintf(stderr, "Ready\n");
for (int i = 0; i < times; ++i) {
total_time += runOnce();
}
fprintf(stderr, "Avg time %.9lf s\n", total_time / times);
}
| 41ecbf25fe248e6f83b08c99a41c31ff0c8b62fa.cu | #include <iostream>
#include <cstdlib>
#include <ctime>
#include <cstdio>
#include <vector>
#include <chrono>
#include <assert.h>
#include <cusparse.h>
#include <cublas_v2.h>
using namespace std;
const int n = 10000;
const int n_feature = 128;
const double p = .01;
const int max_e = (int)(n * n * p * 3);
size_t f_size = n * n_feature * sizeof(float);
float f1[n * n_feature], f2[n * n_feature];
float *d_f1, *d_f2;
int indices[max_e], values[max_e];
int *d_indices, *d_values;
float *d_a;
int tote(0);
cublasHandle_t cublasH = NULL;
cusparseHandle_t cusparseH = NULL;
cudaStream_t stream = NULL;
cusparseMatDescr_t descrA = NULL;
void prepareData() {
srand(time(0));
const int k = 1e6;
tote = 0;
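// Build a random sparse adjacency matrix in CSR form: indices[] holds the row
// pointers, values[] the column indices, and a[] (below) the non-zero values.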
for (int i = 0; i < n; ++i) {
indices[i] = tote;
for (int j = 0; j < n; ++j) {
if (rand() % k < p * k) {
values[tote++] = j;
}
}
for (int j = 0; j < n_feature; ++j) {
f1[i * n_feature + j] = rand() % k / (float)k;
f2[i * n_feature + j] = 0;
}
}
indices[n] = tote;
printf("Graph edges %d\n", tote);
cudaMalloc(&d_indices, (n + 1)* sizeof(int));
cudaMemcpy(d_indices, indices, (n + 1)* sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc(&d_values, tote * sizeof(int));
cudaMemcpy(d_values, values, tote * sizeof(int), cudaMemcpyHostToDevice);
float *a = new float[tote];
for (int i = 0; i < tote; ++i) {
a[i] = 1.;
}
cudaMalloc(&d_a, tote * sizeof(float));
cudaMemcpy(d_a, a, tote * sizeof(float), cudaMemcpyHostToDevice);
cudaMalloc(&d_f1, f_size);
cudaMemcpy(d_f1, f1, f_size, cudaMemcpyHostToDevice);
cudaMalloc(&d_f2, f_size);
cudaMemset(d_f2, 0, f_size);
cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);
auto cublasStat = cublasCreate(&cublasH);
assert(CUBLAS_STATUS_SUCCESS == cublasStat);
cublasStat = cublasSetStream(cublasH, stream);
assert(CUBLAS_STATUS_SUCCESS == cublasStat);
auto cusparseStat = cusparseCreate(&cusparseH);
assert(CUSPARSE_STATUS_SUCCESS == cusparseStat);
cusparseStat = cusparseSetStream(cusparseH, stream);
assert(CUSPARSE_STATUS_SUCCESS == cusparseStat);
/* step 2: configuration of matrix A */
cusparseStat = cusparseCreateMatDescr(&descrA);
assert(CUSPARSE_STATUS_SUCCESS == cusparseStat);
cusparseSetMatIndexBase(descrA,CUSPARSE_INDEX_BASE_ZERO);
cusparseSetMatType(descrA, CUSPARSE_MATRIX_TYPE_GENERAL );
cudaDeviceSynchronize();
}
inline double getDuration(std::chrono::time_point<std::chrono::system_clock> a,
std::chrono::time_point<std::chrono::system_clock> b) {
return std::chrono::duration<double>(b - a).count();
}
#define timestamp(__var__) auto __var__ = std::chrono::system_clock::now();
double runOnce() {
float alpha = 1.;
float beta = 0.;
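// time 10 back-to-back CSR SpMM calls: F2 = alpha * A * F1 + beta * F2, with
// A (n x n, tote non-zeros) sparse and F1/F2 (n x n_feature) dense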
timestamp(t1);
for (int i = 0; i < 10; ++i) {
cusparseScsrmm(cusparseH, CUSPARSE_OPERATION_NON_TRANSPOSE,
n, n_feature, n, tote, &alpha,
descrA, d_a, d_indices, d_values,
d_f1, n, &beta, d_f2, n);
}
cudaStreamSynchronize(stream);
timestamp(t2);
cudaMemcpy(f2, d_f2, f_size, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
float sum(0);
for (int i = 0; i < n; ++i) {
for (int k = 0; k < n_feature; ++k) {
sum += f2[i * n_feature + k];
}
}
fprintf(stderr, "F20 %f Check sum %f\n", f2[0], sum);
return getDuration(t1, t2);
}
int main() {
prepareData();
double total_time = 0;
int times = 10;
fprintf(stderr, "Ready\n");
for (int i = 0; i < times; ++i) {
total_time += runOnce();
}
fprintf(stderr, "Avg time %.9lf s\n", total_time / times);
}
|
a8383e78ff64824d53f18be7568cc28a36356062.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <stdio.h>
#include "common.h"
__global__ void checkIndex(void){
printf("threadIdx:(%d,%d,%d) blockIdx:(%d,%d,%d) blockDim:(%d,%d,%d) "
"gridDim: (%d,%d,%d)\n", threadIdx.x,threadIdx.y,threadIdx.z,
blockIdx.x,blockIdx.y,blockIdx.z,blockDim.x,blockDim.y,
blockDim.z,gridDim.x,gridDim.y,gridDim.z
);
}
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N)
{
// boundary check below keeps threads with i >= N from writing out of range
int i=threadIdx.x+blockIdx.x*blockDim.x;
if (i<N)
C[i]=A[i]+B[i];
}
int checkResult(float *hostRef, float *gpuRef, const int N){
double epsilon= 1.0E-8;
bool match = 1;
int error=-1;
for(int i=0;i<N;i++){
if (abs(hostRef[i]-gpuRef[i])>epsilon){
match=0;
error=i;
printf("Arrays don't match!\n");
printf("host %5.2f gpu %5.2f at current %d\n",hostRef[i],gpuRef[i],i);
break;
}
}
if (match) printf("Arrays match. \n\n");
return error;
}
void sumArraysOnHost(float *A, float *B, float *C, const int N){
// CPU version of the kernel
for(int idx=0; idx<N;idx++){
C[idx]=A[idx]+B[idx];
}
// no need to return C
}
void initialData(float *ip, int size){
time_t t;
srand((unsigned int) time(&t));
// initialize random number with seed of time
// time(&t) <==> t=time(NULL); assign the current time to t
for (int i=0; i<size; i++){
ip[i] = (float) ( rand() & 0xFF )/10.0f; // rand() returns a random number between 0 and RAND_MAX
//0xFF is 255
}
}
int main(int argc, char **argv){
printf("%s Starting ...\n",argv[0]);
int nElem = 32*1e3;//1024;
printf("Vector size is %d\n",nElem);
// allocate memory
size_t nBytes = nElem * sizeof(float);
//printf("%d,%d\n",0xFF,RAND_MAX);
/*******************CPU part********************/
float *h_A, *h_B, *h_C, *gpuRef;
// h_C = h_A + h_B
// d_C copy to gpuRef
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
h_C = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
initialData(h_A,nElem);
initialData(h_B,nElem);
memset(h_C,0,nBytes);
memset(gpuRef,-1,nBytes);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float cpumilliseconds = 0;
double iStart,iElaps;
iStart=cpuSecond();
hipEventRecord(start);
sumArraysOnHost(h_A,h_B,h_C,nElem);
hipEventRecord(stop);
hipEventElapsedTime(&cpumilliseconds, start, stop);
iElaps=cpuSecond()-iStart;
printf("\nCPU timer\n");
printf("sumArraysOnHost Time Elapsed %f\n",iElaps);
/*******************CPU part********************/
/*******************GPU part********************/
int dev=0;
hipSetDevice(dev); // use the device_id=0 GPU;
dim3 block(1024);
dim3 grid((nElem+block.x-1)/block.x); // how the grid is calculated?
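// ceiling division: enough blocks so that grid.x * block.x >= nElem and every
// element is covered by one thread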
// understand index
//checkIndex <<<grid,block>>>();
//hipDeviceReset();
float *d_A, *d_B, *d_C;
hipMalloc((float**)&d_A, nBytes);
hipMalloc((float**)&d_B, nBytes);
hipMalloc((float**)&d_C, nBytes);
CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice));
float beforesync,gpumilliseconds,gpumillisecondsbeforesync;
iStart=cpuSecond();
hipEventRecord(start);
hipLaunchKernelGGL(( sumArraysOnGPU), dim3(grid),dim3(block), 0, 0, d_A, d_B, d_C,nElem);
hipEventRecord(stop);
hipEventSynchronize(stop); // required for a valid elapsed time here; the later hipDeviceSynchronize alone doesn't achieve the same effect
hipEventElapsedTime(&gpumillisecondsbeforesync, start, stop);
beforesync=cpuSecond()-iStart;
CHECK(hipDeviceSynchronize());
iElaps=cpuSecond()-iStart;
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&gpumilliseconds, start, stop);
printf("sumArraysOnGPU Time Elapsed %f before sync %f\n\n",iElaps,beforesync);
printf("\nGPU timer\n");
printf("sumArraysOnHost Time Elapsed %f\n",cpumilliseconds);
printf("sumArraysOnGPU Time Elapsed %f before sync %f\n\n",gpumilliseconds,gpumillisecondsbeforesync);
printf("Kernel configuration: (%d,%d,%d),(%d,%d,%d)\n",grid.x,grid.y,grid.z,block.x,block.y,block.z);
hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost);
int error=checkResult(h_C,gpuRef,nElem);
//printf("%f+%f=%f\n",h_A[error],h_B[error],h_A[error]+h_B[error]);
free(h_A);
free(h_B);
free(h_C);
free(gpuRef);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
return(0);
}
| a8383e78ff64824d53f18be7568cc28a36356062.cu | #include <cuda_runtime.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <stdio.h>
#include "common.h"
__global__ void checkIndex(void){
printf("threadIdx:(%d,%d,%d) blockIdx:(%d,%d,%d) blockDim:(%d,%d,%d) "
"gridDim: (%d,%d,%d)\n", threadIdx.x,threadIdx.y,threadIdx.z,
blockIdx.x,blockIdx.y,blockIdx.z,blockDim.x,blockDim.y,
blockDim.z,gridDim.x,gridDim.y,gridDim.z
);
}
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N)
{
// boundary check below keeps threads with i >= N from writing out of range
int i=threadIdx.x+blockIdx.x*blockDim.x;
if (i<N)
C[i]=A[i]+B[i];
}
int checkResult(float *hostRef, float *gpuRef, const int N){
double epsilon= 1.0E-8;
bool match = 1;
int error=-1;
for(int i=0;i<N;i++){
if (abs(hostRef[i]-gpuRef[i])>epsilon){
match=0;
error=i;
printf("Arrays don't match!\n");
printf("host %5.2f gpu %5.2f at current %d\n",hostRef[i],gpuRef[i],i);
break;
}
}
if (match) printf("Arrays match. \n\n");
return error;
}
void sumArraysOnHost(float *A, float *B, float *C, const int N){
// CPU version of the kernel
for(int idx=0; idx<N;idx++){
C[idx]=A[idx]+B[idx];
}
// no need to return C
}
void initialData(float *ip, int size){
time_t t;
srand((unsigned int) time(&t));
// initialize random number with seed of time
// time(&t) <==> t=time(NULL); assign the current time to t
for (int i=0; i<size; i++){
ip[i] = (float) ( rand() & 0xFF )/10.0f; // rand() returns a random number between 0 and RAND_MAX
//0xFF is 255
}
}
int main(int argc, char **argv){
printf("%s Starting ...\n",argv[0]);
int nElem = 32*1e3;//1024;
printf("Vector size is %d\n",nElem);
// allocate memory
size_t nBytes = nElem * sizeof(float);
//printf("%d,%d\n",0xFF,RAND_MAX);
/*******************CPU part********************/
float *h_A, *h_B, *h_C, *gpuRef;
// h_C = h_A + h_B
// d_C copy to gpuRef
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
h_C = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
initialData(h_A,nElem);
initialData(h_B,nElem);
memset(h_C,0,nBytes);
memset(gpuRef,-1,nBytes);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float cpumilliseconds = 0;
double iStart,iElaps;
iStart=cpuSecond();
cudaEventRecord(start);
sumArraysOnHost(h_A,h_B,h_C,nElem);
cudaEventRecord(stop);
cudaEventElapsedTime(&cpumilliseconds, start, stop);
iElaps=cpuSecond()-iStart;
printf("\nCPU timer\n");
printf("sumArraysOnHost Time Elapsed %f\n",iElaps);
/*******************CPU part********************/
/*******************GPU part********************/
int dev=0;
cudaSetDevice(dev); // use the device_id=0 GPU;
dim3 block(1024);
dim3 grid((nElem+block.x-1)/block.x); // how the grid is calculated?
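// ceiling division: enough blocks so that grid.x * block.x >= nElem and every
// element is covered by one thread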
// understand index
//checkIndex <<<grid,block>>>();
//cudaDeviceReset();
float *d_A, *d_B, *d_C;
cudaMalloc((float**)&d_A, nBytes);
cudaMalloc((float**)&d_B, nBytes);
cudaMalloc((float**)&d_C, nBytes);
CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice));
float beforesync,gpumilliseconds,gpumillisecondsbeforesync;
iStart=cpuSecond();
cudaEventRecord(start);
sumArraysOnGPU<<<grid,block>>>(d_A, d_B, d_C,nElem);
cudaEventRecord(stop);
cudaEventSynchronize(stop); // required for a valid elapsed time here; the later cudaDeviceSynchronize alone doesn't achieve the same effect
cudaEventElapsedTime(&gpumillisecondsbeforesync, start, stop);
beforesync=cpuSecond()-iStart;
CHECK(cudaDeviceSynchronize());
iElaps=cpuSecond()-iStart;
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpumilliseconds, start, stop);
printf("sumArraysOnGPU Time Elapsed %f before sync %f\n\n",iElaps,beforesync);
printf("\nGPU timer\n");
printf("sumArraysOnHost Time Elapsed %f\n",cpumilliseconds);
printf("sumArraysOnGPU Time Elapsed %f before sync %f\n\n",gpumilliseconds,gpumillisecondsbeforesync);
printf("Kernel configuration: (%d,%d,%d),(%d,%d,%d)\n",grid.x,grid.y,grid.z,block.x,block.y,block.z);
cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost);
int error=checkResult(h_C,gpuRef,nElem);
//printf("%f+%f=%f\n",h_A[error],h_B[error],h_A[error]+h_B[error]);
free(h_A);
free(h_B);
free(h_C);
free(gpuRef);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
return(0);
}
|
6746e7887fad28f4f1f59d51fdcf78c2d4143301.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h> // for printf
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "../include/FullyDiscreteKurganovTadmorScheme.cuh"
#include "../include/LatticeParameters.h"
#include "../include/DynamicalVariables.cuh"
#include "../include/SemiDiscreteKurganovTadmorScheme.cuh"
#include "../include/HalfSiteExtrapolation.cuh"
#include "../include/FluxFunctions.cuh"
#include "../include/SpectralRadius.cuh"
#include "../include/SourceTerms.cuh"
#include "../include/EnergyMomentumTensor.cuh"
#include "../include/CudaConfiguration.cuh"
#include "../include/RegulateDissipativeCurrents.cuh"
/**************************************************************************************************************************************************\
__device__
void setNeighborCells(const PRECISION * const __restrict__ data,
PRECISION * const __restrict__ I, PRECISION * const __restrict__ J, PRECISION * const __restrict__ K, PRECISION * const __restrict__ Q,
int s, unsigned int n, int ptr, int simm, int sim, int sip, int sipp, int sjmm, int sjm, int sjp, int sjpp, int skmm, int skm, int skp, int skpp) {
PRECISION data_ns = data[s];
// I
*(I+ptr) = *(data+simm);
*(I+ptr+1) = *(data+sim);
*(I+ptr+2) = data_ns;
*(I+ptr+3) = *(data+sip);
*(I+ptr+4) = *(data+sipp);
// J
*(J+ptr) = *(data+sjmm);
*(J+ptr+1) = *(data+sjm);
*(J+ptr+2) = data_ns;
*(J+ptr+3) = *(data+sjp);
*(J+ptr+4) = *(data+sjpp);
// K
*(K+ptr) = *(data+skmm);
*(K+ptr+1) = *(data+skm);
*(K+ptr+2) = data_ns;
*(K+ptr+3) = *(data+skp);
*(K+ptr+4) = *(data+skpp);
// Q
*(Q + n) = data_ns;
}
__global__
void eulerStepKernel(PRECISION t,
const CONSERVED_VARIABLES * const __restrict__ currrentVars, CONSERVED_VARIABLES * const __restrict__ updatedVars,
const PRECISION * const __restrict__ e, const PRECISION * const __restrict__ p,
const FLUID_VELOCITY * const __restrict__ u, const FLUID_VELOCITY * const __restrict__ up
) {
int i = blockDim.x * blockIdx.x + threadIdx.x + N_GHOST_CELLS_M;
int j = blockDim.y * blockIdx.y + threadIdx.y + N_GHOST_CELLS_M;
int k = blockDim.z * blockIdx.z + threadIdx.z + N_GHOST_CELLS_M;
PRECISION I[5 * NUMBER_CONSERVED_VARIABLES], J[5* NUMBER_CONSERVED_VARIABLES], K[5 * NUMBER_CONSERVED_VARIABLES];
PRECISION hpx[NUMBER_CONSERVED_VARIABLES], hmx[NUMBER_CONSERVED_VARIABLES],
hpy[NUMBER_CONSERVED_VARIABLES], hmy[NUMBER_CONSERVED_VARIABLES],
hpz[NUMBER_CONSERVED_VARIABLES], hmz[NUMBER_CONSERVED_VARIABLES];
PRECISION Q[NUMBER_CONSERVED_VARIABLES], S[NUMBER_CONSERVED_VARIABLES];
int s = columnMajorLinearIndex(i, j, k, d_ncx, d_ncy);
// calculate neighbor cell indices;
int sim = s-1;
int simm = sim-1;
int sip = s+1;
int sipp = sip+1;
int sjm = s-d_ncx;
int sjmm = sjm-d_ncx;
int sjp = s+d_ncx;
int sjpp = sjp+d_ncx;
int stride = d_ncx * d_ncy;
int skm = s-stride;
int skmm = skm-stride;
int skp = s+stride;
int skpp = skp+stride;
int ptr = 0;
setNeighborCells(currrentVars->ttt,I,J,K,Q,s,0,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->ttx,I,J,K,Q,s,1,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->tty,I,J,K,Q,s,2,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->ttn,I,J,K,Q,s,3,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->pitt,I,J,K,Q,s,4,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->pitx,I,J,K,Q,s,5,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->pity,I,J,K,Q,s,6,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->pitn,I,J,K,Q,s,7,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->pixx,I,J,K,Q,s,8,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->pixy,I,J,K,Q,s,9,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->pixn,I,J,K,Q,s,10,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->piyy,I,J,K,Q,s,11,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->piyn,I,J,K,Q,s,12,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->pinn,I,J,K,Q,s,13,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp);
flux(I, hpx, &rightHalfCellExtrapolationForward, &leftHalfCellExtrapolationForward, &spectralRadiusX, &Fx, t, e[s]);
flux(I, hmx, &rightHalfCellExtrapolationBackwards, &leftHalfCellExtrapolationBackwards, &spectralRadiusX, &Fx, t, e[s]);
flux(J, hpy, &rightHalfCellExtrapolationForward, &leftHalfCellExtrapolationForward, &spectralRadiusY, &Fy, t, e[s]);
flux(J, hmy, &rightHalfCellExtrapolationBackwards, &leftHalfCellExtrapolationBackwards, &spectralRadiusY, &Fy, t, e[s]);
flux(K, hpz, &rightHalfCellExtrapolationForward, &leftHalfCellExtrapolationForward, &spectralRadiusZ, &Fz, t, e[s]);
flux(K, hmz, &rightHalfCellExtrapolationBackwards, &leftHalfCellExtrapolationBackwards, &spectralRadiusZ, &Fz, t, e[s]);
loadSourceTerms(I, J, K, Q, S, u, up->ut[s], up->ux[s], up->uy[s], up->un[s], t, e[s], p, s);
PRECISION result[NUMBER_CONSERVED_VARIABLES];
for (int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) = *(Q+n) + d_dt * ( *(S+n) - ( *(hpx+n) - *(hmx+n) + *(hpy+n) - *(hmy+n) ) / d_dx - ( *(hpz+n) - *(hmz+n) )/d_dz );
}
updatedVars->ttt[s] = result[0];
updatedVars->ttx[s] = result[1];
updatedVars->tty[s] = result[2];
updatedVars->ttn[s] = result[3];
updatedVars->pitt[s] = result[4];
updatedVars->pitx[s] = result[5];
updatedVars->pity[s] = result[6];
updatedVars->pitn[s] = result[7];
updatedVars->pixx[s] = result[8];
updatedVars->pixy[s] = result[9];
updatedVars->pixn[s] = result[10];
updatedVars->piyy[s] = result[11];
updatedVars->piyn[s] = result[12];
updatedVars->pinn[s] = result[13];
}
/**************************************************************************************************************************************************/
/**************************************************************************************************************************************************\
__device__
void setNeighborCells2(const PRECISION * const __restrict__ in, PRECISION * const __restrict__ out, PRECISION data_ns,
int ptr, int smm, int sm, int sp, int spp
) {
*(out + ptr ) = in[smm];
*(out + ptr + 1) = in[sm];
*(out + ptr + 2) = data_ns;
*(out + ptr + 3) = in[sp];
*(out + ptr + 4) = in[spp];
}
__device__
void setNeighborCells3(const PRECISION * const __restrict__ in, PRECISION * const __restrict__ out, int ptr, int smm, int sm, int sp, int spp
) {
*(out + ptr ) = in[smm];
*(out + ptr + 1) = in[sm];
*(out + ptr + 3) = in[sp];
*(out + ptr + 4) = in[spp];
}
__global__
void eulerStepKernel_1D(PRECISION t,
const CONSERVED_VARIABLES * const __restrict__ currrentVars, CONSERVED_VARIABLES * const __restrict__ updatedVars,
const PRECISION * const __restrict__ e, const PRECISION * const __restrict__ p,
const FLUID_VELOCITY * const __restrict__ u, const FLUID_VELOCITY * const __restrict__ up
) {
unsigned int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < d_nElements) {
unsigned int k = threadID / (d_nx * d_ny) + N_GHOST_CELLS_M;
unsigned int j = (threadID % (d_nx * d_ny)) / d_nx + N_GHOST_CELLS_M;
unsigned int i = threadID % d_nx + N_GHOST_CELLS_M;
unsigned int s = columnMajorLinearIndex(i, j, k, d_ncx, d_ncy);
PRECISION I[5 * NUMBER_CONSERVED_VARIABLES], J[5* NUMBER_CONSERVED_VARIABLES], K[5 * NUMBER_CONSERVED_VARIABLES];
PRECISION H[NUMBER_CONSERVED_VARIABLES], Q[NUMBER_CONSERVED_VARIABLES];
// calculate neighbor cell indices;
int sim = s-1;
int simm = sim-1;
int sip = s+1;
int sipp = sip+1;
int sjm = s-d_ncx;
int sjmm = sjm-d_ncx;
int sjp = s+d_ncx;
int sjpp = sjp+d_ncx;
int stride = d_ncx * d_ncy;
int skm = s-stride;
int skmm = skm-stride;
int skp = s+stride;
int skpp = skp+stride;
int ptr = 0;
setNeighborCells(currrentVars->ttt,I,J,K,Q,s,0,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->ttx,I,J,K,Q,s,1,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->tty,I,J,K,Q,s,2,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->ttn,I,J,K,Q,s,3,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->pitt,I,J,K,Q,s,4,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->pitx,I,J,K,Q,s,5,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->pity,I,J,K,Q,s,6,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->pitn,I,J,K,Q,s,7,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->pixx,I,J,K,Q,s,8,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->pixy,I,J,K,Q,s,9,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->pixn,I,J,K,Q,s,10,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->piyy,I,J,K,Q,s,11,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->piyn,I,J,K,Q,s,12,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->pinn,I,J,K,Q,s,13,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->Pi,I,J,K,Q,s,14,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp);
//=======================================================================================================================
PRECISION es = e[s];
loadSourceTerms(I, J, K, Q, H, u, up->ut[s], up->ux[s], up->uy[s], up->un[s], t, es, p, s);
PRECISION result[NUMBER_CONSERVED_VARIABLES];
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) = *(Q+n) + d_dt * ( *(H+n) );
}
// X derivatives
PRECISION facX = d_dt/d_dx;
flux(I, H, &rightHalfCellExtrapolationForward, &leftHalfCellExtrapolationForward, &spectralRadiusX, &Fx, t, es);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) -= *(H+n)*facX;
}
flux(I, H, &rightHalfCellExtrapolationBackwards, &leftHalfCellExtrapolationBackwards, &spectralRadiusX, &Fx, t, es);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) += *(H+n)*facX;
}
// Y derivatives
flux(I, H, &rightHalfCellExtrapolationForward, &leftHalfCellExtrapolationForward, &spectralRadiusY, &Fy, t, es);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) -= *(H+n)*facX;
}
flux(I, H, &rightHalfCellExtrapolationBackwards, &leftHalfCellExtrapolationBackwards, &spectralRadiusY, &Fy, t, es);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) += *(H+n)*facX;
}
// Z derivatives
PRECISION facZ = d_dt/d_dz;
flux(I, H, &rightHalfCellExtrapolationForward, &leftHalfCellExtrapolationForward, &spectralRadiusZ, &Fz, t, es);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) -= *(H+n)*facZ;
}
flux(I, H, &rightHalfCellExtrapolationBackwards, &leftHalfCellExtrapolationBackwards, &spectralRadiusZ, &Fz, t, es);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) += *(H+n)*facZ;
}
//=======================================================================================================================
updatedVars->ttt[s] = result[0];
updatedVars->ttx[s] = result[1];
updatedVars->tty[s] = result[2];
updatedVars->ttn[s] = result[3];
#ifdef PIMUNU
updatedVars->pitt[s] = result[4];
updatedVars->pitx[s] = result[5];
updatedVars->pity[s] = result[6];
updatedVars->pitn[s] = result[7];
updatedVars->pixx[s] = result[8];
updatedVars->pixy[s] = result[9];
updatedVars->pixn[s] = result[10];
updatedVars->piyy[s] = result[11];
updatedVars->piyn[s] = result[12];
updatedVars->pinn[s] = result[13];
#endif
#ifdef PI
updatedVars->Pi[s] = result[14];
#endif
}
}
/**************************************************************************************************************************************************/
/**************************************************************************************************************************************************/
__global__
void eulerStepKernelSource(PRECISION t,
const CONSERVED_VARIABLES * const __restrict__ currrentVars, CONSERVED_VARIABLES * const __restrict__ updatedVars,
const PRECISION * const __restrict__ e, const PRECISION * const __restrict__ p,
const FLUID_VELOCITY * const __restrict__ u, const FLUID_VELOCITY * const __restrict__ up
) {
int i = blockDim.x * blockIdx.x + threadIdx.x + N_GHOST_CELLS_M;
int j = blockDim.y * blockIdx.y + threadIdx.y + N_GHOST_CELLS_M;
int k = blockDim.z * blockIdx.z + threadIdx.z + N_GHOST_CELLS_M;
if ( (i < d_ncx-2) && (j < d_ncy-2) && (k < d_ncz-2) ) {
PRECISION Q[NUMBER_CONSERVED_VARIABLES];
PRECISION S[NUMBER_CONSERVED_VARIABLES];
int s = columnMajorLinearIndex(i, j, k, d_ncx, d_ncy);
Q[0] = currrentVars->ttt[s];
Q[1] = currrentVars->ttx[s];
Q[2] = currrentVars->tty[s];
Q[3] = currrentVars->ttn[s];
#ifdef PIMUNU
Q[4] = currrentVars->pitt[s];
Q[5] = currrentVars->pitx[s];
Q[6] = currrentVars->pity[s];
Q[7] = currrentVars->pitn[s];
Q[8] = currrentVars->pixx[s];
Q[9] = currrentVars->pixy[s];
Q[10] = currrentVars->pixn[s];
Q[11] = currrentVars->piyy[s];
Q[12] = currrentVars->piyn[s];
Q[13] = currrentVars->pinn[s];
#endif
#ifdef PI
Q[14] = currrentVars->Pi[s];
#endif
loadSourceTerms2(Q, S, u, up->ut[s], up->ux[s], up->uy[s], up->un[s], t, e[s], p, s);
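// forward-Euler update from the source terms alone: q^(n+1) = q^n + dt * S(q^n);
// the flux (derivative) contributions are added by the separate X/Y/Z kernels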
PRECISION result[NUMBER_CONSERVED_VARIABLES];
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) = *(Q+n) + d_dt * ( *(S+n) );
}
updatedVars->ttt[s] = result[0];
updatedVars->ttx[s] = result[1];
updatedVars->tty[s] = result[2];
updatedVars->ttn[s] = result[3];
#ifdef PIMUNU
updatedVars->pitt[s] = result[4];
updatedVars->pitx[s] = result[5];
updatedVars->pity[s] = result[6];
updatedVars->pitn[s] = result[7];
updatedVars->pixx[s] = result[8];
updatedVars->pixy[s] = result[9];
updatedVars->pixn[s] = result[10];
updatedVars->piyy[s] = result[11];
updatedVars->piyn[s] = result[12];
updatedVars->pinn[s] = result[13];
#endif
#ifdef PI
updatedVars->Pi[s] = result[14];
#endif
}
}
/**************************************************************************************************************************************************/
/**************************************************************************************************************************************************/
__device__
void setNeighborCellsJK2(const PRECISION * const __restrict__ in, PRECISION * const __restrict__ out,
int s, int ptr, int smm, int sm, int sp, int spp
) {
PRECISION data_ns = in[s];
*(out + ptr ) = in[smm];
*(out + ptr + 1) = in[sm];
*(out + ptr + 2) = data_ns;
*(out + ptr + 3) = in[sp];
*(out + ptr + 4) = in[spp];
}
__global__
void eulerStepKernelX(PRECISION t,
const CONSERVED_VARIABLES * const __restrict__ currrentVars, CONSERVED_VARIABLES * const __restrict__ updatedVars,
const FLUID_VELOCITY * const __restrict__ u, const PRECISION * const __restrict__ e
) {
int i = blockDim.x * blockIdx.x + threadIdx.x + N_GHOST_CELLS_M;
int j = blockDim.y * blockIdx.y + threadIdx.y + N_GHOST_CELLS_M;
int k = blockDim.z * blockIdx.z + threadIdx.z + N_GHOST_CELLS_M;
if ( (i < d_ncx-2) && (j < d_ncy-2) && (k < d_ncz-2) ) {
PRECISION I[5 * NUMBER_CONSERVED_VARIABLES];
PRECISION H[NUMBER_CONSERVED_VARIABLES];
int s = columnMajorLinearIndex(i, j, k, d_ncx, d_ncy);
// calculate neighbor cell indices;
int sim = s-1;
int simm = sim-1;
int sip = s+1;
int sipp = sip+1;
int ptr=0;
setNeighborCellsJK2(currrentVars->ttt,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->ttx,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->tty,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->ttn,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
#ifdef PIMUNU
setNeighborCellsJK2(currrentVars->pitt,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->pitx,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->pity,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->pitn,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixx,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixy,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixn,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->piyy,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->piyn,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->pinn,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
#endif
#ifdef PI
setNeighborCellsJK2(currrentVars->Pi,I,s,ptr,simm,sim,sip,sipp);
#endif
PRECISION result[NUMBER_CONSERVED_VARIABLES];
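// Kurganov-Tadmor flux divergence in x: result = -(H_{i+1/2} - H_{i-1/2}) / dx,
// built from the forward/backward half-cell extrapolations below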
flux(I, H, &rightHalfCellExtrapolationForward, &leftHalfCellExtrapolationForward, &spectralRadiusX, &Fx, t, e[s]);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) = - *(H+n);
}
flux(I, H, &rightHalfCellExtrapolationBackwards, &leftHalfCellExtrapolationBackwards, &spectralRadiusX, &Fx, t, e[s]);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) += *(H+n);
*(result+n) /= d_dx;
}
#ifndef IDEAL
loadSourceTermsX(I, H, u, s);
for (unsigned int n = 0; n < 4; ++n) {
*(result+n) += *(H+n);
*(result+n) *= d_dt;
}
#else
for (unsigned int n = 0; n < 4; ++n) {
*(result+n) *= d_dt;
}
#endif
for (unsigned int n = 4; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) *= d_dt;
}
updatedVars->ttt[s] += result[0];
updatedVars->ttx[s] += result[1];
updatedVars->tty[s] += result[2];
updatedVars->ttn[s] += result[3];
#ifdef PIMUNU
updatedVars->pitt[s] += result[4];
updatedVars->pitx[s] += result[5];
updatedVars->pity[s] += result[6];
updatedVars->pitn[s] += result[7];
updatedVars->pixx[s] += result[8];
updatedVars->pixy[s] += result[9];
updatedVars->pixn[s] += result[10];
updatedVars->piyy[s] += result[11];
updatedVars->piyn[s] += result[12];
updatedVars->pinn[s] += result[13];
#endif
#ifdef PI
updatedVars->Pi[s] += result[14];
#endif
}
}
__global__
void eulerStepKernelY(PRECISION t,
const CONSERVED_VARIABLES * const __restrict__ currrentVars, CONSERVED_VARIABLES * const __restrict__ updatedVars,
const FLUID_VELOCITY * const __restrict__ u, const PRECISION * const __restrict__ e
) {
int i = blockDim.x * blockIdx.x + threadIdx.x + N_GHOST_CELLS_M;
int j = blockDim.y * blockIdx.y + threadIdx.y + N_GHOST_CELLS_M;
int k = blockDim.z * blockIdx.z + threadIdx.z + N_GHOST_CELLS_M;
if ( (i < d_ncx-2) && (j < d_ncy-2) && (k < d_ncz-2) ) {
PRECISION J[5* NUMBER_CONSERVED_VARIABLES];
PRECISION H[NUMBER_CONSERVED_VARIABLES];
int s = columnMajorLinearIndex(i, j, k, d_ncx, d_ncy);
// calculate neighbor cell indices;
int sjm = s-d_ncx;
int sjmm = sjm-d_ncx;
int sjp = s+d_ncx;
int sjpp = sjp+d_ncx;
int ptr=0;
setNeighborCellsJK2(currrentVars->ttt,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->ttx,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->tty,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->ttn,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
#ifdef PIMUNU
setNeighborCellsJK2(currrentVars->pitt,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pitx,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pity,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pitn,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixx,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixy,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixn,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->piyy,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->piyn,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pinn,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
#endif
#ifdef PI
setNeighborCellsJK2(currrentVars->Pi,J,s,ptr,sjmm,sjm,sjp,sjpp);
#endif
PRECISION result[NUMBER_CONSERVED_VARIABLES];
flux(J, H, &rightHalfCellExtrapolationForward, &leftHalfCellExtrapolationForward, &spectralRadiusY, &Fy, t, e[s]);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) = - *(H+n);
}
flux(J, H, &rightHalfCellExtrapolationBackwards, &leftHalfCellExtrapolationBackwards, &spectralRadiusY, &Fy, t, e[s]);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) += *(H+n);
*(result+n) /= d_dy;
}
#ifndef IDEAL
loadSourceTermsY(J, H, u, s);
for (unsigned int n = 0; n < 4; ++n) {
*(result+n) += *(H+n);
*(result+n) *= d_dt;
}
#else
for (unsigned int n = 0; n < 4; ++n) {
*(result+n) *= d_dt;
}
#endif
for (unsigned int n = 4; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) *= d_dt;
}
updatedVars->ttt[s] += result[0];
updatedVars->ttx[s] += result[1];
updatedVars->tty[s] += result[2];
updatedVars->ttn[s] += result[3];
#ifdef PIMUNU
updatedVars->pitt[s] += result[4];
updatedVars->pitx[s] += result[5];
updatedVars->pity[s] += result[6];
updatedVars->pitn[s] += result[7];
updatedVars->pixx[s] += result[8];
updatedVars->pixy[s] += result[9];
updatedVars->pixn[s] += result[10];
updatedVars->piyy[s] += result[11];
updatedVars->piyn[s] += result[12];
updatedVars->pinn[s] += result[13];
#endif
#ifdef PI
updatedVars->Pi[s] += result[14];
#endif
}
}
__global__
void eulerStepKernelZ(PRECISION t,
const CONSERVED_VARIABLES * const __restrict__ currrentVars, CONSERVED_VARIABLES * const __restrict__ updatedVars,
const FLUID_VELOCITY * const __restrict__ u, const PRECISION * const __restrict__ e
) {
int i = blockDim.x * blockIdx.x + threadIdx.x + N_GHOST_CELLS_M;
int j = blockDim.y * blockIdx.y + threadIdx.y + N_GHOST_CELLS_M;
int k = blockDim.z * blockIdx.z + threadIdx.z + N_GHOST_CELLS_M;
if ( (i < d_ncx-2) && (j < d_ncy-2) && (k < d_ncz-2) ) {
//printf("(i,j,k)=(%d,%d,%d)\n",i,j,k);
PRECISION K[5 * NUMBER_CONSERVED_VARIABLES];
PRECISION H[NUMBER_CONSERVED_VARIABLES];
int s = columnMajorLinearIndex(i, j, k, d_ncx, d_ncy);
// calculate neighbor cell indices;
int stride = d_ncx * d_ncy;
int skm = s-stride;
int skmm = skm-stride;
int skp = s+stride;
int skpp = skp+stride;
int ptr=0;
setNeighborCellsJK2(currrentVars->ttt,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->ttx,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->tty,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->ttn,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
#ifdef PIMUNU
setNeighborCellsJK2(currrentVars->pitt,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pitx,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pity,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pitn,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixx,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixy,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixn,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->piyy,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->piyn,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pinn,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
#endif
#ifdef PI
setNeighborCellsJK2(currrentVars->Pi,K,s,ptr,skmm,skm,skp,skpp);
#endif
PRECISION result[NUMBER_CONSERVED_VARIABLES];
flux(K, H, &rightHalfCellExtrapolationForward, &leftHalfCellExtrapolationForward, &spectralRadiusZ, &Fz, t, e[s]);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) = -*(H+n);
}
flux(K, H, &rightHalfCellExtrapolationBackwards, &leftHalfCellExtrapolationBackwards, &spectralRadiusZ, &Fz, t, e[s]);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) += *(H+n);
*(result+n) /= d_dz;
}
#ifndef IDEAL
loadSourceTermsZ(K, H, u, s, t);
for (unsigned int n = 0; n < 4; ++n) {
*(result+n) += *(H+n);
*(result+n) *= d_dt;
}
#else
for (unsigned int n = 0; n < 4; ++n) {
*(result+n) *= d_dt;
}
#endif
for (unsigned int n = 4; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) *= d_dt;
}
updatedVars->ttt[s] += result[0];
updatedVars->ttx[s] += result[1];
updatedVars->tty[s] += result[2];
updatedVars->ttn[s] += result[3];
#ifdef PIMUNU
updatedVars->pitt[s] += result[4];
updatedVars->pitx[s] += result[5];
updatedVars->pity[s] += result[6];
updatedVars->pitn[s] += result[7];
updatedVars->pixx[s] += result[8];
updatedVars->pixy[s] += result[9];
updatedVars->pixn[s] += result[10];
updatedVars->piyy[s] += result[11];
updatedVars->piyn[s] += result[12];
updatedVars->pinn[s] += result[13];
#endif
#ifdef PI
updatedVars->Pi[s] += result[14];
#endif
}
}
/**************************************************************************************************************************************************\
/**************************************************************************************************************************************************\
__device__
void setSharedData(const CONSERVED_VARIABLES * const __restrict__ in, PRECISION * const __restrict__ out,
int i, int j, int k, int nx, int ny, int stride, int s) {
int ptr = i + nx*(j + ny*k);
*(out + ptr) = in->ttt[s]; ptr+=stride;
*(out + ptr) = in->ttx[s]; ptr+=stride;
*(out + ptr) = in->tty[s]; ptr+=stride;
*(out + ptr) = in->ttn[s]; ptr+=stride;
*(out + ptr) = in->pitt[s]; ptr+=stride;
*(out + ptr) = in->pitx[s]; ptr+=stride;
*(out + ptr) = in->pity[s]; ptr+=stride;
*(out + ptr) = in->pitn[s]; ptr+=stride;
*(out + ptr) = in->pixx[s]; ptr+=stride;
*(out + ptr) = in->pixy[s]; ptr+=stride;
*(out + ptr) = in->pixn[s]; ptr+=stride;
*(out + ptr) = in->piyy[s]; ptr+=stride;
*(out + ptr) = in->piyn[s]; ptr+=stride;
*(out + ptr) = in->pinn[s]; ptr+=stride;
#ifdef PI
*(out + ptr) = in->Pi[s];
#endif
}
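// eulerStepKernelSharedX: shared-memory version of the x sweep. Each block stages a
// (BSX_X+4) x BSX_Y x BSX_Z tile (two ghost cells on either x side), evaluates the
// Kurganov-Tadmor x-fluxes with flux2, adds the source terms built from x-derivatives of
// the shear-stress components, and accumulates d_dt times the result into updatedVars.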
__global__
void eulerStepKernelSharedX(PRECISION t,
const CONSERVED_VARIABLES * const __restrict__ currrentVars, CONSERVED_VARIABLES * const __restrict__ updatedVars,
const FLUID_VELOCITY * const __restrict__ u, const PRECISION * const __restrict__ e
) {
int i = blockDim.x * blockIdx.x + threadIdx.x + N_GHOST_CELLS_M;
int j = blockDim.y * blockIdx.y + threadIdx.y + N_GHOST_CELLS_M;
int k = blockDim.z * blockIdx.z + threadIdx.z + N_GHOST_CELLS_M;
if ( (i < d_ncx-2) && (j < d_ncy-2) && (k < d_ncz-2) ) {
__shared__ PRECISION s_data[NUMBER_CONSERVED_VARIABLES*(BSX_X+4)*BSX_Y*BSX_Z];
int tx = threadIdx.x + N_GHOST_CELLS_M;
int ty = threadIdx.y;
int tz = threadIdx.z;
int s = columnMajorLinearIndex(i, j, k, d_ncx, d_ncy);
int stride = (BSX_X+4)*BSX_Y*BSX_Z;
int ts = tx + (BSX_X+4) * (ty + BSX_Y * tz);
setSharedData(currrentVars, s_data, tx, ty, tz, BSX_X+4, BSX_Y, stride, s);
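		// Threads on the x edges of the block also stage the two ghost cells on their side of the tile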
if(threadIdx.x==0) {
int sim = s-1;
int simm = sim-1;
setSharedData(currrentVars, s_data, tx-1, ty, tz, BSX_X+4, BSX_Y, stride, sim);
setSharedData(currrentVars, s_data, tx-2, ty, tz, BSX_X+4, BSX_Y, stride, simm);
}
if(threadIdx.x==blockDim.x-1) {
int sip = s+1;
int sipp = sip+1;
setSharedData(currrentVars, s_data, tx+1, ty, tz, BSX_X+4, BSX_Y, stride, sip);
setSharedData(currrentVars, s_data, tx+2, ty, tz, BSX_X+4, BSX_Y, stride, sipp);
}
__syncthreads();
//==========================================================================================
PRECISION e_s = e[s];
PRECISION result[NUMBER_CONSERVED_VARIABLES], H[NUMBER_CONSERVED_VARIABLES];
flux2(s_data, H, &rightHalfCellExtrapolationForward, &leftHalfCellExtrapolationForward, &spectralRadiusX, &Fx, t, ts, stride, e_s);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) = - *(H+n);
}
flux2(s_data, H, &rightHalfCellExtrapolationBackwards, &leftHalfCellExtrapolationBackwards, &spectralRadiusX, &Fx, t, ts, stride, e_s);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) += *(H+n);
*(result+n) /= d_dx;
}
		// Source terms: central-difference x-derivatives of the pi^{tau nu} and pi^{x nu} components (and Pi) read from the shared-memory tile
PRECISION facX = 1/d_dx/2;
int ptr = ts+4*stride;
PRECISION dxpitt = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facX; ptr+=stride;
PRECISION dxpitx = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facX; ptr+=stride;
PRECISION dxpity = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facX; ptr+=stride;
PRECISION dxpitn = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facX; ptr+=stride;
PRECISION dxpixx = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facX; ptr+=stride;
PRECISION dxpixy = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facX; ptr+=stride;
PRECISION dxpixn = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facX; ptr+=4*stride;
PRECISION ut = u->ut[s];
PRECISION ux = u->ux[s];
PRECISION vx = fdividef(ux, ut);
#ifndef PI
result[0] += dxpitt*vx - dxpitx;
result[1] += dxpitx*vx - dxpixx;
#else
PRECISION dxPi = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facX;
result[0] += dxpitt*vx - dxpitx - vx*dxPi;
result[1] += dxpitx*vx - dxpixx - dxPi;
#endif
result[2] += dxpity*vx - dxpixy;
result[3] += dxpitn*vx - dxpixn;
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) *= d_dt;
}
//==========================================================================================
updatedVars->ttt[s] += result[0];
updatedVars->ttx[s] += result[1];
updatedVars->tty[s] += result[2];
updatedVars->ttn[s] += result[3];
#ifdef PIMUNU
updatedVars->pitt[s] += result[4];
updatedVars->pitx[s] += result[5];
updatedVars->pity[s] += result[6];
updatedVars->pitn[s] += result[7];
updatedVars->pixx[s] += result[8];
updatedVars->pixy[s] += result[9];
updatedVars->pixn[s] += result[10];
updatedVars->piyy[s] += result[11];
updatedVars->piyn[s] += result[12];
updatedVars->pinn[s] += result[13];
#endif
#ifdef PI
updatedVars->Pi[s] += result[14];
#endif
}
}
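// eulerStepKernelSharedY: same structure as the x sweep, but the tile is stored with y as
// the fastest-varying index, so the +/-1 offsets used in the central differences address
// y-neighbors and the flux difference is divided by d_dy.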
__global__
//__launch_bounds__(1024, 3)
void eulerStepKernelSharedY(PRECISION t,
const CONSERVED_VARIABLES * const __restrict__ currrentVars, CONSERVED_VARIABLES * const __restrict__ updatedVars,
const FLUID_VELOCITY * const __restrict__ u, const PRECISION * const __restrict__ e
) {
int i = blockDim.x * blockIdx.x + threadIdx.x + N_GHOST_CELLS_M;
int j = blockDim.y * blockIdx.y + threadIdx.y + N_GHOST_CELLS_M;
int k = blockDim.z * blockIdx.z + threadIdx.z + N_GHOST_CELLS_M;
if ( (i < d_ncx-2) && (j < d_ncy-2) && (k < d_ncz-2) ) {
__shared__ PRECISION s_data[NUMBER_CONSERVED_VARIABLES*(BSY_Y+4)*BSY_X*BSY_Z];
int tx = threadIdx.x;
int ty = threadIdx.y + N_GHOST_CELLS_M;
int tz = threadIdx.z;
int s = columnMajorLinearIndex(i, j, k, d_ncx, d_ncy);
int stride = (BSY_Y+4)*BSY_X*BSY_Z;
int ts = ty + (BSY_Y+4) * (tx + BSY_X * tz);
setSharedData(currrentVars, s_data, ty, tx, tz, BSY_Y+4, BSY_X, stride, s);
if(threadIdx.y==0) {
int sjm = s-d_ncx;
int sjmm = sjm-d_ncx;
setSharedData(currrentVars, s_data, ty-1, tx, tz, BSY_Y+4, BSY_X, stride, sjm);
setSharedData(currrentVars, s_data, ty-2, tx, tz, BSY_Y+4, BSY_X, stride, sjmm);
}
if(threadIdx.y==blockDim.y-1) {
int sjp = s+d_ncx;
int sjpp = sjp+d_ncx;
setSharedData(currrentVars, s_data, ty+1, tx, tz, BSY_Y+4, BSY_X, stride, sjp);
setSharedData(currrentVars, s_data, ty+2, tx, tz, BSY_Y+4, BSY_X, stride, sjpp);
}
__syncthreads();
//==========================================================================================
PRECISION e_s = e[s];
PRECISION result[NUMBER_CONSERVED_VARIABLES], H[NUMBER_CONSERVED_VARIABLES];
flux2(s_data, H, &rightHalfCellExtrapolationForward, &leftHalfCellExtrapolationForward, &spectralRadiusY, &Fy, t, ts, stride, e_s);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) = - *(H+n);
}
flux2(s_data, H, &rightHalfCellExtrapolationBackwards, &leftHalfCellExtrapolationBackwards, &spectralRadiusY, &Fy, t, ts, stride, e_s);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) += *(H+n);
*(result+n) /= d_dy;
}
		// Source terms: central-difference y-derivatives of the required shear-stress components; the repeated ptr+=stride statements skip slabs whose y-derivative is not needed
PRECISION facY = 1/d_dy/2;
int ptr = ts+4*stride;
PRECISION dypitt = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facY; ptr+=stride;
PRECISION dypitx = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facY; ptr+=stride;
PRECISION dypity = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facY; ptr+=stride;
PRECISION dypitn = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facY; ptr+=stride; ptr+=stride;
PRECISION dypixy = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facY; ptr+=stride; ptr+=stride;
PRECISION dypiyy = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facY; ptr+=stride;
PRECISION dypiyn = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facY; ptr+=stride; ptr+=stride;
PRECISION ut = u->ut[s];
PRECISION uy = u->uy[s];
		PRECISION vy = fdividef(uy, ut);
#ifndef PI
result[0] += dypitt*vy - dypity;
result[2] += dypity*vy - dypiyy;
#else
PRECISION dyPi = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facY;
result[0] += dypitt*vy - dypity - vy*dyPi;
result[2] += dypity*vy - dypiyy - dyPi;
#endif
result[1] += dypitx*vy - dypixy;
result[3] += dypitn*vy - dypiyn;
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) *= d_dt;
}
//==========================================================================================
updatedVars->ttt[s] += result[0];
updatedVars->ttx[s] += result[1];
updatedVars->tty[s] += result[2];
updatedVars->ttn[s] += result[3];
#ifdef PIMUNU
updatedVars->pitt[s] += result[4];
updatedVars->pitx[s] += result[5];
updatedVars->pity[s] += result[6];
updatedVars->pitn[s] += result[7];
updatedVars->pixx[s] += result[8];
updatedVars->pixy[s] += result[9];
updatedVars->pixn[s] += result[10];
updatedVars->piyy[s] += result[11];
updatedVars->piyn[s] += result[12];
updatedVars->pinn[s] += result[13];
#endif
#ifdef PI
updatedVars->Pi[s] += result[14];
#endif
}
}
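// eulerStepKernelSharedZ: z (eta) sweep with z as the fastest-varying tile index; the flux
// difference is divided by d_dz and, when PI is defined, the bulk contribution to the eta
// component carries a 1/t^2 factor.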
__global__
void eulerStepKernelSharedZ(PRECISION t,
const CONSERVED_VARIABLES * const __restrict__ currrentVars, CONSERVED_VARIABLES * const __restrict__ updatedVars,
const FLUID_VELOCITY * const __restrict__ u, const PRECISION * const __restrict__ e
) {
int i = blockDim.x * blockIdx.x + threadIdx.x + N_GHOST_CELLS_M;
int j = blockDim.y * blockIdx.y + threadIdx.y + N_GHOST_CELLS_M;
int k = blockDim.z * blockIdx.z + threadIdx.z + N_GHOST_CELLS_M;
if ( (i < d_ncx-2) && (j < d_ncy-2) && (k < d_ncz-2) ) {
printf("(i,j,k)=(%d,%d,%d)\n",i,j,k);
__shared__ PRECISION s_data[NUMBER_CONSERVED_VARIABLES*(BSZ_Z+4)*BSZ_X*BSZ_Y];
int tx = threadIdx.x;
int ty = threadIdx.y;
int tz = threadIdx.z + N_GHOST_CELLS_M;
int s = columnMajorLinearIndex(i, j, k, d_ncx, d_ncy);
int stride = (BSZ_Z+4)*BSZ_X*BSZ_Y;
int ts = tz + (BSZ_Z+4) * (ty + BSZ_Y * tx);
setSharedData(currrentVars, s_data, tz, ty, tx, BSZ_Z+4, BSZ_Y, stride, s);
if(threadIdx.z==0) {
int skm = s-d_ncx*d_ncy;
int skmm = skm-d_ncx*d_ncy;
setSharedData(currrentVars, s_data, tz-1, ty, tx, BSZ_Z+4, BSZ_Y, stride, skm);
setSharedData(currrentVars, s_data, tz-2, ty, tx, BSZ_Z+4, BSZ_Y, stride, skmm);
}
if(threadIdx.z==blockDim.z-1) {
int skp = s+d_ncx*d_ncy;
int skpp = skp+d_ncx*d_ncy;
setSharedData(currrentVars, s_data, tz+1, ty, tx, BSZ_Z+4, BSZ_Y, stride, skp);
setSharedData(currrentVars, s_data, tz+2, ty, tx, BSZ_Z+4, BSZ_Y, stride, skpp);
}
__syncthreads();
//==========================================================================================
PRECISION e_s = e[s];
PRECISION result[NUMBER_CONSERVED_VARIABLES], H[NUMBER_CONSERVED_VARIABLES];
flux2(s_data, H, &rightHalfCellExtrapolationForward, &leftHalfCellExtrapolationForward, &spectralRadiusZ, &Fz, t, ts, stride, e_s);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) = - *(H+n);
}
flux2(s_data, H, &rightHalfCellExtrapolationBackwards, &leftHalfCellExtrapolationBackwards, &spectralRadiusZ, &Fz, t, ts, stride, e_s);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) += *(H+n);
*(result+n) /= d_dz;
}
		// Source terms: central-difference z-derivatives of the required shear-stress components; the repeated ptr+=stride statements skip slabs whose z-derivative is not needed
PRECISION facZ = 1/d_dz/2;
int ptr = ts+4*stride;
PRECISION dnpitt = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facZ; ptr+=stride;
PRECISION dnpitx = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facZ; ptr+=stride;
PRECISION dnpity = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facZ; ptr+=stride;
PRECISION dnpitn = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facZ; ptr+=stride; ptr+=stride; ptr+=stride;
PRECISION dnpixn = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facZ; ptr+=stride; ptr+=stride;
PRECISION dnpiyn = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facZ; ptr+=stride;
PRECISION dnpinn = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facZ; ptr+=stride;
PRECISION ut = u->ut[s];
PRECISION un = u->un[s];
PRECISION vn = fdividef(un, ut);
#ifndef PI
result[0] += dnpitt*vn - dnpitn;
result[3] += dnpitn*vn - dnpinn;
#else
PRECISION dnPi = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facZ;
result[0] += dnpitt*vn - dnpitn - vn*dnPi;
result[3] += dnpitn*vn - dnpinn - dnPi/powf(t,2.0f);
#endif
result[1] += dnpitx*vn - dnpixn;
result[2] += dnpity*vn - dnpiyn;
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) *= d_dt;
}
//==========================================================================================
updatedVars->ttt[s] += result[0];
updatedVars->ttx[s] += result[1];
updatedVars->tty[s] += result[2];
updatedVars->ttn[s] += result[3];
#ifdef PIMUNU
updatedVars->pitt[s] += result[4];
updatedVars->pitx[s] += result[5];
updatedVars->pity[s] += result[6];
updatedVars->pitn[s] += result[7];
updatedVars->pixx[s] += result[8];
updatedVars->pixy[s] += result[9];
updatedVars->pixn[s] += result[10];
updatedVars->piyy[s] += result[11];
updatedVars->piyn[s] += result[12];
updatedVars->pinn[s] += result[13];
#endif
#ifdef PI
updatedVars->Pi[s] += result[14];
#endif
}
}
/**************************************************************************************************************************************************\
/**************************************************************************************************************************************************/
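// 1D-indexed kernel variants: the launch uses a flat thread index that is mapped back to
// (i, j, k) inside the kernel. eulerStepKernelSource_1D applies only the local source
// terms, updatedVars = Q + dt*S(Q); the directional sweeps below accumulate the flux terms.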
__global__
void eulerStepKernelSource_1D(PRECISION t,
const CONSERVED_VARIABLES * const __restrict__ currrentVars, CONSERVED_VARIABLES * const __restrict__ updatedVars,
const PRECISION * const __restrict__ e, const PRECISION * const __restrict__ p,
const FLUID_VELOCITY * const __restrict__ u, const FLUID_VELOCITY * const __restrict__ up
) {
unsigned int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < d_nElements) {
unsigned int k = threadID / (d_nx * d_ny) + N_GHOST_CELLS_M;
unsigned int j = (threadID % (d_nx * d_ny)) / d_nx + N_GHOST_CELLS_M;
unsigned int i = threadID % d_nx + N_GHOST_CELLS_M;
unsigned int s = columnMajorLinearIndex(i, j, k, d_ncx, d_ncy);
PRECISION Q[NUMBER_CONSERVED_VARIABLES];
PRECISION S[NUMBER_CONSERVED_VARIABLES];
Q[0] = currrentVars->ttt[s];
Q[1] = currrentVars->ttx[s];
Q[2] = currrentVars->tty[s];
Q[3] = currrentVars->ttn[s];
#ifdef PIMUNU
Q[4] = currrentVars->pitt[s];
Q[5] = currrentVars->pitx[s];
Q[6] = currrentVars->pity[s];
Q[7] = currrentVars->pitn[s];
Q[8] = currrentVars->pixx[s];
Q[9] = currrentVars->pixy[s];
Q[10] = currrentVars->pixn[s];
Q[11] = currrentVars->piyy[s];
Q[12] = currrentVars->piyn[s];
Q[13] = currrentVars->pinn[s];
#endif
#ifdef PI
Q[14] = currrentVars->Pi[s];
#endif
loadSourceTerms2(Q, S, u, up->ut[s], up->ux[s], up->uy[s], up->un[s], t, e[s], p, s);
PRECISION result[NUMBER_CONSERVED_VARIABLES];
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) = *(Q+n) + d_dt * ( *(S+n) );
}
updatedVars->ttt[s] = result[0];
updatedVars->ttx[s] = result[1];
updatedVars->tty[s] = result[2];
updatedVars->ttn[s] = result[3];
#ifdef PIMUNU
updatedVars->pitt[s] = result[4];
updatedVars->pitx[s] = result[5];
updatedVars->pity[s] = result[6];
updatedVars->pitn[s] = result[7];
updatedVars->pixx[s] = result[8];
updatedVars->pixy[s] = result[9];
updatedVars->pixn[s] = result[10];
updatedVars->piyy[s] = result[11];
updatedVars->piyn[s] = result[12];
updatedVars->pinn[s] = result[13];
#endif
#ifdef PI
updatedVars->Pi[s] = result[14];
#endif
}
}
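// eulerStepKernelX_1D: x-direction flux sweep for the 1D-indexed launch, identical in
// structure to eulerStepKernelX but with (i, j, k) recovered from the flat thread index.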
__global__
void eulerStepKernelX_1D(PRECISION t,
const CONSERVED_VARIABLES * const __restrict__ currrentVars, CONSERVED_VARIABLES * const __restrict__ updatedVars,
const FLUID_VELOCITY * const __restrict__ u, const PRECISION * const __restrict__ e
) {
unsigned int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < d_nElements) {
unsigned int k = threadID / (d_nx * d_ny) + N_GHOST_CELLS_M;
unsigned int j = (threadID % (d_nx * d_ny)) / d_nx + N_GHOST_CELLS_M;
unsigned int i = threadID % d_nx + N_GHOST_CELLS_M;
unsigned int s = columnMajorLinearIndex(i, j, k, d_ncx, d_ncy);
PRECISION I[5 * NUMBER_CONSERVED_VARIABLES];
PRECISION H[NUMBER_CONSERVED_VARIABLES];
// calculate neighbor cell indices;
int sim = s-1;
int simm = sim-1;
int sip = s+1;
int sipp = sip+1;
int ptr=0;
setNeighborCellsJK2(currrentVars->ttt,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->ttx,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->tty,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->ttn,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
#ifdef PIMUNU
setNeighborCellsJK2(currrentVars->pitt,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->pitx,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->pity,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->pitn,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixx,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixy,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixn,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->piyy,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->piyn,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->pinn,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
#endif
#ifdef PI
setNeighborCellsJK2(currrentVars->Pi,I,s,ptr,simm,sim,sip,sipp);
#endif
PRECISION result[NUMBER_CONSERVED_VARIABLES];
flux(I, H, &rightHalfCellExtrapolationForward, &leftHalfCellExtrapolationForward, &spectralRadiusX, &Fx, t, e[s]);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) = - *(H+n);
}
flux(I, H, &rightHalfCellExtrapolationBackwards, &leftHalfCellExtrapolationBackwards, &spectralRadiusX, &Fx, t, e[s]);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) += *(H+n);
*(result+n) /= d_dx;
}
#ifndef IDEAL
loadSourceTermsX(I, H, u, s);
for (unsigned int n = 0; n < 4; ++n) {
*(result+n) += *(H+n);
*(result+n) *= d_dt;
}
#else
for (unsigned int n = 0; n < 4; ++n) {
*(result+n) *= d_dt;
}
#endif
for (unsigned int n = 4; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) *= d_dt;
}
updatedVars->ttt[s] += result[0];
updatedVars->ttx[s] += result[1];
updatedVars->tty[s] += result[2];
updatedVars->ttn[s] += result[3];
#ifdef PIMUNU
updatedVars->pitt[s] += result[4];
updatedVars->pitx[s] += result[5];
updatedVars->pity[s] += result[6];
updatedVars->pitn[s] += result[7];
updatedVars->pixx[s] += result[8];
updatedVars->pixy[s] += result[9];
updatedVars->pixn[s] += result[10];
updatedVars->piyy[s] += result[11];
updatedVars->piyn[s] += result[12];
updatedVars->pinn[s] += result[13];
#endif
#ifdef PI
updatedVars->Pi[s] += result[14];
#endif
}
}
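// eulerStepKernelY_1D: y-direction flux sweep; neighbor cells are offset by +/- d_ncx.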
__global__
void eulerStepKernelY_1D(PRECISION t,
const CONSERVED_VARIABLES * const __restrict__ currrentVars, CONSERVED_VARIABLES * const __restrict__ updatedVars,
const FLUID_VELOCITY * const __restrict__ u, const PRECISION * const __restrict__ e
) {
unsigned int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < d_nElements) {
unsigned int k = threadID / (d_nx * d_ny) + N_GHOST_CELLS_M;
unsigned int j = (threadID % (d_nx * d_ny)) / d_nx + N_GHOST_CELLS_M;
unsigned int i = threadID % d_nx + N_GHOST_CELLS_M;
unsigned int s = columnMajorLinearIndex(i, j, k, d_ncx, d_ncy);
PRECISION J[5* NUMBER_CONSERVED_VARIABLES];
PRECISION H[NUMBER_CONSERVED_VARIABLES];
// calculate neighbor cell indices;
int sjm = s-d_ncx;
int sjmm = sjm-d_ncx;
int sjp = s+d_ncx;
int sjpp = sjp+d_ncx;
int ptr=0;
setNeighborCellsJK2(currrentVars->ttt,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->ttx,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->tty,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->ttn,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
#ifdef PIMUNU
setNeighborCellsJK2(currrentVars->pitt,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pitx,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pity,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pitn,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixx,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixy,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixn,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->piyy,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->piyn,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pinn,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
#endif
#ifdef PI
setNeighborCellsJK2(currrentVars->Pi,J,s,ptr,sjmm,sjm,sjp,sjpp);
#endif
PRECISION result[NUMBER_CONSERVED_VARIABLES];
flux(J, H, &rightHalfCellExtrapolationForward, &leftHalfCellExtrapolationForward, &spectralRadiusY, &Fy, t, e[s]);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) = - *(H+n);
}
flux(J, H, &rightHalfCellExtrapolationBackwards, &leftHalfCellExtrapolationBackwards, &spectralRadiusY, &Fy, t, e[s]);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) += *(H+n);
*(result+n) /= d_dy;
}
#ifndef IDEAL
loadSourceTermsY(J, H, u, s);
for (unsigned int n = 0; n < 4; ++n) {
*(result+n) += *(H+n);
*(result+n) *= d_dt;
}
#else
for (unsigned int n = 0; n < 4; ++n) {
*(result+n) *= d_dt;
}
#endif
for (unsigned int n = 4; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) *= d_dt;
}
updatedVars->ttt[s] += result[0];
updatedVars->ttx[s] += result[1];
updatedVars->tty[s] += result[2];
updatedVars->ttn[s] += result[3];
#ifdef PIMUNU
updatedVars->pitt[s] += result[4];
updatedVars->pitx[s] += result[5];
updatedVars->pity[s] += result[6];
updatedVars->pitn[s] += result[7];
updatedVars->pixx[s] += result[8];
updatedVars->pixy[s] += result[9];
updatedVars->pixn[s] += result[10];
updatedVars->piyy[s] += result[11];
updatedVars->piyn[s] += result[12];
updatedVars->pinn[s] += result[13];
#endif
#ifdef PI
updatedVars->Pi[s] += result[14];
#endif
}
}
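// eulerStepKernelZ_1D: z-direction flux sweep; neighbor cells are offset by +/- d_ncx*d_ncy.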
__global__
void eulerStepKernelZ_1D(PRECISION t,
const CONSERVED_VARIABLES * const __restrict__ currrentVars, CONSERVED_VARIABLES * const __restrict__ updatedVars,
const FLUID_VELOCITY * const __restrict__ u, const PRECISION * const __restrict__ e
) {
unsigned int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < d_nElements) {
unsigned int k = threadID / (d_nx * d_ny) + N_GHOST_CELLS_M;
unsigned int j = (threadID % (d_nx * d_ny)) / d_nx + N_GHOST_CELLS_M;
unsigned int i = threadID % d_nx + N_GHOST_CELLS_M;
unsigned int s = columnMajorLinearIndex(i, j, k, d_ncx, d_ncy);
PRECISION K[5 * NUMBER_CONSERVED_VARIABLES];
PRECISION H[NUMBER_CONSERVED_VARIABLES];
// calculate neighbor cell indices;
int stride = d_ncx * d_ncy;
int skm = s-stride;
int skmm = skm-stride;
int skp = s+stride;
int skpp = skp+stride;
int ptr=0;
setNeighborCellsJK2(currrentVars->ttt,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->ttx,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->tty,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->ttn,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
#ifdef PIMUNU
setNeighborCellsJK2(currrentVars->pitt,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pitx,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pity,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pitn,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixx,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixy,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixn,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->piyy,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->piyn,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pinn,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
#endif
#ifdef PI
setNeighborCellsJK2(currrentVars->Pi,K,s,ptr,skmm,skm,skp,skpp);
#endif
PRECISION result[NUMBER_CONSERVED_VARIABLES];
flux(K, H, &rightHalfCellExtrapolationForward, &leftHalfCellExtrapolationForward, &spectralRadiusZ, &Fz, t, e[s]);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) = -*(H+n);
}
flux(K, H, &rightHalfCellExtrapolationBackwards, &leftHalfCellExtrapolationBackwards, &spectralRadiusZ, &Fz, t, e[s]);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) += *(H+n);
*(result+n) /= d_dz;
}
#ifndef IDEAL
loadSourceTermsZ(K, H, u, s, t);
for (unsigned int n = 0; n < 4; ++n) {
*(result+n) += *(H+n);
*(result+n) *= d_dt;
}
#else
for (unsigned int n = 0; n < 4; ++n) {
*(result+n) *= d_dt;
}
#endif
for (unsigned int n = 4; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) *= d_dt;
}
updatedVars->ttt[s] += result[0];
updatedVars->ttx[s] += result[1];
updatedVars->tty[s] += result[2];
updatedVars->ttn[s] += result[3];
#ifdef PIMUNU
updatedVars->pitt[s] += result[4];
updatedVars->pitx[s] += result[5];
updatedVars->pity[s] += result[6];
updatedVars->pitn[s] += result[7];
updatedVars->pixx[s] += result[8];
updatedVars->pixy[s] += result[9];
updatedVars->pixn[s] += result[10];
updatedVars->piyy[s] += result[11];
updatedVars->piyn[s] += result[12];
updatedVars->pinn[s] += result[13];
#endif
#ifdef PI
updatedVars->Pi[s] += result[14];
#endif
}
}
/**************************************************************************************************************************************************/
6746e7887fad28f4f1f59d51fdcf78c2d4143301.cu
#include <stdlib.h>
#include <stdio.h> // for printf
#include <cuda.h>
#include <cuda_runtime.h>
#include "../include/FullyDiscreteKurganovTadmorScheme.cuh"
#include "../include/LatticeParameters.h"
#include "../include/DynamicalVariables.cuh"
#include "../include/SemiDiscreteKurganovTadmorScheme.cuh"
#include "../include/HalfSiteExtrapolation.cuh"
#include "../include/FluxFunctions.cuh"
#include "../include/SpectralRadius.cuh"
#include "../include/SourceTerms.cuh"
#include "../include/EnergyMomentumTensor.cuh"
#include "../include/CudaConfiguration.cuh"
#include "../include/RegulateDissipativeCurrents.cuh"
/**************************************************************************************************************************************************\
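// NOTE: this block is compiled out (the separator above is left unclosed).
// setNeighborCells gathers the five-point x, y and z stencils around cell s into I, J and
// K respectively, and stores the center value into Q[n].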
__device__
void setNeighborCells(const PRECISION * const __restrict__ data,
PRECISION * const __restrict__ I, PRECISION * const __restrict__ J, PRECISION * const __restrict__ K, PRECISION * const __restrict__ Q,
int s, unsigned int n, int ptr, int simm, int sim, int sip, int sipp, int sjmm, int sjm, int sjp, int sjpp, int skmm, int skm, int skp, int skpp) {
PRECISION data_ns = data[s];
// I
*(I+ptr) = *(data+simm);
*(I+ptr+1) = *(data+sim);
*(I+ptr+2) = data_ns;
*(I+ptr+3) = *(data+sip);
*(I+ptr+4) = *(data+sipp);
// J
*(J+ptr) = *(data+sjmm);
*(J+ptr+1) = *(data+sjm);
*(J+ptr+2) = data_ns;
*(J+ptr+3) = *(data+sjp);
*(J+ptr+4) = *(data+sjpp);
// K
*(K+ptr) = *(data+skmm);
*(K+ptr+1) = *(data+skm);
*(K+ptr+2) = data_ns;
*(K+ptr+3) = *(data+skp);
*(K+ptr+4) = *(data+skpp);
// Q
*(Q + n) = data_ns;
}
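// eulerStepKernel: fused single-pass Euler step (disabled). It evaluates the forward and
// backward KT fluxes in all three directions together with the source terms and writes the
// fully updated cell in one kernel; note that it performs no interior-cell bounds check.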
__global__
void eulerStepKernel(PRECISION t,
const CONSERVED_VARIABLES * const __restrict__ currrentVars, CONSERVED_VARIABLES * const __restrict__ updatedVars,
const PRECISION * const __restrict__ e, const PRECISION * const __restrict__ p,
const FLUID_VELOCITY * const __restrict__ u, const FLUID_VELOCITY * const __restrict__ up
) {
int i = blockDim.x * blockIdx.x + threadIdx.x + N_GHOST_CELLS_M;
int j = blockDim.y * blockIdx.y + threadIdx.y + N_GHOST_CELLS_M;
int k = blockDim.z * blockIdx.z + threadIdx.z + N_GHOST_CELLS_M;
PRECISION I[5 * NUMBER_CONSERVED_VARIABLES], J[5* NUMBER_CONSERVED_VARIABLES], K[5 * NUMBER_CONSERVED_VARIABLES];
PRECISION hpx[NUMBER_CONSERVED_VARIABLES], hmx[NUMBER_CONSERVED_VARIABLES],
hpy[NUMBER_CONSERVED_VARIABLES], hmy[NUMBER_CONSERVED_VARIABLES],
hpz[NUMBER_CONSERVED_VARIABLES], hmz[NUMBER_CONSERVED_VARIABLES];
PRECISION Q[NUMBER_CONSERVED_VARIABLES], S[NUMBER_CONSERVED_VARIABLES];
int s = columnMajorLinearIndex(i, j, k, d_ncx, d_ncy);
// calculate neighbor cell indices;
int sim = s-1;
int simm = sim-1;
int sip = s+1;
int sipp = sip+1;
int sjm = s-d_ncx;
int sjmm = sjm-d_ncx;
int sjp = s+d_ncx;
int sjpp = sjp+d_ncx;
int stride = d_ncx * d_ncy;
int skm = s-stride;
int skmm = skm-stride;
int skp = s+stride;
int skpp = skp+stride;
int ptr = 0;
setNeighborCells(currrentVars->ttt,I,J,K,Q,s,0,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->ttx,I,J,K,Q,s,1,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->tty,I,J,K,Q,s,2,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->ttn,I,J,K,Q,s,3,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->pitt,I,J,K,Q,s,4,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->pitx,I,J,K,Q,s,5,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->pity,I,J,K,Q,s,6,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->pitn,I,J,K,Q,s,7,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->pixx,I,J,K,Q,s,8,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->pixy,I,J,K,Q,s,9,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->pixn,I,J,K,Q,s,10,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->piyy,I,J,K,Q,s,11,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->piyn,I,J,K,Q,s,12,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->pinn,I,J,K,Q,s,13,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp);
flux(I, hpx, &rightHalfCellExtrapolationForward, &leftHalfCellExtrapolationForward, &spectralRadiusX, &Fx, t, e[s]);
flux(I, hmx, &rightHalfCellExtrapolationBackwards, &leftHalfCellExtrapolationBackwards, &spectralRadiusX, &Fx, t, e[s]);
flux(J, hpy, &rightHalfCellExtrapolationForward, &leftHalfCellExtrapolationForward, &spectralRadiusY, &Fy, t, e[s]);
flux(J, hmy, &rightHalfCellExtrapolationBackwards, &leftHalfCellExtrapolationBackwards, &spectralRadiusY, &Fy, t, e[s]);
flux(K, hpz, &rightHalfCellExtrapolationForward, &leftHalfCellExtrapolationForward, &spectralRadiusZ, &Fz, t, e[s]);
flux(K, hmz, &rightHalfCellExtrapolationBackwards, &leftHalfCellExtrapolationBackwards, &spectralRadiusZ, &Fz, t, e[s]);
loadSourceTerms(I, J, K, Q, S, u, up->ut[s], up->ux[s], up->uy[s], up->un[s], t, e[s], p, s);
PRECISION result[NUMBER_CONSERVED_VARIABLES];
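	// The x- and y-flux differences below are both divided by d_dx, i.e. this fused kernel
	// assumes a square transverse grid (d_dy == d_dx).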
for (int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) = *(Q+n) + d_dt * ( *(S+n) - ( *(hpx+n) - *(hmx+n) + *(hpy+n) - *(hmy+n) ) / d_dx - ( *(hpz+n) - *(hmz+n) )/d_dz );
}
updatedVars->ttt[s] = result[0];
updatedVars->ttx[s] = result[1];
updatedVars->tty[s] = result[2];
updatedVars->ttn[s] = result[3];
updatedVars->pitt[s] = result[4];
updatedVars->pitx[s] = result[5];
updatedVars->pity[s] = result[6];
updatedVars->pitn[s] = result[7];
updatedVars->pixx[s] = result[8];
updatedVars->pixy[s] = result[9];
updatedVars->pixn[s] = result[10];
updatedVars->piyy[s] = result[11];
updatedVars->piyn[s] = result[12];
updatedVars->pinn[s] = result[13];
}
/**************************************************************************************************************************************************/
/**************************************************************************************************************************************************\
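// Helper variants (also compiled out): setNeighborCells2 stores a caller-supplied center
// value into the stencil, while setNeighborCells3 fills only the four neighbor slots and
// leaves the center entry untouched.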
__device__
void setNeighborCells2(const PRECISION * const __restrict__ in, PRECISION * const __restrict__ out, PRECISION data_ns,
int ptr, int smm, int sm, int sp, int spp
) {
*(out + ptr ) = in[smm];
*(out + ptr + 1) = in[sm];
*(out + ptr + 2) = data_ns;
*(out + ptr + 3) = in[sp];
*(out + ptr + 4) = in[spp];
}
__device__
void setNeighborCells3(const PRECISION * const __restrict__ in, PRECISION * const __restrict__ out, int ptr, int smm, int sm, int sp, int spp
) {
*(out + ptr ) = in[smm];
*(out + ptr + 1) = in[sm];
*(out + ptr + 3) = in[sp];
*(out + ptr + 4) = in[spp];
}
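// eulerStepKernel_1D: fused Euler step with 1D thread indexing; it applies the source terms
// first and then subtracts the flux divergences in x, y and z.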
__global__
void eulerStepKernel_1D(PRECISION t,
const CONSERVED_VARIABLES * const __restrict__ currrentVars, CONSERVED_VARIABLES * const __restrict__ updatedVars,
const PRECISION * const __restrict__ e, const PRECISION * const __restrict__ p,
const FLUID_VELOCITY * const __restrict__ u, const FLUID_VELOCITY * const __restrict__ up
) {
unsigned int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < d_nElements) {
unsigned int k = threadID / (d_nx * d_ny) + N_GHOST_CELLS_M;
unsigned int j = (threadID % (d_nx * d_ny)) / d_nx + N_GHOST_CELLS_M;
unsigned int i = threadID % d_nx + N_GHOST_CELLS_M;
unsigned int s = columnMajorLinearIndex(i, j, k, d_ncx, d_ncy);
PRECISION I[5 * NUMBER_CONSERVED_VARIABLES], J[5* NUMBER_CONSERVED_VARIABLES], K[5 * NUMBER_CONSERVED_VARIABLES];
PRECISION H[NUMBER_CONSERVED_VARIABLES], Q[NUMBER_CONSERVED_VARIABLES];
// calculate neighbor cell indices;
int sim = s-1;
int simm = sim-1;
int sip = s+1;
int sipp = sip+1;
int sjm = s-d_ncx;
int sjmm = sjm-d_ncx;
int sjp = s+d_ncx;
int sjpp = sjp+d_ncx;
int stride = d_ncx * d_ncy;
int skm = s-stride;
int skmm = skm-stride;
int skp = s+stride;
int skpp = skp+stride;
int ptr = 0;
setNeighborCells(currrentVars->ttt,I,J,K,Q,s,0,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->ttx,I,J,K,Q,s,1,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->tty,I,J,K,Q,s,2,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->ttn,I,J,K,Q,s,3,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->pitt,I,J,K,Q,s,4,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->pitx,I,J,K,Q,s,5,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->pity,I,J,K,Q,s,6,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->pitn,I,J,K,Q,s,7,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->pixx,I,J,K,Q,s,8,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->pixy,I,J,K,Q,s,9,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->pixn,I,J,K,Q,s,10,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->piyy,I,J,K,Q,s,11,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->piyn,I,J,K,Q,s,12,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->pinn,I,J,K,Q,s,13,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp); ptr+=5;
setNeighborCells(currrentVars->Pi,I,J,K,Q,s,14,ptr,simm,sim,sip,sipp,sjmm,sjm,sjp,sjpp,skmm,skm,skp,skpp);
//=======================================================================================================================
PRECISION es = e[s];
loadSourceTerms(I, J, K, Q, H, u, up->ut[s], up->ux[s], up->uy[s], up->un[s], t, es, p, s);
PRECISION result[NUMBER_CONSERVED_VARIABLES];
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) = *(Q+n) + d_dt * ( *(H+n) );
}
// X derivatives
PRECISION facX = d_dt/d_dx;
flux(I, H, &rightHalfCellExtrapolationForward, &leftHalfCellExtrapolationForward, &spectralRadiusX, &Fx, t, es);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) -= *(H+n)*facX;
}
flux(I, H, &rightHalfCellExtrapolationBackwards, &leftHalfCellExtrapolationBackwards, &spectralRadiusX, &Fx, t, es);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) += *(H+n)*facX;
}
		// Y derivatives (use the y-stencil J and the y spacing)
		PRECISION facY = d_dt/d_dy;
		flux(J, H, &rightHalfCellExtrapolationForward, &leftHalfCellExtrapolationForward, &spectralRadiusY, &Fy, t, es);
		for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
			*(result+n) -= *(H+n)*facY;
		}
		flux(J, H, &rightHalfCellExtrapolationBackwards, &leftHalfCellExtrapolationBackwards, &spectralRadiusY, &Fy, t, es);
		for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
			*(result+n) += *(H+n)*facY;
		}
		// Z derivatives (use the z-stencil K)
		PRECISION facZ = d_dt/d_dz;
		flux(K, H, &rightHalfCellExtrapolationForward, &leftHalfCellExtrapolationForward, &spectralRadiusZ, &Fz, t, es);
		for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
			*(result+n) -= *(H+n)*facZ;
		}
		flux(K, H, &rightHalfCellExtrapolationBackwards, &leftHalfCellExtrapolationBackwards, &spectralRadiusZ, &Fz, t, es);
		for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
			*(result+n) += *(H+n)*facZ;
		}
//=======================================================================================================================
updatedVars->ttt[s] = result[0];
updatedVars->ttx[s] = result[1];
updatedVars->tty[s] = result[2];
updatedVars->ttn[s] = result[3];
#ifdef PIMUNU
updatedVars->pitt[s] = result[4];
updatedVars->pitx[s] = result[5];
updatedVars->pity[s] = result[6];
updatedVars->pitn[s] = result[7];
updatedVars->pixx[s] = result[8];
updatedVars->pixy[s] = result[9];
updatedVars->pixn[s] = result[10];
updatedVars->piyy[s] = result[11];
updatedVars->piyn[s] = result[12];
updatedVars->pinn[s] = result[13];
#endif
#ifdef PI
updatedVars->Pi[s] = result[14];
#endif
}
}
/**************************************************************************************************************************************************/
/**************************************************************************************************************************************************/
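// eulerStepKernelSource: applies the local source-term update, writing
// updatedVars = Q + dt*S(Q); the directional kernels below then accumulate the flux terms.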
__global__
void eulerStepKernelSource(PRECISION t,
const CONSERVED_VARIABLES * const __restrict__ currrentVars, CONSERVED_VARIABLES * const __restrict__ updatedVars,
const PRECISION * const __restrict__ e, const PRECISION * const __restrict__ p,
const FLUID_VELOCITY * const __restrict__ u, const FLUID_VELOCITY * const __restrict__ up
) {
int i = blockDim.x * blockIdx.x + threadIdx.x + N_GHOST_CELLS_M;
int j = blockDim.y * blockIdx.y + threadIdx.y + N_GHOST_CELLS_M;
int k = blockDim.z * blockIdx.z + threadIdx.z + N_GHOST_CELLS_M;
if ( (i < d_ncx-2) && (j < d_ncy-2) && (k < d_ncz-2) ) {
PRECISION Q[NUMBER_CONSERVED_VARIABLES];
PRECISION S[NUMBER_CONSERVED_VARIABLES];
int s = columnMajorLinearIndex(i, j, k, d_ncx, d_ncy);
Q[0] = currrentVars->ttt[s];
Q[1] = currrentVars->ttx[s];
Q[2] = currrentVars->tty[s];
Q[3] = currrentVars->ttn[s];
#ifdef PIMUNU
Q[4] = currrentVars->pitt[s];
Q[5] = currrentVars->pitx[s];
Q[6] = currrentVars->pity[s];
Q[7] = currrentVars->pitn[s];
Q[8] = currrentVars->pixx[s];
Q[9] = currrentVars->pixy[s];
Q[10] = currrentVars->pixn[s];
Q[11] = currrentVars->piyy[s];
Q[12] = currrentVars->piyn[s];
Q[13] = currrentVars->pinn[s];
#endif
#ifdef PI
Q[14] = currrentVars->Pi[s];
#endif
loadSourceTerms2(Q, S, u, up->ut[s], up->ux[s], up->uy[s], up->un[s], t, e[s], p, s);
PRECISION result[NUMBER_CONSERVED_VARIABLES];
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) = *(Q+n) + d_dt * ( *(S+n) );
}
updatedVars->ttt[s] = result[0];
updatedVars->ttx[s] = result[1];
updatedVars->tty[s] = result[2];
updatedVars->ttn[s] = result[3];
#ifdef PIMUNU
updatedVars->pitt[s] = result[4];
updatedVars->pitx[s] = result[5];
updatedVars->pity[s] = result[6];
updatedVars->pitn[s] = result[7];
updatedVars->pixx[s] = result[8];
updatedVars->pixy[s] = result[9];
updatedVars->pixn[s] = result[10];
updatedVars->piyy[s] = result[11];
updatedVars->piyn[s] = result[12];
updatedVars->pinn[s] = result[13];
#endif
#ifdef PI
updatedVars->Pi[s] = result[14];
#endif
}
}
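// Illustrative launch sketch only (all host-side names here are hypothetical; the actual
// configuration comes from CudaConfiguration.cuh and the host driver, and nx, ny, nz stand
// for the host copies of the lattice dimensions):
//   dim3 block(16, 16, 1);
//   dim3 grid((nx + block.x - 1)/block.x, (ny + block.y - 1)/block.y, nz);
//   eulerStepKernelSource<<<grid, block>>>(t, q, Q, e, p, u, up);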
/**************************************************************************************************************************************************/
/**************************************************************************************************************************************************/
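// setNeighborCellsJK2 loads the five-point stencil of a single conserved variable into out,
// starting at offset ptr; the neighbor indices are supplied by the caller (+/-1 in x,
// +/- d_ncx in y, +/- d_ncx*d_ncy in z).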
__device__
void setNeighborCellsJK2(const PRECISION * const __restrict__ in, PRECISION * const __restrict__ out,
int s, int ptr, int smm, int sm, int sp, int spp
) {
PRECISION data_ns = in[s];
*(out + ptr ) = in[smm];
*(out + ptr + 1) = in[sm];
*(out + ptr + 2) = data_ns;
*(out + ptr + 3) = in[sp];
*(out + ptr + 4) = in[spp];
}
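// eulerStepKernelX: x-direction sweep. It gathers the x stencils, forms the backward minus
// forward KT flux difference divided by d_dx, adds the x source terms to the T^{tau mu}
// components (unless IDEAL is defined), scales by d_dt and accumulates into updatedVars.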
__global__
void eulerStepKernelX(PRECISION t,
const CONSERVED_VARIABLES * const __restrict__ currrentVars, CONSERVED_VARIABLES * const __restrict__ updatedVars,
const FLUID_VELOCITY * const __restrict__ u, const PRECISION * const __restrict__ e
) {
int i = blockDim.x * blockIdx.x + threadIdx.x + N_GHOST_CELLS_M;
int j = blockDim.y * blockIdx.y + threadIdx.y + N_GHOST_CELLS_M;
int k = blockDim.z * blockIdx.z + threadIdx.z + N_GHOST_CELLS_M;
if ( (i < d_ncx-2) && (j < d_ncy-2) && (k < d_ncz-2) ) {
PRECISION I[5 * NUMBER_CONSERVED_VARIABLES];
PRECISION H[NUMBER_CONSERVED_VARIABLES];
int s = columnMajorLinearIndex(i, j, k, d_ncx, d_ncy);
// calculate neighbor cell indices;
int sim = s-1;
int simm = sim-1;
int sip = s+1;
int sipp = sip+1;
int ptr=0;
setNeighborCellsJK2(currrentVars->ttt,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->ttx,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->tty,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->ttn,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
#ifdef PIMUNU
setNeighborCellsJK2(currrentVars->pitt,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->pitx,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->pity,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->pitn,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixx,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixy,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixn,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->piyy,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->piyn,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->pinn,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
#endif
#ifdef PI
setNeighborCellsJK2(currrentVars->Pi,I,s,ptr,simm,sim,sip,sipp);
#endif
PRECISION result[NUMBER_CONSERVED_VARIABLES];
flux(I, H, &rightHalfCellExtrapolationForward, &leftHalfCellExtrapolationForward, &spectralRadiusX, &Fx, t, e[s]);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) = - *(H+n);
}
flux(I, H, &rightHalfCellExtrapolationBackwards, &leftHalfCellExtrapolationBackwards, &spectralRadiusX, &Fx, t, e[s]);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) += *(H+n);
*(result+n) /= d_dx;
}
#ifndef IDEAL
loadSourceTermsX(I, H, u, s);
for (unsigned int n = 0; n < 4; ++n) {
*(result+n) += *(H+n);
*(result+n) *= d_dt;
}
#else
for (unsigned int n = 0; n < 4; ++n) {
*(result+n) *= d_dt;
}
#endif
for (unsigned int n = 4; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) *= d_dt;
}
updatedVars->ttt[s] += result[0];
updatedVars->ttx[s] += result[1];
updatedVars->tty[s] += result[2];
updatedVars->ttn[s] += result[3];
#ifdef PIMUNU
updatedVars->pitt[s] += result[4];
updatedVars->pitx[s] += result[5];
updatedVars->pity[s] += result[6];
updatedVars->pitn[s] += result[7];
updatedVars->pixx[s] += result[8];
updatedVars->pixy[s] += result[9];
updatedVars->pixn[s] += result[10];
updatedVars->piyy[s] += result[11];
updatedVars->piyn[s] += result[12];
updatedVars->pinn[s] += result[13];
#endif
#ifdef PI
updatedVars->Pi[s] += result[14];
#endif
}
}
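// eulerStepKernelY: y-direction sweep; neighbors are offset by +/- d_ncx and the flux
// difference is divided by d_dy.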
__global__
void eulerStepKernelY(PRECISION t,
const CONSERVED_VARIABLES * const __restrict__ currrentVars, CONSERVED_VARIABLES * const __restrict__ updatedVars,
const FLUID_VELOCITY * const __restrict__ u, const PRECISION * const __restrict__ e
) {
int i = blockDim.x * blockIdx.x + threadIdx.x + N_GHOST_CELLS_M;
int j = blockDim.y * blockIdx.y + threadIdx.y + N_GHOST_CELLS_M;
int k = blockDim.z * blockIdx.z + threadIdx.z + N_GHOST_CELLS_M;
if ( (i < d_ncx-2) && (j < d_ncy-2) && (k < d_ncz-2) ) {
PRECISION J[5* NUMBER_CONSERVED_VARIABLES];
PRECISION H[NUMBER_CONSERVED_VARIABLES];
int s = columnMajorLinearIndex(i, j, k, d_ncx, d_ncy);
// calculate neighbor cell indices;
int sjm = s-d_ncx;
int sjmm = sjm-d_ncx;
int sjp = s+d_ncx;
int sjpp = sjp+d_ncx;
int ptr=0;
setNeighborCellsJK2(currrentVars->ttt,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->ttx,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->tty,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->ttn,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
#ifdef PIMUNU
setNeighborCellsJK2(currrentVars->pitt,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pitx,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pity,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pitn,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixx,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixy,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixn,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->piyy,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->piyn,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pinn,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
#endif
#ifdef PI
setNeighborCellsJK2(currrentVars->Pi,J,s,ptr,sjmm,sjm,sjp,sjpp);
#endif
PRECISION result[NUMBER_CONSERVED_VARIABLES];
flux(J, H, &rightHalfCellExtrapolationForward, &leftHalfCellExtrapolationForward, &spectralRadiusY, &Fy, t, e[s]);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) = - *(H+n);
}
flux(J, H, &rightHalfCellExtrapolationBackwards, &leftHalfCellExtrapolationBackwards, &spectralRadiusY, &Fy, t, e[s]);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) += *(H+n);
*(result+n) /= d_dy;
}
#ifndef IDEAL
loadSourceTermsY(J, H, u, s);
for (unsigned int n = 0; n < 4; ++n) {
*(result+n) += *(H+n);
*(result+n) *= d_dt;
}
#else
for (unsigned int n = 0; n < 4; ++n) {
*(result+n) *= d_dt;
}
#endif
for (unsigned int n = 4; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) *= d_dt;
}
updatedVars->ttt[s] += result[0];
updatedVars->ttx[s] += result[1];
updatedVars->tty[s] += result[2];
updatedVars->ttn[s] += result[3];
#ifdef PIMUNU
updatedVars->pitt[s] += result[4];
updatedVars->pitx[s] += result[5];
updatedVars->pity[s] += result[6];
updatedVars->pitn[s] += result[7];
updatedVars->pixx[s] += result[8];
updatedVars->pixy[s] += result[9];
updatedVars->pixn[s] += result[10];
updatedVars->piyy[s] += result[11];
updatedVars->piyn[s] += result[12];
updatedVars->pinn[s] += result[13];
#endif
#ifdef PI
updatedVars->Pi[s] += result[14];
#endif
}
}
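// eulerStepKernelZ: z (eta) direction sweep; neighbors are offset by +/- d_ncx*d_ncy and
// the flux difference is divided by d_dz.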
__global__
void eulerStepKernelZ(PRECISION t,
const CONSERVED_VARIABLES * const __restrict__ currrentVars, CONSERVED_VARIABLES * const __restrict__ updatedVars,
const FLUID_VELOCITY * const __restrict__ u, const PRECISION * const __restrict__ e
) {
int i = blockDim.x * blockIdx.x + threadIdx.x + N_GHOST_CELLS_M;
int j = blockDim.y * blockIdx.y + threadIdx.y + N_GHOST_CELLS_M;
int k = blockDim.z * blockIdx.z + threadIdx.z + N_GHOST_CELLS_M;
if ( (i < d_ncx-2) && (j < d_ncy-2) && (k < d_ncz-2) ) {
//printf("(i,j,k)=(%d,%d,%d)\n",i,j,k);
PRECISION K[5 * NUMBER_CONSERVED_VARIABLES];
PRECISION H[NUMBER_CONSERVED_VARIABLES];
int s = columnMajorLinearIndex(i, j, k, d_ncx, d_ncy);
// calculate neighbor cell indices;
int stride = d_ncx * d_ncy;
int skm = s-stride;
int skmm = skm-stride;
int skp = s+stride;
int skpp = skp+stride;
int ptr=0;
setNeighborCellsJK2(currrentVars->ttt,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->ttx,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->tty,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->ttn,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
#ifdef PIMUNU
setNeighborCellsJK2(currrentVars->pitt,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pitx,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pity,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pitn,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixx,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixy,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixn,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->piyy,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->piyn,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pinn,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
#endif
#ifdef PI
setNeighborCellsJK2(currrentVars->Pi,K,s,ptr,skmm,skm,skp,skpp);
#endif
PRECISION result[NUMBER_CONSERVED_VARIABLES];
flux(K, H, &rightHalfCellExtrapolationForward, &leftHalfCellExtrapolationForward, &spectralRadiusZ, &Fz, t, e[s]);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) = -*(H+n);
}
flux(K, H, &rightHalfCellExtrapolationBackwards, &leftHalfCellExtrapolationBackwards, &spectralRadiusZ, &Fz, t, e[s]);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) += *(H+n);
*(result+n) /= d_dz;
}
#ifndef IDEAL
loadSourceTermsZ(K, H, u, s, t);
for (unsigned int n = 0; n < 4; ++n) {
*(result+n) += *(H+n);
*(result+n) *= d_dt;
}
#else
for (unsigned int n = 0; n < 4; ++n) {
*(result+n) *= d_dt;
}
#endif
for (unsigned int n = 4; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) *= d_dt;
}
updatedVars->ttt[s] += result[0];
updatedVars->ttx[s] += result[1];
updatedVars->tty[s] += result[2];
updatedVars->ttn[s] += result[3];
#ifdef PIMUNU
updatedVars->pitt[s] += result[4];
updatedVars->pitx[s] += result[5];
updatedVars->pity[s] += result[6];
updatedVars->pitn[s] += result[7];
updatedVars->pixx[s] += result[8];
updatedVars->pixy[s] += result[9];
updatedVars->pixn[s] += result[10];
updatedVars->piyy[s] += result[11];
updatedVars->piyn[s] += result[12];
updatedVars->pinn[s] += result[13];
#endif
#ifdef PI
updatedVars->Pi[s] += result[14];
#endif
}
}
/**************************************************************************************************************************************************\
/**************************************************************************************************************************************************\
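// NOTE: the shared-memory kernel variants below are compiled out (the separator above is
// left unclosed).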
__device__
void setSharedData(const CONSERVED_VARIABLES * const __restrict__ in, PRECISION * const __restrict__ out,
int i, int j, int k, int nx, int ny, int stride, int s) {
int ptr = i + nx*(j + ny*k);
*(out + ptr) = in->ttt[s]; ptr+=stride;
*(out + ptr) = in->ttx[s]; ptr+=stride;
*(out + ptr) = in->tty[s]; ptr+=stride;
*(out + ptr) = in->ttn[s]; ptr+=stride;
*(out + ptr) = in->pitt[s]; ptr+=stride;
*(out + ptr) = in->pitx[s]; ptr+=stride;
*(out + ptr) = in->pity[s]; ptr+=stride;
*(out + ptr) = in->pitn[s]; ptr+=stride;
*(out + ptr) = in->pixx[s]; ptr+=stride;
*(out + ptr) = in->pixy[s]; ptr+=stride;
*(out + ptr) = in->pixn[s]; ptr+=stride;
*(out + ptr) = in->piyy[s]; ptr+=stride;
*(out + ptr) = in->piyn[s]; ptr+=stride;
*(out + ptr) = in->pinn[s]; ptr+=stride;
#ifdef PI
*(out + ptr) = in->Pi[s];
#endif
}
__global__
void eulerStepKernelSharedX(PRECISION t,
const CONSERVED_VARIABLES * const __restrict__ currrentVars, CONSERVED_VARIABLES * const __restrict__ updatedVars,
const FLUID_VELOCITY * const __restrict__ u, const PRECISION * const __restrict__ e
) {
int i = blockDim.x * blockIdx.x + threadIdx.x + N_GHOST_CELLS_M;
int j = blockDim.y * blockIdx.y + threadIdx.y + N_GHOST_CELLS_M;
int k = blockDim.z * blockIdx.z + threadIdx.z + N_GHOST_CELLS_M;
if ( (i < d_ncx-2) && (j < d_ncy-2) && (k < d_ncz-2) ) {
__shared__ PRECISION s_data[NUMBER_CONSERVED_VARIABLES*(BSX_X+4)*BSX_Y*BSX_Z];
int tx = threadIdx.x + N_GHOST_CELLS_M;
int ty = threadIdx.y;
int tz = threadIdx.z;
int s = columnMajorLinearIndex(i, j, k, d_ncx, d_ncy);
int stride = (BSX_X+4)*BSX_Y*BSX_Z;
int ts = tx + (BSX_X+4) * (ty + BSX_Y * tz);
setSharedData(currrentVars, s_data, tx, ty, tz, BSX_X+4, BSX_Y, stride, s);
if(threadIdx.x==0) {
int sim = s-1;
int simm = sim-1;
setSharedData(currrentVars, s_data, tx-1, ty, tz, BSX_X+4, BSX_Y, stride, sim);
setSharedData(currrentVars, s_data, tx-2, ty, tz, BSX_X+4, BSX_Y, stride, simm);
}
if(threadIdx.x==blockDim.x-1) {
int sip = s+1;
int sipp = sip+1;
setSharedData(currrentVars, s_data, tx+1, ty, tz, BSX_X+4, BSX_Y, stride, sip);
setSharedData(currrentVars, s_data, tx+2, ty, tz, BSX_X+4, BSX_Y, stride, sipp);
}
__syncthreads();
//==========================================================================================
PRECISION e_s = e[s];
PRECISION result[NUMBER_CONSERVED_VARIABLES], H[NUMBER_CONSERVED_VARIABLES];
flux2(s_data, H, &rightHalfCellExtrapolationForward, &leftHalfCellExtrapolationForward, &spectralRadiusX, &Fx, t, ts, stride, e_s);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) = - *(H+n);
}
flux2(s_data, H, &rightHalfCellExtrapolationBackwards, &leftHalfCellExtrapolationBackwards, &spectralRadiusX, &Fx, t, ts, stride, e_s);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) += *(H+n);
*(result+n) /= d_dx;
}
		// Source terms: central-difference x-derivatives of the pi^{tau nu} and pi^{x nu} components (and Pi) read from the shared-memory tile
PRECISION facX = 1/d_dx/2;
int ptr = ts+4*stride;
PRECISION dxpitt = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facX; ptr+=stride;
PRECISION dxpitx = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facX; ptr+=stride;
PRECISION dxpity = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facX; ptr+=stride;
PRECISION dxpitn = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facX; ptr+=stride;
PRECISION dxpixx = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facX; ptr+=stride;
PRECISION dxpixy = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facX; ptr+=stride;
PRECISION dxpixn = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facX; ptr+=4*stride;
PRECISION ut = u->ut[s];
PRECISION ux = u->ux[s];
PRECISION vx = fdividef(ux, ut);
#ifndef PI
result[0] += dxpitt*vx - dxpitx;
result[1] += dxpitx*vx - dxpixx;
#else
PRECISION dxPi = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facX;
result[0] += dxpitt*vx - dxpitx - vx*dxPi;
result[1] += dxpitx*vx - dxpixx - dxPi;
#endif
result[2] += dxpity*vx - dxpixy;
result[3] += dxpitn*vx - dxpixn;
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) *= d_dt;
}
//==========================================================================================
updatedVars->ttt[s] += result[0];
updatedVars->ttx[s] += result[1];
updatedVars->tty[s] += result[2];
updatedVars->ttn[s] += result[3];
#ifdef PIMUNU
updatedVars->pitt[s] += result[4];
updatedVars->pitx[s] += result[5];
updatedVars->pity[s] += result[6];
updatedVars->pitn[s] += result[7];
updatedVars->pixx[s] += result[8];
updatedVars->pixy[s] += result[9];
updatedVars->pixn[s] += result[10];
updatedVars->piyy[s] += result[11];
updatedVars->piyn[s] += result[12];
updatedVars->pinn[s] += result[13];
#endif
#ifdef PI
updatedVars->Pi[s] += result[14];
#endif
}
}
__global__
//__launch_bounds__(1024, 3)
void eulerStepKernelSharedY(PRECISION t,
const CONSERVED_VARIABLES * const __restrict__ currrentVars, CONSERVED_VARIABLES * const __restrict__ updatedVars,
const FLUID_VELOCITY * const __restrict__ u, const PRECISION * const __restrict__ e
) {
int i = blockDim.x * blockIdx.x + threadIdx.x + N_GHOST_CELLS_M;
int j = blockDim.y * blockIdx.y + threadIdx.y + N_GHOST_CELLS_M;
int k = blockDim.z * blockIdx.z + threadIdx.z + N_GHOST_CELLS_M;
if ( (i < d_ncx-2) && (j < d_ncy-2) && (k < d_ncz-2) ) {
__shared__ PRECISION s_data[NUMBER_CONSERVED_VARIABLES*(BSY_Y+4)*BSY_X*BSY_Z];
int tx = threadIdx.x;
int ty = threadIdx.y + N_GHOST_CELLS_M;
int tz = threadIdx.z;
int s = columnMajorLinearIndex(i, j, k, d_ncx, d_ncy);
int stride = (BSY_Y+4)*BSY_X*BSY_Z;
int ts = ty + (BSY_Y+4) * (tx + BSY_X * tz);
setSharedData(currrentVars, s_data, ty, tx, tz, BSY_Y+4, BSY_X, stride, s);
if(threadIdx.y==0) {
int sjm = s-d_ncx;
int sjmm = sjm-d_ncx;
setSharedData(currrentVars, s_data, ty-1, tx, tz, BSY_Y+4, BSY_X, stride, sjm);
setSharedData(currrentVars, s_data, ty-2, tx, tz, BSY_Y+4, BSY_X, stride, sjmm);
}
if(threadIdx.y==blockDim.y-1) {
int sjp = s+d_ncx;
int sjpp = sjp+d_ncx;
setSharedData(currrentVars, s_data, ty+1, tx, tz, BSY_Y+4, BSY_X, stride, sjp);
setSharedData(currrentVars, s_data, ty+2, tx, tz, BSY_Y+4, BSY_X, stride, sjpp);
}
__syncthreads();
//==========================================================================================
PRECISION e_s = e[s];
PRECISION result[NUMBER_CONSERVED_VARIABLES], H[NUMBER_CONSERVED_VARIABLES];
flux2(s_data, H, &rightHalfCellExtrapolationForward, &leftHalfCellExtrapolationForward, &spectralRadiusY, &Fy, t, ts, stride, e_s);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) = - *(H+n);
}
flux2(s_data, H, &rightHalfCellExtrapolationBackwards, &leftHalfCellExtrapolationBackwards, &spectralRadiusY, &Fy, t, ts, stride, e_s);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) += *(H+n);
*(result+n) /= d_dy;
}
		// Source terms: central-difference y-derivatives of the required shear-stress components; the repeated ptr+=stride statements skip slabs whose y-derivative is not needed
PRECISION facY = 1/d_dy/2;
int ptr = ts+4*stride;
PRECISION dypitt = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facY; ptr+=stride;
PRECISION dypitx = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facY; ptr+=stride;
PRECISION dypity = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facY; ptr+=stride;
PRECISION dypitn = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facY; ptr+=stride; ptr+=stride;
PRECISION dypixy = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facY; ptr+=stride; ptr+=stride;
PRECISION dypiyy = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facY; ptr+=stride;
PRECISION dypiyn = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facY; ptr+=stride; ptr+=stride;
PRECISION ut = u->ut[s];
PRECISION uy = u->uy[s];
		PRECISION vy = fdividef(uy, ut);
#ifndef PI
result[0] += dypitt*vy - dypity;
result[2] += dypity*vy - dypiyy;
#else
PRECISION dyPi = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facY;
result[0] += dypitt*vy - dypity - vy*dyPi;
result[2] += dypity*vy - dypiyy - dyPi;
#endif
result[1] += dypitx*vy - dypixy;
result[3] += dypitn*vy - dypiyn;
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) *= d_dt;
}
//==========================================================================================
updatedVars->ttt[s] += result[0];
updatedVars->ttx[s] += result[1];
updatedVars->tty[s] += result[2];
updatedVars->ttn[s] += result[3];
#ifdef PIMUNU
updatedVars->pitt[s] += result[4];
updatedVars->pitx[s] += result[5];
updatedVars->pity[s] += result[6];
updatedVars->pitn[s] += result[7];
updatedVars->pixx[s] += result[8];
updatedVars->pixy[s] += result[9];
updatedVars->pixn[s] += result[10];
updatedVars->piyy[s] += result[11];
updatedVars->piyn[s] += result[12];
updatedVars->pinn[s] += result[13];
#endif
#ifdef PI
updatedVars->Pi[s] += result[14];
#endif
}
}
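// Same tiling pattern as the y kernel, but staged and differenced along the third
// grid direction (presumably the longitudinal eta coordinate): neighbors are
// d_ncx*d_ncy apart, the spacing is d_dz, and when PI is defined the bulk term in
// the eta-momentum component picks up an extra 1/t^2 factor.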
__global__
void eulerStepKernelSharedZ(PRECISION t,
const CONSERVED_VARIABLES * const __restrict__ currrentVars, CONSERVED_VARIABLES * const __restrict__ updatedVars,
const FLUID_VELOCITY * const __restrict__ u, const PRECISION * const __restrict__ e
) {
int i = blockDim.x * blockIdx.x + threadIdx.x + N_GHOST_CELLS_M;
int j = blockDim.y * blockIdx.y + threadIdx.y + N_GHOST_CELLS_M;
int k = blockDim.z * blockIdx.z + threadIdx.z + N_GHOST_CELLS_M;
if ( (i < d_ncx-2) && (j < d_ncy-2) && (k < d_ncz-2) ) {
printf("(i,j,k)=(%d,%d,%d)\n",i,j,k);
__shared__ PRECISION s_data[NUMBER_CONSERVED_VARIABLES*(BSZ_Z+4)*BSZ_X*BSZ_Y];
int tx = threadIdx.x;
int ty = threadIdx.y;
int tz = threadIdx.z + N_GHOST_CELLS_M;
int s = columnMajorLinearIndex(i, j, k, d_ncx, d_ncy);
int stride = (BSZ_Z+4)*BSZ_X*BSZ_Y;
int ts = tz + (BSZ_Z+4) * (ty + BSZ_Y * tx);
setSharedData(currrentVars, s_data, tz, ty, tx, BSZ_Z+4, BSZ_Y, stride, s);
if(threadIdx.z==0) {
int skm = s-d_ncx*d_ncy;
int skmm = skm-d_ncx*d_ncy;
setSharedData(currrentVars, s_data, tz-1, ty, tx, BSZ_Z+4, BSZ_Y, stride, skm);
setSharedData(currrentVars, s_data, tz-2, ty, tx, BSZ_Z+4, BSZ_Y, stride, skmm);
}
if(threadIdx.z==blockDim.z-1) {
int skp = s+d_ncx*d_ncy;
int skpp = skp+d_ncx*d_ncy;
setSharedData(currrentVars, s_data, tz+1, ty, tx, BSZ_Z+4, BSZ_Y, stride, skp);
setSharedData(currrentVars, s_data, tz+2, ty, tx, BSZ_Z+4, BSZ_Y, stride, skpp);
}
__syncthreads();
//==========================================================================================
PRECISION e_s = e[s];
PRECISION result[NUMBER_CONSERVED_VARIABLES], H[NUMBER_CONSERVED_VARIABLES];
flux2(s_data, H, &rightHalfCellExtrapolationForward, &leftHalfCellExtrapolationForward, &spectralRadiusZ, &Fz, t, ts, stride, e_s);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) = - *(H+n);
}
flux2(s_data, H, &rightHalfCellExtrapolationBackwards, &leftHalfCellExtrapolationBackwards, &spectralRadiusZ, &Fz, t, ts, stride, e_s);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) += *(H+n);
*(result+n) /= d_dz;
}
// source
PRECISION facZ = 1/d_dz/2;
int ptr = ts+4*stride;
PRECISION dnpitt = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facZ; ptr+=stride;
PRECISION dnpitx = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facZ; ptr+=stride;
PRECISION dnpity = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facZ; ptr+=stride;
PRECISION dnpitn = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facZ; ptr+=stride; ptr+=stride; ptr+=stride;
PRECISION dnpixn = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facZ; ptr+=stride; ptr+=stride;
PRECISION dnpiyn = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facZ; ptr+=stride;
PRECISION dnpinn = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facZ; ptr+=stride;
PRECISION ut = u->ut[s];
PRECISION un = u->un[s];
PRECISION vn = fdividef(un, ut);
#ifndef PI
result[0] += dnpitt*vn - dnpitn;
result[3] += dnpitn*vn - dnpinn;
#else
PRECISION dnPi = (*(s_data + ptr + 1) - *(s_data + ptr - 1)) *facZ;
result[0] += dnpitt*vn - dnpitn - vn*dnPi;
result[3] += dnpitn*vn - dnpinn - dnPi/powf(t,2.0f);
#endif
result[1] += dnpitx*vn - dnpixn;
result[2] += dnpity*vn - dnpiyn;
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) *= d_dt;
}
//==========================================================================================
updatedVars->ttt[s] += result[0];
updatedVars->ttx[s] += result[1];
updatedVars->tty[s] += result[2];
updatedVars->ttn[s] += result[3];
#ifdef PIMUNU
updatedVars->pitt[s] += result[4];
updatedVars->pitx[s] += result[5];
updatedVars->pity[s] += result[6];
updatedVars->pitn[s] += result[7];
updatedVars->pixx[s] += result[8];
updatedVars->pixy[s] += result[9];
updatedVars->pixn[s] += result[10];
updatedVars->piyy[s] += result[11];
updatedVars->piyn[s] += result[12];
updatedVars->pinn[s] += result[13];
#endif
#ifdef PI
updatedVars->Pi[s] += result[14];
#endif
}
}
/**************************************************************************************************************************************************\
/**************************************************************************************************************************************************/
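// 1D-indexed kernel variants: a flat thread index is mapped back to (i,j,k) with
// div/mod arithmetic instead of a 3D block decomposition. The Source kernel below
// applies the purely local source terms; the X/Y/Z kernels that follow add the
// directional flux divergences and (unless IDEAL is defined) the viscous gradient
// source terms for that direction.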
__global__
void eulerStepKernelSource_1D(PRECISION t,
const CONSERVED_VARIABLES * const __restrict__ currrentVars, CONSERVED_VARIABLES * const __restrict__ updatedVars,
const PRECISION * const __restrict__ e, const PRECISION * const __restrict__ p,
const FLUID_VELOCITY * const __restrict__ u, const FLUID_VELOCITY * const __restrict__ up
) {
unsigned int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < d_nElements) {
unsigned int k = threadID / (d_nx * d_ny) + N_GHOST_CELLS_M;
unsigned int j = (threadID % (d_nx * d_ny)) / d_nx + N_GHOST_CELLS_M;
unsigned int i = threadID % d_nx + N_GHOST_CELLS_M;
unsigned int s = columnMajorLinearIndex(i, j, k, d_ncx, d_ncy);
PRECISION Q[NUMBER_CONSERVED_VARIABLES];
PRECISION S[NUMBER_CONSERVED_VARIABLES];
Q[0] = currrentVars->ttt[s];
Q[1] = currrentVars->ttx[s];
Q[2] = currrentVars->tty[s];
Q[3] = currrentVars->ttn[s];
#ifdef PIMUNU
Q[4] = currrentVars->pitt[s];
Q[5] = currrentVars->pitx[s];
Q[6] = currrentVars->pity[s];
Q[7] = currrentVars->pitn[s];
Q[8] = currrentVars->pixx[s];
Q[9] = currrentVars->pixy[s];
Q[10] = currrentVars->pixn[s];
Q[11] = currrentVars->piyy[s];
Q[12] = currrentVars->piyn[s];
Q[13] = currrentVars->pinn[s];
#endif
#ifdef PI
Q[14] = currrentVars->Pi[s];
#endif
loadSourceTerms2(Q, S, u, up->ut[s], up->ux[s], up->uy[s], up->un[s], t, e[s], p, s);
PRECISION result[NUMBER_CONSERVED_VARIABLES];
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) = *(Q+n) + d_dt * ( *(S+n) );
}
updatedVars->ttt[s] = result[0];
updatedVars->ttx[s] = result[1];
updatedVars->tty[s] = result[2];
updatedVars->ttn[s] = result[3];
#ifdef PIMUNU
updatedVars->pitt[s] = result[4];
updatedVars->pitx[s] = result[5];
updatedVars->pity[s] = result[6];
updatedVars->pitn[s] = result[7];
updatedVars->pixx[s] = result[8];
updatedVars->pixy[s] = result[9];
updatedVars->pixn[s] = result[10];
updatedVars->piyy[s] = result[11];
updatedVars->piyn[s] = result[12];
updatedVars->pinn[s] = result[13];
#endif
#ifdef PI
updatedVars->Pi[s] = result[14];
#endif
}
}
__global__
void eulerStepKernelX_1D(PRECISION t,
const CONSERVED_VARIABLES * const __restrict__ currrentVars, CONSERVED_VARIABLES * const __restrict__ updatedVars,
const FLUID_VELOCITY * const __restrict__ u, const PRECISION * const __restrict__ e
) {
unsigned int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < d_nElements) {
unsigned int k = threadID / (d_nx * d_ny) + N_GHOST_CELLS_M;
unsigned int j = (threadID % (d_nx * d_ny)) / d_nx + N_GHOST_CELLS_M;
unsigned int i = threadID % d_nx + N_GHOST_CELLS_M;
unsigned int s = columnMajorLinearIndex(i, j, k, d_ncx, d_ncy);
PRECISION I[5 * NUMBER_CONSERVED_VARIABLES];
PRECISION H[NUMBER_CONSERVED_VARIABLES];
// calculate neighbor cell indices;
int sim = s-1;
int simm = sim-1;
int sip = s+1;
int sipp = sip+1;
int ptr=0;
setNeighborCellsJK2(currrentVars->ttt,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->ttx,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->tty,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->ttn,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
#ifdef PIMUNU
setNeighborCellsJK2(currrentVars->pitt,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->pitx,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->pity,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->pitn,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixx,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixy,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixn,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->piyy,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->piyn,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
setNeighborCellsJK2(currrentVars->pinn,I,s,ptr,simm,sim,sip,sipp); ptr+=5;
#endif
#ifdef PI
setNeighborCellsJK2(currrentVars->Pi,I,s,ptr,simm,sim,sip,sipp);
#endif
PRECISION result[NUMBER_CONSERVED_VARIABLES];
flux(I, H, &rightHalfCellExtrapolationForward, &leftHalfCellExtrapolationForward, &spectralRadiusX, &Fx, t, e[s]);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) = - *(H+n);
}
flux(I, H, &rightHalfCellExtrapolationBackwards, &leftHalfCellExtrapolationBackwards, &spectralRadiusX, &Fx, t, e[s]);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) += *(H+n);
*(result+n) /= d_dx;
}
#ifndef IDEAL
loadSourceTermsX(I, H, u, s);
for (unsigned int n = 0; n < 4; ++n) {
*(result+n) += *(H+n);
*(result+n) *= d_dt;
}
#else
for (unsigned int n = 0; n < 4; ++n) {
*(result+n) *= d_dt;
}
#endif
for (unsigned int n = 4; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) *= d_dt;
}
updatedVars->ttt[s] += result[0];
updatedVars->ttx[s] += result[1];
updatedVars->tty[s] += result[2];
updatedVars->ttn[s] += result[3];
#ifdef PIMUNU
updatedVars->pitt[s] += result[4];
updatedVars->pitx[s] += result[5];
updatedVars->pity[s] += result[6];
updatedVars->pitn[s] += result[7];
updatedVars->pixx[s] += result[8];
updatedVars->pixy[s] += result[9];
updatedVars->pixn[s] += result[10];
updatedVars->piyy[s] += result[11];
updatedVars->piyn[s] += result[12];
updatedVars->pinn[s] += result[13];
#endif
#ifdef PI
updatedVars->Pi[s] += result[14];
#endif
}
}
__global__
void eulerStepKernelY_1D(PRECISION t,
const CONSERVED_VARIABLES * const __restrict__ currrentVars, CONSERVED_VARIABLES * const __restrict__ updatedVars,
const FLUID_VELOCITY * const __restrict__ u, const PRECISION * const __restrict__ e
) {
unsigned int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < d_nElements) {
unsigned int k = threadID / (d_nx * d_ny) + N_GHOST_CELLS_M;
unsigned int j = (threadID % (d_nx * d_ny)) / d_nx + N_GHOST_CELLS_M;
unsigned int i = threadID % d_nx + N_GHOST_CELLS_M;
unsigned int s = columnMajorLinearIndex(i, j, k, d_ncx, d_ncy);
PRECISION J[5* NUMBER_CONSERVED_VARIABLES];
PRECISION H[NUMBER_CONSERVED_VARIABLES];
// calculate neighbor cell indices;
int sjm = s-d_ncx;
int sjmm = sjm-d_ncx;
int sjp = s+d_ncx;
int sjpp = sjp+d_ncx;
int ptr=0;
setNeighborCellsJK2(currrentVars->ttt,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->ttx,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->tty,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->ttn,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
#ifdef PIMUNU
setNeighborCellsJK2(currrentVars->pitt,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pitx,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pity,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pitn,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixx,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixy,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixn,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->piyy,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->piyn,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pinn,J,s,ptr,sjmm,sjm,sjp,sjpp); ptr+=5;
#endif
#ifdef PI
setNeighborCellsJK2(currrentVars->Pi,J,s,ptr,sjmm,sjm,sjp,sjpp);
#endif
PRECISION result[NUMBER_CONSERVED_VARIABLES];
flux(J, H, &rightHalfCellExtrapolationForward, &leftHalfCellExtrapolationForward, &spectralRadiusY, &Fy, t, e[s]);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) = - *(H+n);
}
flux(J, H, &rightHalfCellExtrapolationBackwards, &leftHalfCellExtrapolationBackwards, &spectralRadiusY, &Fy, t, e[s]);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) += *(H+n);
*(result+n) /= d_dy;
}
#ifndef IDEAL
loadSourceTermsY(J, H, u, s);
for (unsigned int n = 0; n < 4; ++n) {
*(result+n) += *(H+n);
*(result+n) *= d_dt;
}
#else
for (unsigned int n = 0; n < 4; ++n) {
*(result+n) *= d_dt;
}
#endif
for (unsigned int n = 4; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) *= d_dt;
}
updatedVars->ttt[s] += result[0];
updatedVars->ttx[s] += result[1];
updatedVars->tty[s] += result[2];
updatedVars->ttn[s] += result[3];
#ifdef PIMUNU
updatedVars->pitt[s] += result[4];
updatedVars->pitx[s] += result[5];
updatedVars->pity[s] += result[6];
updatedVars->pitn[s] += result[7];
updatedVars->pixx[s] += result[8];
updatedVars->pixy[s] += result[9];
updatedVars->pixn[s] += result[10];
updatedVars->piyy[s] += result[11];
updatedVars->piyn[s] += result[12];
updatedVars->pinn[s] += result[13];
#endif
#ifdef PI
updatedVars->Pi[s] += result[14];
#endif
}
}
__global__
void eulerStepKernelZ_1D(PRECISION t,
const CONSERVED_VARIABLES * const __restrict__ currrentVars, CONSERVED_VARIABLES * const __restrict__ updatedVars,
const FLUID_VELOCITY * const __restrict__ u, const PRECISION * const __restrict__ e
) {
unsigned int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if (threadID < d_nElements) {
unsigned int k = threadID / (d_nx * d_ny) + N_GHOST_CELLS_M;
unsigned int j = (threadID % (d_nx * d_ny)) / d_nx + N_GHOST_CELLS_M;
unsigned int i = threadID % d_nx + N_GHOST_CELLS_M;
unsigned int s = columnMajorLinearIndex(i, j, k, d_ncx, d_ncy);
PRECISION K[5 * NUMBER_CONSERVED_VARIABLES];
PRECISION H[NUMBER_CONSERVED_VARIABLES];
// calculate neighbor cell indices;
int stride = d_ncx * d_ncy;
int skm = s-stride;
int skmm = skm-stride;
int skp = s+stride;
int skpp = skp+stride;
int ptr=0;
setNeighborCellsJK2(currrentVars->ttt,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->ttx,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->tty,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->ttn,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
#ifdef PIMUNU
setNeighborCellsJK2(currrentVars->pitt,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pitx,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pity,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pitn,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixx,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixy,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pixn,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->piyy,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->piyn,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
setNeighborCellsJK2(currrentVars->pinn,K,s,ptr,skmm,skm,skp,skpp); ptr+=5;
#endif
#ifdef PI
setNeighborCellsJK2(currrentVars->Pi,K,s,ptr,skmm,skm,skp,skpp);
#endif
PRECISION result[NUMBER_CONSERVED_VARIABLES];
flux(K, H, &rightHalfCellExtrapolationForward, &leftHalfCellExtrapolationForward, &spectralRadiusZ, &Fz, t, e[s]);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) = -*(H+n);
}
flux(K, H, &rightHalfCellExtrapolationBackwards, &leftHalfCellExtrapolationBackwards, &spectralRadiusZ, &Fz, t, e[s]);
for (unsigned int n = 0; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) += *(H+n);
*(result+n) /= d_dz;
}
#ifndef IDEAL
loadSourceTermsZ(K, H, u, s, t);
for (unsigned int n = 0; n < 4; ++n) {
*(result+n) += *(H+n);
*(result+n) *= d_dt;
}
#else
for (unsigned int n = 0; n < 4; ++n) {
*(result+n) *= d_dt;
}
#endif
for (unsigned int n = 4; n < NUMBER_CONSERVED_VARIABLES; ++n) {
*(result+n) *= d_dt;
}
updatedVars->ttt[s] += result[0];
updatedVars->ttx[s] += result[1];
updatedVars->tty[s] += result[2];
updatedVars->ttn[s] += result[3];
#ifdef PIMUNU
updatedVars->pitt[s] += result[4];
updatedVars->pitx[s] += result[5];
updatedVars->pity[s] += result[6];
updatedVars->pitn[s] += result[7];
updatedVars->pixx[s] += result[8];
updatedVars->pixy[s] += result[9];
updatedVars->pixn[s] += result[10];
updatedVars->piyy[s] += result[11];
updatedVars->piyn[s] += result[12];
updatedVars->pinn[s] += result[13];
#endif
#ifdef PI
updatedVars->Pi[s] += result[14];
#endif
}
}
/**************************************************************************************************************************************************/
|
1910a9862b29558b0a045220f087ab122cb43be7.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "STREAM_Scale_double.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
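// Auto-generated benchmark harness: for each matrix size and block shape it
// allocates two device buffers, rounds the problem size up to a multiple of the
// block dimensions, runs one warm-up launch plus 10 untimed iterations of
// STREAM_Scale_double, then times 1000 kernel launches and prints
// [microseconds,(BLOCKX,BLOCKY),(XSIZE,YSIZE)].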
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
double *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
double scale = 2;
size_t len = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
STREAM_Scale_double), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,scale,len);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
STREAM_Scale_double), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,scale,len);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
STREAM_Scale_double), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,scale,len);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 1910a9862b29558b0a045220f087ab122cb43be7.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "STREAM_Scale_double.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
double *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
double scale = 2;
size_t len = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
STREAM_Scale_double<<<gridBlock,threadBlock>>>(a,b,scale,len);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
STREAM_Scale_double<<<gridBlock,threadBlock>>>(a,b,scale,len);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
STREAM_Scale_double<<<gridBlock,threadBlock>>>(a,b,scale,len);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
0ef6d64d1fe80715d686428dc5814f7448c43094.hip | // !!! This is a file automatically generated by hipify!!!
/*----------------------------------------------------------------
* Multiprocesadores: CUDA matrix multiplication
* Date: 26-Sep-2015
* Author: A01205559 Roberto Nuñez
* X = 1_000, Y = 2_000
Speedup = 1392.76660 / 0.00310 = 535679.462
*--------------------------------------------------------------*/
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "cheader.h"
#define X 1000
#define Y 2000
#define BLOCK_SIZE 1
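// Naive matrix multiply: one thread per element of C = A * B, with A of size
// X x Y, B of size Y x X, and C of size X x X. BLOCK_SIZE 1 means every block
// holds a single thread.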
__global__ void multiply(int* a, int* b, int* c, int x, int y) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int temp = 0;
if(row < x && col < x) {
for(int i = 0; i < y; i++) {
temp += a[row * y + i] * b[i * x + col];
}
// write inside the bounds check so threads past the X x X output do not write out of range
c[row * x + col] = temp;
}
}
int main() {
double acum = 0;
int* matrixA;
int* matrixB;
// int matrixA[X * Y] = {1,-1,1,2,2,3,-2,-3,-1};
// int matrixB[Y * X] = {1,0,4,0,2,5,1,3,0};
// int matrixA[X * Y] = {1,2,-3,4,0,-2};
// int matrixB[Y * X] = {3,1,2,4,-1,5};
int* matrixC;
int* d_matrixA, *d_matrixB, *d_matrixC;
matrixA = (int*)malloc(sizeof(int) * X * Y);
matrixB = (int*)malloc(sizeof(int) * X * Y);
matrixC = (int*)malloc(sizeof(int) * X * X);
for(int i = 0; i < Y * X; i++) {
matrixA[i] = (i % Y) + 1;
}
for(int i = 0; i < Y * X; i++) {
matrixB[i] = (i % X) + 1;
}
printf("Matrix A:\n");
for(int i = 0; i < X * Y; i++) {
if(i % Y == 0) printf("\n");
printf("%i ", matrixA[i]);
}
printf("\n");
printf("Matrix B:\n");
for(int i = 0; i < Y * X; i++) {
if(i % X == 0) printf("\n");
printf("%i ", matrixB[i]);
}
printf("\n");
hipMalloc((void**)&d_matrixA, sizeof(int) * X * Y);
hipMalloc((void**)&d_matrixB, sizeof(int) * X * Y);
hipMalloc((void**)&d_matrixC, sizeof(int) * X * X);
hipMemcpy(d_matrixA, matrixA, sizeof(int) * X * Y, hipMemcpyHostToDevice);
hipMemcpy(d_matrixB, matrixB, sizeof(int) * X * Y, hipMemcpyHostToDevice);
unsigned int grid_rows = (X + BLOCK_SIZE - 1) / BLOCK_SIZE;
unsigned int grid_cols = (Y + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimGrid(grid_cols, grid_rows);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
for(int i = 0; i < 10; i++) {
start_timer();
hipLaunchKernelGGL(( multiply), dim3(dimGrid),dim3(dimBlock), 0, 0, d_matrixA, d_matrixB, d_matrixC, X, Y);
acum += stop_timer();
}
hipMemcpy(matrixC, d_matrixC, sizeof(int) * X * X, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
printf("Result:\n");
for(int i = 0; i < X * X; i++) {
if(i % X == 0) printf("\n");
printf("%i ", matrixC[i]);
}
printf("\n");
printf("avg time = %.5f ms\n", (acum / 10));
hipFree(d_matrixA);
hipFree(d_matrixB);
hipFree(d_matrixC);
free(matrixA);
free(matrixB);
free(matrixC);
return 0;
}
| 0ef6d64d1fe80715d686428dc5814f7448c43094.cu | /*----------------------------------------------------------------
* Multiprocesadores: CUDA matrix multiplication
* Date: 26-Sep-2015
* Author: A01205559 Roberto Nuñez
* X = 1_000, Y = 2_000
Speedup = 1392.76660 / 0.00310 = 535679.462
*--------------------------------------------------------------*/
#include "cuda_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "cheader.h"
#define X 1000
#define Y 2000
#define BLOCK_SIZE 1
__global__ void multiply(int* a, int* b, int* c, int x, int y) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int temp = 0;
if(row < x && col < x) {
for(int i = 0; i < y; i++) {
temp += a[row * y + i] * b[i * x + col];
}
// write inside the bounds check so threads past the X x X output do not write out of range
c[row * x + col] = temp;
}
}
int main() {
double acum = 0;
int* matrixA;
int* matrixB;
// int matrixA[X * Y] = {1,-1,1,2,2,3,-2,-3,-1};
// int matrixB[Y * X] = {1,0,4,0,2,5,1,3,0};
// int matrixA[X * Y] = {1,2,-3,4,0,-2};
// int matrixB[Y * X] = {3,1,2,4,-1,5};
int* matrixC;
int* d_matrixA, *d_matrixB, *d_matrixC;
matrixA = (int*)malloc(sizeof(int) * X * Y);
matrixB = (int*)malloc(sizeof(int) * X * Y);
matrixC = (int*)malloc(sizeof(int) * X * X);
for(int i = 0; i < Y * X; i++) {
matrixA[i] = (i % Y) + 1;
}
for(int i = 0; i < Y * X; i++) {
matrixB[i] = (i % X) + 1;
}
printf("Matrix A:\n");
for(int i = 0; i < X * Y; i++) {
if(i % Y == 0) printf("\n");
printf("%i ", matrixA[i]);
}
printf("\n");
printf("Matrix B:\n");
for(int i = 0; i < Y * X; i++) {
if(i % X == 0) printf("\n");
printf("%i ", matrixB[i]);
}
printf("\n");
cudaMalloc((void**)&d_matrixA, sizeof(int) * X * Y);
cudaMalloc((void**)&d_matrixB, sizeof(int) * X * Y);
cudaMalloc((void**)&d_matrixC, sizeof(int) * X * X);
cudaMemcpy(d_matrixA, matrixA, sizeof(int) * X * Y, cudaMemcpyHostToDevice);
cudaMemcpy(d_matrixB, matrixB, sizeof(int) * X * Y, cudaMemcpyHostToDevice);
unsigned int grid_rows = (X + BLOCK_SIZE - 1) / BLOCK_SIZE;
unsigned int grid_cols = (Y + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimGrid(grid_cols, grid_rows);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
for(int i = 0; i < 10; i++) {
start_timer();
multiply<<<dimGrid,dimBlock>>>(d_matrixA, d_matrixB, d_matrixC, X, Y);
acum += stop_timer();
}
cudaMemcpy(matrixC, d_matrixC, sizeof(int) * X * X, cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
printf("Result:\n");
for(int i = 0; i < X * X; i++) {
if(i % X == 0) printf("\n");
printf("%i ", matrixC[i]);
}
printf("\n");
printf("avg time = %.5f ms\n", (acum / 10));
cudaFree(d_matrixA);
cudaFree(d_matrixB);
cudaFree(d_matrixC);
free(matrixA);
free(matrixB);
free(matrixC);
return 0;
}
|
0aecac7ca3e962bbc67f3fcd3dff8c20b6c21a0f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <malloc.h>
#include <stdlib.h>
/*
#include <time.h>
#include <cutil_inline.h>
*/
#include <helper_functions.h>
#include <helper_cuda.h>
#include <helper_timer.h>
#define MATRIX_SIZE 1024/*number of elements on one side of the matrix*/
#define BLOCK_SIZE 16
__global__ void
matrixMul(int* inMatrixA, int* inMatrixB, int* inMatrixC);
int main(int argc, char** argv) {
unsigned int matrixSize = sizeof(unsigned int) * MATRIX_SIZE * MATRIX_SIZE;
int* hMatrixA;
int* hMatrixB;
int* hMatrixC;
hMatrixA = (int*)malloc(matrixSize);
hMatrixB = (int*)malloc(matrixSize);
/*set initial values*/
unsigned int col_idx, row_idx;
for (col_idx = 0; col_idx < MATRIX_SIZE; col_idx++) {
for (row_idx = 0; row_idx < MATRIX_SIZE; row_idx++) {
hMatrixA[col_idx * MATRIX_SIZE + row_idx] = rand() % (1024 * 1024);
hMatrixB[col_idx * MATRIX_SIZE + row_idx] = rand() % (1024 * 1024);
}
}
/*device-side variables*/
int* dMatrixA;
int* dMatrixB;
int* dMatrixC;
/*allocate device memory*/
hipMalloc((void**)&dMatrixA, matrixSize);
hipMemcpy(dMatrixA, hMatrixA, matrixSize, hipMemcpyHostToDevice);
hipMalloc((void**)&dMatrixB, matrixSize);
hipMemcpy(dMatrixB, hMatrixB, matrixSize, hipMemcpyHostToDevice);
hipMalloc((void**)&dMatrixC, matrixSize);
/*set the block and grid sizes*/
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(MATRIX_SIZE / BLOCK_SIZE, MATRIX_SIZE / BLOCK_SIZE);
/*create a timer and start measuring*/
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
sdkStartTimer(&timer);
/*launch the kernel*/
hipLaunchKernelGGL(( matrixMul) , dim3(grid), dim3(block) , 0, 0, dMatrixA, dMatrixB, dMatrixC);
hipDeviceSynchronize();
/*allocate the result buffer and copy it back from the device*/
hMatrixC = (int*)malloc(matrixSize);
hipMemcpy(hMatrixC, dMatrixC, matrixSize, hipMemcpyDeviceToHost);
/*stop the timer and print the elapsed time*/
sdkStopTimer(&timer);
printf(" =%f(ms)\n", sdkGetTimerValue(&timer));
sdkDeleteTimer(&timer);
/*free host and device memory*/
free(hMatrixA);
free(hMatrixB);
free(hMatrixC);
hipFree(dMatrixA);
hipFree(dMatrixB);
hipFree(dMatrixC);
/*cleanup*/
// Wait for the stop event to complete
hipDeviceReset();
/*
cutilExit(argc, argv);
*/
}
__global__ void
matrixMul(int* inMatrixA, int* inMatrixB, int* inMatrixC) {
unsigned int col_idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int row_idx = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int scan_idx;
unsigned int target = 0;
/*perform the matrix computation*/
for (scan_idx = 0; scan_idx < MATRIX_SIZE; scan_idx++) {
target += inMatrixA[col_idx * MATRIX_SIZE + scan_idx] * inMatrixB[scan_idx * MATRIX_SIZE + row_idx];
__syncthreads();
}
inMatrixC[col_idx * MATRIX_SIZE + row_idx] = target;
} | 0aecac7ca3e962bbc67f3fcd3dff8c20b6c21a0f.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <malloc.h>
#include <stdlib.h>
/*
#include <time.h>
#include <cutil_inline.h>
*/
#include <helper_functions.h>
#include <helper_cuda.h>
#include <helper_timer.h>
#define MATRIX_SIZE 1024/*number of elements on one side of the matrix*/
#define BLOCK_SIZE 16
__global__ void
matrixMul(int* inMatrixA, int* inMatrixB, int* inMatrixC);
int main(int argc, char** argv) {
unsigned int matrixSize = sizeof(unsigned int) * MATRIX_SIZE * MATRIX_SIZE;
int* hMatrixA;
int* hMatrixB;
int* hMatrixC;
hMatrixA = (int*)malloc(matrixSize);
hMatrixB = (int*)malloc(matrixSize);
/*set initial values*/
unsigned int col_idx, row_idx;
for (col_idx = 0; col_idx < MATRIX_SIZE; col_idx++) {
for (row_idx = 0; row_idx < MATRIX_SIZE; row_idx++) {
hMatrixA[col_idx * MATRIX_SIZE + row_idx] = rand() % (1024 * 1024);
hMatrixB[col_idx * MATRIX_SIZE + row_idx] = rand() % (1024 * 1024);
}
}
/*device-side variables*/
int* dMatrixA;
int* dMatrixB;
int* dMatrixC;
/*allocate device memory*/
cudaMalloc((void**)&dMatrixA, matrixSize);
cudaMemcpy(dMatrixA, hMatrixA, matrixSize, cudaMemcpyHostToDevice);
cudaMalloc((void**)&dMatrixB, matrixSize);
cudaMemcpy(dMatrixB, hMatrixB, matrixSize, cudaMemcpyHostToDevice);
cudaMalloc((void**)&dMatrixC, matrixSize);
/*set the block and grid sizes*/
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
dim3 grid(MATRIX_SIZE / BLOCK_SIZE, MATRIX_SIZE / BLOCK_SIZE);
/*create a timer and start measuring*/
StopWatchInterface *timer = NULL;
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
sdkStartTimer(&timer);
/*launch the kernel*/
matrixMul <<< grid, block >>>(dMatrixA, dMatrixB, dMatrixC);
cudaThreadSynchronize();
/*allocate the result buffer and copy it back from the device*/
hMatrixC = (int*)malloc(matrixSize);
cudaMemcpy(hMatrixC, dMatrixC, matrixSize, cudaMemcpyDeviceToHost);
/*stop the timer and print the elapsed time*/
sdkStopTimer(&timer);
printf("計算時間 =%f(ms)\n", sdkGetTimerValue(&timer));
sdkDeleteTimer(&timer);
/*free host and device memory*/
free(hMatrixA);
free(hMatrixB);
free(hMatrixC);
cudaFree(dMatrixA);
cudaFree(dMatrixB);
cudaFree(dMatrixC);
/*終了処理*/
// Wait for the stop event to complete
cudaThreadExit();
/*
cutilExit(argc, argv);
*/
}
__global__ void
matrixMul(int* inMatrixA, int* inMatrixB, int* inMatrixC) {
unsigned int col_idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int row_idx = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int scan_idx;
unsigned int target = 0;
/*perform the matrix computation*/
for (scan_idx = 0; scan_idx < MATRIX_SIZE; scan_idx++) {
target += inMatrixA[col_idx * MATRIX_SIZE + scan_idx] * inMatrixB[scan_idx * MATRIX_SIZE + row_idx];
__syncthreads();
}
inMatrixC[col_idx * MATRIX_SIZE + row_idx] = target;
} |
647b705165eb46fe34468af47270081fc49e973f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
extern "C" __global__ void kernelFunction(int *input)
{
input[threadIdx.x] = 32 - threadIdx.x;
}
| 647b705165eb46fe34468af47270081fc49e973f.cu | /*
* Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
extern "C" __global__ void kernelFunction(int *input)
{
input[threadIdx.x] = 32 - threadIdx.x;
}
|
fbbfe6b0aee1011e0d496d0d08743cc2ea20b0c2.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
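// One block (with a single thread) per element: write 1 to out_d where the
// corresponding input equals 6, otherwise 0; the host then counts the ones.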
__global__ void compare(int *in_d, int* out_d)
{
if (in_d[blockIdx.x] == 6)
{
out_d[blockIdx.x] = 1;
}
else
out_d[blockIdx.x] = 0;
}
#define SIZE 16
int main()
{
//create two arrays
size_t bytes = SIZE * sizeof(int);
int *input, *output;
input = (int*)malloc(bytes);
output = (int*)malloc(bytes);
//fill the input array with random values in [0,9]
srand(time(0));
int i;
for (i = 0; i < SIZE; i++)
{
input[i] = rand() % 10;
output[i] = -1;
}
//now allocate both arrays on the gpu
int *input_d, *output_d;
hipMalloc((void**)&input_d, bytes);
hipMalloc((void**)&output_d, bytes);
//now we copy the input array to the gpu
hipMemcpy(input_d, input, bytes, hipMemcpyHostToDevice);
//launch the kernel
compare << <SIZE, 1 >> >(input_d, output_d);
//copy the output array back to the cpu mem
hipMemcpy(output, output_d, bytes, hipMemcpyDeviceToHost);
//free memory on the gpu
hipFree(input_d);
hipFree(output_d);
input_d = 0;
output_d = 0;
//display our answers
int total = 0;
for (int i = 0; i < SIZE; i++)
{
if (output[i] == 1)
total += 1;
}
for (int i = 0; i < SIZE; i++)
{
printf("%d ", input[i]);
}
printf("\n");
for (int i = 0; i < SIZE; i++)
{
printf("%d ", output[i]);
}
printf("total sixes = %d\n", total);
//free our arrays from memory
free(input);
free(output);
input = 0;
output = 0;
}
| fbbfe6b0aee1011e0d496d0d08743cc2ea20b0c2.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
__global__ void compare(int *in_d, int* out_d)
{
if (in_d[blockIdx.x] == 6)
{
out_d[blockIdx.x] = 1;
}
else
out_d[blockIdx.x] = 0;
}
#define SIZE 16
int main()
{
//create two arrays
size_t bytes = SIZE * sizeof(int);
int *input, *output;
input = (int*)malloc(bytes);
output = (int*)malloc(bytes);
//fill the input array with random values in [0,9]
srand(time(0));
int i;
for (i = 0; i < SIZE; i++)
{
input[i] = rand() % 10;
output[i] = -1;
}
//now allocate both arrays on the gpu
int *input_d, *output_d;
cudaMalloc((void**)&input_d, bytes);
cudaMalloc((void**)&output_d, bytes);
//now we copy the input array to the gpu
cudaMemcpy(input_d, input, bytes, cudaMemcpyHostToDevice);
//launch the kernel
compare << <SIZE, 1 >> >(input_d, output_d);
//copy the output array back to the cpu mem
cudaMemcpy(output, output_d, bytes, cudaMemcpyDeviceToHost);
//free memory on the gpu
cudaFree(input_d);
cudaFree(output_d);
input_d = 0;
output_d = 0;
//display our answers
int total = 0;
for (int i = 0; i < SIZE; i++)
{
if (output[i] == 1)
total += 1;
}
for (int i = 0; i < SIZE; i++)
{
printf("%d ", input[i]);
}
printf("\n");
for (int i = 0; i < SIZE; i++)
{
printf("%d ", output[i]);
}
printf("total sixes = %d\n", total);
//free our arrays from memory
free(input);
free(output);
input = 0;
output = 0;
}
|
5fb9f543cda364af9e4ec7762fb82b02ddcb9dcb.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "minHeap.c"
#include <assert.h>
#define CUDACHECK(cmd) do { \
hipError_t e = cmd; \
if( e != hipSuccess ) { \
printf("Failed: Cuda error %s:%d '%s'\n", \
__FILE__,__LINE__,hipGetErrorString(e)); \
exit(EXIT_FAILURE); \
} \
} while(0)
void swap(float *lhs, float *rhs)
{
if (lhs == rhs)
return;
float tmp = *lhs;
*lhs = *rhs;
*rhs = tmp;
}
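// Fingerprint layout: 65 unsigned ints per molecule, where word 0 stores the bit
// cardinality (popcount) and words 1..64 hold the fingerprint itself. fastJacc
// scores query `row` against every database fingerprint with a grid-stride loop,
// using __popc on the word-wise intersections.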
__global__ void fastJacc(unsigned int *allMols, unsigned int *queries, float *sims, int row, int size)
{
// take steps over each fingerprint in allmols
// aka grid-stride loop
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
// the cardinality is stored as the first int, always.
int cardX = queries[65 * row];
for (int i = index * 65; i < size * 65; i += 65 * stride) {
int totalSize = 0;
// unroll the loop into 16 loops of 4. 15% speedup.
#pragma unroll 4
for (int j = 1; j < 65; j++)
{
unsigned int x = queries[(65 * row) + j];
unsigned int y = allMols[i + j];
totalSize += __popc(x & y);
}
int cardY = allMols[i];
// Jaccard score: int(x, y) / (|x| + |y| - int(x, y)); the denominator is |x U y|.
int jaccDenom = cardX + cardY - totalSize;
// theoretically can have div by 0 error, however, jaccard score isn't defined for int(x, y) = 0,
// and the chances of it happening are basically 0.
// so don't waste the compute power to guard against it. if you're getting errors here,
// your data is probably corrupted.
sims[(size * row) + (i / 65)] = float(totalSize) / jaccDenom;
}
}
// function to get the topk from an array of similarities
void populateMinHeap(float *array, minHeap *m, int size) {
for (int i = 0; i < size; i++) {
if (m->size < m->maxSize) {
insertNode(m, array[i], i);
}
else if (array[i] > getMinNode(m)) {
// heapify instead?
deleteNode(m);
insertNode(m, array[i], i);
}
}
}
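// The heap keeps the k largest similarities together with their molecule indices:
// insert until full, then replace the current minimum whenever a larger score appears.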
int main(int argc, char* argv[])
{
if (argc != 6) {
printf("%s\n", "Must input 4 arguements, in order, input database, input queries, output file, int block size, int top_k.");
printf("%s\n", "Example run: fastSearch_CUDA mols.bin inq.bin search.txt 1024 30");
exit(EXIT_FAILURE);
}
FILE *database_ptr;
database_ptr = fopen(argv[1], "rb"); // r for read, b for binary
if (database_ptr == NULL) {
printf("Couldn't find input database. Is the path correct?");
exit(EXIT_FAILURE);
}
FILE *queries_ptr;
queries_ptr = fopen(argv[2], "rb"); // r for read, b for binary
if (queries_ptr == NULL) {
printf("Couldn't find input queries. Is the path correct?");
exit(EXIT_FAILURE);
}
// start reading the database file
int size;
// read num mols
fread(&size, sizeof(int), 1, database_ptr);
unsigned int *mols;
// Allocate Unified Memory -- accessible from CPU or GPU
hipMallocManaged(&mols, sizeof(int) * size * (64 + 1));
// actually read the file to unified memory
int freadReturnVal = fread(mols, sizeof(int) * (64 + 1), size, database_ptr);
if (freadReturnVal != size) {
printf("%s", "Bad read on input database. Do you have enough memory? Is the file corrupted? Has it been preprocessed with convert_to_binary?");
exit(EXIT_FAILURE);
}
fclose(database_ptr);
// exact same as above
int num_queries;
fread(&num_queries, sizeof(int), 1, queries_ptr); // read num queries
unsigned int *queries;
hipMallocManaged(&queries, sizeof(int) * num_queries * (64 + 1));
freadReturnVal = fread(queries, sizeof(int) * (64 + 1), num_queries, queries_ptr);
if (freadReturnVal != num_queries) {
printf("%s", "Bad read on input queries. Do you have enough memory? Is the file corrupted? Has it been preprocessed with convert_to_binary?");
exit(EXIT_FAILURE);
}
fclose(queries_ptr);
int numToRun = num_queries;
printf("%s", "Number of queries: ");
printf("%d\n", num_queries);
int k = atoi(argv[5]);
clock_t start, end;
double cpu_time_used;
start = clock();
float *sims;
// width * height -- CUDA doesn't like 2d arrays
hipMallocManaged(&sims, (size * sizeof(float)) * (numToRun));
// this value should probably be about 1024
int blockSize = atoi(argv[4]);
int numBlocks = (size + blockSize - 1) / blockSize;
for (int i = 0; i < numToRun; i++)
{
fastJacc << <numBlocks, blockSize >> > (mols, queries, sims, i, size);
}
CUDACHECK(hipDeviceSynchronize());
struct minHeap **heaps = (struct minHeap**) malloc(sizeof(struct minHeap *) * numToRun);
for (int i = 0; i < numToRun; i++) {
heaps[i] = initMinHeap(k);
populateMinHeap(&sims[(i * size)], heaps[i], size);
// uncomment these if you want to look at your data
// preorderTraversal(heaps[i], 0);
// printf("%s\n", "");
}
end = clock();
cpu_time_used = (((double)(end - start)) / CLOCKS_PER_SEC);
printf("%s", "Time to run queries: ");
printf("%f\n", cpu_time_used);
FILE *out = fopen(argv[3], "w");
for (int i = 0; i < numToRun; i++) {
minHeap *currHeap = heaps[i];
for (int j = 0; j < k; j++) {
fprintf(out, "%s", "(");
fprintf(out, "%.4f", currHeap->elem[j].data);
fprintf(out, "%s", ", ");
fprintf(out, "%d", currHeap->elem[j].idx);
fprintf(out, "%s", ")");
if (j + 1 < k)
fprintf(out, "%s", ", ");
}
fprintf(out, "%s", "\n");
}
fclose(out);
// Free memory
free(heaps);
hipFree(queries);
hipFree(mols);
hipFree(sims);
return 0;
} | 5fb9f543cda364af9e4ec7762fb82b02ddcb9dcb.cu | #include <iostream>
#include <stdio.h>
#include <time.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "minHeap.c"
#include <assert.h>
#define CUDACHECK(cmd) do { \
cudaError_t e = cmd; \
if( e != cudaSuccess ) { \
printf("Failed: Cuda error %s:%d '%s'\n", \
__FILE__,__LINE__,cudaGetErrorString(e)); \
exit(EXIT_FAILURE); \
} \
} while(0)
void swap(float *lhs, float *rhs)
{
if (lhs == rhs)
return;
float tmp = *lhs;
*lhs = *rhs;
*rhs = tmp;
}
__global__ void fastJacc(unsigned int *allMols, unsigned int *queries, float *sims, int row, int size)
{
// take steps over each fingerprint in allmols
// aka grid-stride loop
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
// the cardinality is stored as the first int, always.
int cardX = queries[65 * row];
for (int i = index * 65; i < size * 65; i += 65 * stride) {
int totalSize = 0;
// unroll the loop into 16 loops of 4. 15% speedup.
#pragma unroll 4
for (int j = 1; j < 65; j++)
{
unsigned int x = queries[(65 * row) + j];
unsigned int y = allMols[i + j];
totalSize += __popc(x & y);
}
int cardY = allMols[i];
// Jaccard score: int(x, y) / (|x| + |y| - int(x, y)); the denominator is |x U y|.
int jaccDenom = cardX + cardY - totalSize;
// theoretically can have div by 0 error, however, jaccard score isn't defined for int(x, y) = 0,
// and the chances of it happening are basically 0.
// so don't waste the compute power to guard against it. if you're getting errors here,
// your data is probably corrupted.
sims[(size * row) + (i / 65)] = float(totalSize) / jaccDenom;
}
}
// function to get the topk from an array of similarities
void populateMinHeap(float *array, minHeap *m, int size) {
for (int i = 0; i < size; i++) {
if (m->size < m->maxSize) {
insertNode(m, array[i], i);
}
else if (array[i] > getMinNode(m)) {
// heapify instead?
deleteNode(m);
insertNode(m, array[i], i);
}
}
}
int main(int argc, char* argv[])
{
if (argc != 6) {
printf("%s\n", "Must input 4 arguements, in order, input database, input queries, output file, int block size, int top_k.");
printf("%s\n", "Example run: fastSearch_CUDA mols.bin inq.bin search.txt 1024 30");
exit(EXIT_FAILURE);
}
FILE *database_ptr;
database_ptr = fopen(argv[1], "rb"); // r for read, b for binary
if (database_ptr == NULL) {
printf("Couldn't find input database. Is the path correct?");
exit(EXIT_FAILURE);
}
FILE *queries_ptr;
queries_ptr = fopen(argv[2], "rb"); // r for read, b for binary
if (queries_ptr == NULL) {
printf("Couldn't find input queries. Is the path correct?");
exit(EXIT_FAILURE);
}
// start reading the database file
int size;
// read num mols
fread(&size, sizeof(int), 1, database_ptr);
unsigned int *mols;
// Allocate Unified Memory -- accessible from CPU or GPU
cudaMallocManaged(&mols, sizeof(int) * size * (64 + 1));
// actually read the file to unified memory
int freadReturnVal = fread(mols, sizeof(int) * (64 + 1), size, database_ptr);
if (freadReturnVal != size) {
printf("%s", "Bad read on input database. Do you have enough memory? Is the file corrupted? Has it been preprocessed with convert_to_binary?");
exit(EXIT_FAILURE);
}
fclose(database_ptr);
// exact same as above
int num_queries;
fread(&num_queries, sizeof(int), 1, queries_ptr); // read num queries
unsigned int *queries;
cudaMallocManaged(&queries, sizeof(int) * num_queries * (64 + 1));
freadReturnVal = fread(queries, sizeof(int) * (64 + 1), num_queries, queries_ptr);
if (freadReturnVal != num_queries) {
printf("%s", "Bad read on input queries. Do you have enough memory? Is the file corrupted? Has it been preprocessed with convert_to_binary?");
exit(EXIT_FAILURE);
}
fclose(queries_ptr);
int numToRun = num_queries;
printf("%s", "Number of queries: ");
printf("%d\n", num_queries);
int k = atoi(argv[5]);
clock_t start, end;
double cpu_time_used;
start = clock();
float *sims;
// width * height -- CUDA doesn't like 2d arrays
cudaMallocManaged(&sims, (size * sizeof(float)) * (numToRun));
// this value should probably be about 1024
int blockSize = atoi(argv[4]);
int numBlocks = (size + blockSize - 1) / blockSize;
for (int i = 0; i < numToRun; i++)
{
fastJacc << <numBlocks, blockSize >> > (mols, queries, sims, i, size);
}
CUDACHECK(cudaDeviceSynchronize());
struct minHeap **heaps = (struct minHeap**) malloc(sizeof(struct minHeap *) * numToRun);
for (int i = 0; i < numToRun; i++) {
heaps[i] = initMinHeap(k);
populateMinHeap(&sims[(i * size)], heaps[i], size);
// uncomment these if you want to look at your data
// preorderTraversal(heaps[i], 0);
// printf("%s\n", "");
}
end = clock();
cpu_time_used = (((double)(end - start)) / CLOCKS_PER_SEC);
printf("%s", "Time to run queries: ");
printf("%f\n", cpu_time_used);
FILE *out = fopen(argv[3], "w");
for (int i = 0; i < numToRun; i++) {
minHeap *currHeap = heaps[i];
for (int j = 0; j < k; j++) {
fprintf(out, "%s", "(");
fprintf(out, "%.4f", currHeap->elem[j].data);
fprintf(out, "%s", ", ");
fprintf(out, "%d", currHeap->elem[j].idx);
fprintf(out, "%s", ")");
if (j + 1 < k)
fprintf(out, "%s", ", ");
}
fprintf(out, "%s", "\n");
}
fclose(out);
// Free memory
free(heaps);
cudaFree(queries);
cudaFree(mols);
cudaFree(sims);
return 0;
} |
7190deb694609212fb0589b691077b2ffe96a6f0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void UpdateSecond(float *WHAT , float *WITH , float AMOUNT , float *MULT)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
WHAT[idx] *=MULT[idx];
WHAT[idx] +=AMOUNT*WITH[idx];
MULT[idx] = 1.0f;
} | 7190deb694609212fb0589b691077b2ffe96a6f0.cu | #include "includes.h"
__global__ void UpdateSecond(float *WHAT , float *WITH , float AMOUNT , float *MULT)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
WHAT[idx] *=MULT[idx];
WHAT[idx] +=AMOUNT*WITH[idx];
MULT[idx] = 1.0f;
} |
984b2794094ee0687e600c30434cf522099d9d90.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>
#include "caffe/layers/contrastive_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
void ContrastiveLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const int count = bottom[0]->count();
caffe_gpu_sub(
count,
bottom[0]->gpu_data(), // a
bottom[1]->gpu_data(), // b
diff_.mutable_gpu_data()); // a_i-b_i
caffe_gpu_powx(
count,
diff_.mutable_gpu_data(), // a_i-b_i
Dtype(2),
diff_sq_.mutable_gpu_data()); // (a_i-b_i)^2
caffe_gpu_gemv(
CblasNoTrans,
bottom[0]->num(),
bottom[0]->channels(),
Dtype(1.0),
diff_sq_.gpu_data(), // (a_i-b_i)^2
summer_vec_.gpu_data(),
Dtype(0.0),
dist_sq_.mutable_gpu_data(), // \Sum (a_i-b_i)^2
-1);
Dtype margin = this->layer_param_.contrastive_loss_param().margin();
bool legacy_version =
this->layer_param_.contrastive_loss_param().legacy_version();
Dtype loss(0.0);
for (int i = 0; i < bottom[0]->num(); ++i) {
if (static_cast<int>(bottom[2]->cpu_data()[i])) { // similar pairs
loss += dist_sq_.cpu_data()[i];
} else { // dissimilar pairs
if (legacy_version) {
loss += ::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0));
} else {
Dtype dist = ::max(margin - sqrt(dist_sq_.cpu_data()[i]),
Dtype(0.0));
loss += dist*dist;
}
}
}
loss = loss / static_cast<Dtype>(bottom[0]->num()) / Dtype(2);
top[0]->mutable_cpu_data()[0] = loss;
}
template <typename Dtype>
__global__ void CLLBackward(const int count, const int channels,
const Dtype margin, const bool legacy_version, const Dtype alpha,
const Dtype* y, const Dtype* diff, const Dtype* dist_sq,
Dtype *bottom_diff) {
CUDA_KERNEL_LOOP(i, count) {
int n = i / channels; // the num index, to access y and dist_sq
if (static_cast<int>(y[n])) { // similar pairs
bottom_diff[i] = alpha * diff[i];
} else { // dissimilar pairs
Dtype mdist(0.0);
Dtype beta(0.0);
if (legacy_version) {
mdist = (margin - dist_sq[n]);
beta = -alpha;
} else {
Dtype dist = sqrt(dist_sq[n]);
mdist = (margin - dist);
beta = -alpha * mdist / (dist + Dtype(1e-4)) * diff[i];
}
if (mdist > 0.0) {
bottom_diff[i] = beta;
} else {
bottom_diff[i] = 0;
}
}
}
}
template <typename Dtype>
void ContrastiveLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
for (int i = 0; i < 2; ++i) {
if (propagate_down[i]) {
const int count = bottom[0]->count();
const int channels = bottom[0]->channels();
Dtype margin = this->layer_param_.contrastive_loss_param().margin();
const bool legacy_version =
this->layer_param_.contrastive_loss_param().legacy_version();
const Dtype sign = (i == 0) ? 1 : -1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] /
static_cast<Dtype>(bottom[0]->num());
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( CLLBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, channels, margin, legacy_version, alpha,
bottom[2]->gpu_data(), // pair similarity 0 or 1
diff_.gpu_data(), // the cached eltwise difference between a and b
dist_sq_.gpu_data(), // the cached square distance between a and b
bottom[i]->mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ContrastiveLossLayer);
} // namespace caffe
| 984b2794094ee0687e600c30434cf522099d9d90.cu | #include <algorithm>
#include <vector>
#include "caffe/layers/contrastive_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
void ContrastiveLossLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const int count = bottom[0]->count();
caffe_gpu_sub(
count,
bottom[0]->gpu_data(), // a
bottom[1]->gpu_data(), // b
diff_.mutable_gpu_data()); // a_i-b_i
caffe_gpu_powx(
count,
diff_.mutable_gpu_data(), // a_i-b_i
Dtype(2),
diff_sq_.mutable_gpu_data()); // (a_i-b_i)^2
caffe_gpu_gemv(
CblasNoTrans,
bottom[0]->num(),
bottom[0]->channels(),
Dtype(1.0),
diff_sq_.gpu_data(), // (a_i-b_i)^2
summer_vec_.gpu_data(),
Dtype(0.0),
dist_sq_.mutable_gpu_data(), // \Sum (a_i-b_i)^2
-1);
Dtype margin = this->layer_param_.contrastive_loss_param().margin();
bool legacy_version =
this->layer_param_.contrastive_loss_param().legacy_version();
Dtype loss(0.0);
for (int i = 0; i < bottom[0]->num(); ++i) {
if (static_cast<int>(bottom[2]->cpu_data()[i])) { // similar pairs
loss += dist_sq_.cpu_data()[i];
} else { // dissimilar pairs
if (legacy_version) {
loss += std::max(margin - dist_sq_.cpu_data()[i], Dtype(0.0));
} else {
Dtype dist = std::max(margin - sqrt(dist_sq_.cpu_data()[i]),
Dtype(0.0));
loss += dist*dist;
}
}
}
loss = loss / static_cast<Dtype>(bottom[0]->num()) / Dtype(2);
top[0]->mutable_cpu_data()[0] = loss;
}
template <typename Dtype>
__global__ void CLLBackward(const int count, const int channels,
const Dtype margin, const bool legacy_version, const Dtype alpha,
const Dtype* y, const Dtype* diff, const Dtype* dist_sq,
Dtype *bottom_diff) {
CUDA_KERNEL_LOOP(i, count) {
int n = i / channels; // the num index, to access y and dist_sq
if (static_cast<int>(y[n])) { // similar pairs
bottom_diff[i] = alpha * diff[i];
} else { // dissimilar pairs
Dtype mdist(0.0);
Dtype beta(0.0);
if (legacy_version) {
mdist = (margin - dist_sq[n]);
beta = -alpha;
} else {
Dtype dist = sqrt(dist_sq[n]);
mdist = (margin - dist);
beta = -alpha * mdist / (dist + Dtype(1e-4)) * diff[i];
}
if (mdist > 0.0) {
bottom_diff[i] = beta;
} else {
bottom_diff[i] = 0;
}
}
}
}
template <typename Dtype>
void ContrastiveLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
for (int i = 0; i < 2; ++i) {
if (propagate_down[i]) {
const int count = bottom[0]->count();
const int channels = bottom[0]->channels();
Dtype margin = this->layer_param_.contrastive_loss_param().margin();
const bool legacy_version =
this->layer_param_.contrastive_loss_param().legacy_version();
const Dtype sign = (i == 0) ? 1 : -1;
const Dtype alpha = sign * top[0]->cpu_diff()[0] /
static_cast<Dtype>(bottom[0]->num());
// NOLINT_NEXT_LINE(whitespace/operators)
CLLBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, channels, margin, legacy_version, alpha,
bottom[2]->gpu_data(), // pair similarity 0 or 1
diff_.gpu_data(), // the cached eltwise difference between a and b
dist_sq_.gpu_data(), // the cached square distance between a and b
bottom[i]->mutable_gpu_diff());
CUDA_POST_KERNEL_CHECK;
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(ContrastiveLossLayer);
} // namespace caffe
|
6ef71f948cb30922201c71cf6887fed746e741a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pointwise_hist1.cuh"
#include "split_properties_helpers.cuh"
#include <hip/hip_cooperative_groups.h>
#include <library/cpp/cuda/wrappers/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
using namespace cooperative_groups;
namespace NKernel
{
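// Warp-local shared-memory histogram over one-byte feature bins (four features
// packed per 32-bit ci word). When InnerHistBitsCount > 0, AddPoint replays the
// update over 2^InnerHistBitsCount synchronized passes so that lanes targeting
// the same slot do not collide; Reduce() then folds the per-warp copies into a
// (1 << (5 + InnerHistBitsCount))-bin histogram per feature.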
template <int InnerHistBitsCount,
int BlockSize>
struct TPointHistOneByte {
volatile float* __restrict__ Buffer;
__forceinline__ __device__ int SliceOffset() {
const int warpOffset = 1024 * (threadIdx.x / 32);
const int blocks = 8 >> InnerHistBitsCount;
const int innerHistStart = (threadIdx.x & ((blocks - 1) << (InnerHistBitsCount + 2)));
return warpOffset + innerHistStart;
}
__forceinline__ __device__ TPointHistOneByte(float* buff) {
const int HIST_SIZE = 32 * BlockSize;
#pragma unroll 8
for (int i = threadIdx.x; i < HIST_SIZE; i += BlockSize) {
buff[i] = 0;
}
Buffer = buff + SliceOffset();
__syncthreads();
}
__device__ void AddPoint(ui32 ci, const float t) {
thread_block_tile<32> syncTile = tiled_partition<32>(this_thread_block());
#pragma unroll
for (int i = 0; i < 4; i++) {
short f = (threadIdx.x + i) & 3;
int bin = bfe(ci, 24 - 8 * f, 8);
const float statToAdd = (bin >> (5 + InnerHistBitsCount)) == 0 ? t : 0;
const int mask = (1 << InnerHistBitsCount) - 1;
const int higherBin = (bin >> 5) & mask;
int offset = 4 * higherBin + f + ((bin & 31) << 5);
if (InnerHistBitsCount > 0) {
#pragma unroll
for (int k = 0; k < (1 << InnerHistBitsCount); ++k) {
const int pass = ((threadIdx.x >> 2) + k) & mask;
syncTile.sync();
if (pass == higherBin) {
Buffer[offset] += statToAdd;
}
}
} else {
syncTile.sync();
Buffer[offset] += statToAdd;
}
}
}
__forceinline__ __device__ void Reduce() {
Buffer -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024;
for (int start = threadIdx.x; start < warpHistSize; start += BlockSize) {
float sum = 0;
//12 iterations
#pragma unroll 12
for (int i = start; i < 32 * BlockSize; i += warpHistSize) {
sum += Buffer[i];
}
Buffer[warpHistSize + start] = sum;
}
}
__syncthreads();
//now we have only 1024 entries hist
const int warpHistBlockCount = 8 >> InnerHistBitsCount;
const int fold = threadIdx.x;
const int histSize = 1 << (5 + InnerHistBitsCount);
float sum[4];
#pragma unroll
for (int i = 0; i < 4; ++i) {
sum[i] = 0.0f;
}
if (fold < histSize) {
const int warpHistSize = 1024;
const int lowerBitsOffset = (fold & 31) << 5;
const int higherBin = (fold >> 5) & ((1 << InnerHistBitsCount) - 1);
const int blockSize = 4 * (1 << InnerHistBitsCount);
const volatile float* src = Buffer + warpHistSize + lowerBitsOffset + 4 * higherBin;
#pragma unroll
for (int block = 0; block < warpHistBlockCount; ++block) {
#pragma unroll
for (int i = 0; i < 4; ++i) {
sum[i] += src[i + block * blockSize];
}
}
}
__syncthreads();
if (fold < histSize) {
for (int i = 0; i < 4; ++i) {
Buffer[histSize * i + fold] = sum[i];
}
}
__syncthreads();
}
};
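// Half-byte variant: eight 4-bit features per ci word, 16 bins per feature;
// Reduce() collapses the per-warp buffers into a 16 x 8 histogram.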
template <int BlockSize>
struct TPointHistHalfByte {
volatile float* Buffer;
__forceinline__ __device__ int SliceOffset() {
const int warpOffset = 512 * (threadIdx.x / 32);
const int innerHistStart = threadIdx.x & 24;
return warpOffset + innerHistStart;
}
__forceinline__ __device__ TPointHistHalfByte(float* buff) {
const int histSize = 16 * BlockSize;
for (int i = threadIdx.x; i < histSize; i += BlockSize) {
buff[i] = 0;
}
__syncthreads();
Buffer = buff + SliceOffset();
}
__forceinline__ __device__ void AddPoint(ui32 ci, const float t) {
thread_block_tile<8> addToHistTile = tiled_partition<8>(this_thread_block());
#pragma unroll 4
for (int i = 0; i < 8; i++) {
const int f = (threadIdx.x + i) & 7;
short bin = bfe(ci, 28 - 4 * f, 4);
bin <<= 5;
bin += f;
Buffer[bin] += t;
addToHistTile.sync();
}
}
__device__ void Reduce() {
Buffer -= SliceOffset();
__syncthreads();
{
const int HIST_SIZE = 16 * BlockSize;
float sum = 0;
if (threadIdx.x < 512) {
for (int i = threadIdx.x; i < HIST_SIZE; i += 512) {
sum += Buffer[i];
}
}
__syncthreads();
if (threadIdx.x < 512) {
Buffer[threadIdx.x] = sum;
}
__syncthreads();
}
const int fold = (threadIdx.x >> 3) & 15;
float sum = 0.0f;
if (threadIdx.x < 128) {
const int featureId = threadIdx.x & 7;
#pragma unroll
for (int group = 0; group < 4; ++group) {
sum += Buffer[32 * fold + featureId + 8 * group];
}
}
__syncthreads();
if (threadIdx.x < 128) {
Buffer[threadIdx.x] = sum;
}
__syncthreads();
}
};
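// Rough outline of ComputeHistogram below: (1) only the first block of each feature
// group handles the unaligned head so that the main loop starts on a 32-element
// boundary; (2) the same block handles the unaligned tail; (3) all BlocksPerFeature
// blocks then stride over the aligned middle, gathering indices, bins and targets
// N at a time before adding them to the shared-memory histogram, and finally
// THist::Reduce() collapses the per-warp copies.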
template <int StripeSize, int OuterUnroll, int N, typename THist>
__forceinline__ __device__ void ComputeHistogram(int BlocksPerFeature, const ui32* __restrict__ indices,
ui32 offset, ui32 dsSize,
const float* __restrict__ target,
const ui32* __restrict__ cindex,
float* result) {
target += offset;
indices += offset;
THist hist(result);
ui32 i = (threadIdx.x & 31) + (threadIdx.x / 32) * 32;
//all operations should be warp-aligned
//first: the first warp makes memory access aligned: it loads the first 32 - offset % 32 elements.
{
ui32 lastId = min(dsSize, 32 - (offset & 31));
if ((blockIdx.x % BlocksPerFeature) == 0) {
const int index = i < lastId ? __ldg(indices + i) : 0;
const ui32 ci = i < lastId ? __ldg(cindex + index) : 0;
const float wt = i < lastId ? __ldg(target + i) : 0;
hist.AddPoint(ci, wt);
}
dsSize = dsSize > lastId ? dsSize - lastId : 0;
indices += lastId;
target += lastId;
}
//now let's align the end
const ui32 unalignedTail = (dsSize & 31);
if (unalignedTail != 0) {
if ((blockIdx.x % BlocksPerFeature) == 0)
{
const ui32 tailOffset = dsSize - unalignedTail;
const int index = i < unalignedTail ? __ldg(indices + tailOffset + i) : 0;
const ui32 ci = i < unalignedTail ? __ldg(cindex + index) : 0;
const float wt = i < unalignedTail ? __ldg(target + tailOffset + i) : 0;
hist.AddPoint(ci, wt);
}
}
dsSize -= unalignedTail;
if (blockIdx.x % BlocksPerFeature == 0 && dsSize <= 0) {
__syncthreads();
hist.Reduce();
return;
}
indices += (blockIdx.x % BlocksPerFeature) * StripeSize;
target += (blockIdx.x % BlocksPerFeature) * StripeSize;
dsSize = dsSize > (blockIdx.x % BlocksPerFeature) * StripeSize ? dsSize - (blockIdx.x % BlocksPerFeature) * StripeSize : 0;
const ui32 stripe = StripeSize * BlocksPerFeature;
if (dsSize) {
ui32 iteration_count = dsSize > i ? (dsSize - i + (stripe - 1)) / stripe : 0;
ui32 blocked_iteration_count = dsSize > (i | 31) ? ((dsSize - (i | 31) + (stripe - 1)) / stripe) / N : 0;
target += i;
indices += i;
#pragma unroll OuterUnroll
for (ui32 j = 0; j < blocked_iteration_count; ++j) {
ui32 local_index[N];
#pragma unroll
for (int k = 0; k < N; k++) {
local_index[k] = __ldg(indices + stripe * k);
}
ui32 local_ci[N];
float local_wt[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
local_ci[k] = __ldg(cindex + local_index[k]);
local_wt[k] = __ldg(target + stripe * k);
}
#pragma unroll
for (int k = 0; k < N; ++k) {
hist.AddPoint(local_ci[k], local_wt[k]);
}
indices += stripe * N;
target += stripe * N;
}
for (ui32 k = blocked_iteration_count * N; k < iteration_count; ++k) {
const int index = __ldg(indices);
ui32 ci = __ldg(cindex + index);
float wt = __ldg(target);
hist.AddPoint(ci, wt);
indices += stripe;
target += stripe;
}
__syncthreads();
hist.Reduce();
}
}
template <int StripeSize, int OuterUnroll, typename THist>
__forceinline__ __device__ void ComputeHistogram64BitLoads(int BlocksPerFeature, const ui32* __restrict__ indices,
ui32 offset, ui32 dsSize,
const float* __restrict__ target,
const ui32* __restrict__ cindex,
float* result) {
target += offset;
indices += offset;
THist hist(result);
if (dsSize) {
//first: align memory access: the first block loads the first 128 - offset % 128 elements.
{
ui32 lastId = min(dsSize, 128 - (offset & 127));
ui32 colId = (threadIdx.x & 31) + (threadIdx.x / 32 ) * 32;
if ((blockIdx.x % BlocksPerFeature) == 0)
{
for (; (colId < 128); colId += blockDim.x)
{
const int index = colId < lastId ? __ldg(indices + colId) : 0;
const ui32 ci = colId < lastId ? __ldg(cindex + index) : 0;
const float wt = colId < lastId ? __ldg(target + colId) : 0;
hist.AddPoint(ci, wt);
}
}
dsSize = dsSize > lastId ? dsSize - lastId : 0;
indices += lastId;
target += lastId;
}
//now let's align the end
const ui32 unalignedTail = (dsSize & 63);
if (unalignedTail != 0) {
if ((blockIdx.x % BlocksPerFeature) == 0)
{
ui32 colId = (threadIdx.x & 31) + (threadIdx.x / 32 ) * 32;
const ui32 tailOffset = dsSize - unalignedTail;
for (; (colId < 64); colId += blockDim.x) {
const int index = colId < unalignedTail ? __ldg(indices + tailOffset + colId) : 0;
const ui32 ci = colId < unalignedTail ? __ldg(cindex + index) : 0;
const float wt = colId < unalignedTail ? __ldg(target + tailOffset + colId) : 0;
hist.AddPoint(ci, wt);
}
}
}
dsSize -= unalignedTail;
if (dsSize <= 0) {
if ((blockIdx.x % BlocksPerFeature) == 0) {
__syncthreads();
hist.Reduce();
}
return;
}
indices += (blockIdx.x % BlocksPerFeature) * StripeSize * 2;
target += (blockIdx.x % BlocksPerFeature) * StripeSize * 2;
const ui32 stripe = StripeSize * BlocksPerFeature * 2;
dsSize = dsSize > (blockIdx.x % BlocksPerFeature) * StripeSize * 2 ? dsSize - (blockIdx.x % BlocksPerFeature) * StripeSize * 2 : 0;
if (dsSize) {
ui32 iterCount;
{
const ui32 i = 2 * ((threadIdx.x & 31) + (threadIdx.x / 32) * 32);
target += i;
indices += i;
iterCount = dsSize > i ? (dsSize - i + (stripe - 1)) / stripe : 0;
}
#pragma unroll OuterUnroll
for (int j = 0; j < iterCount; ++j) {
const uint2 localIndices = __ldg((uint2*) indices);
const ui32 firstBin = __ldg(cindex + localIndices.x);
const ui32 secondBin = __ldg(cindex + localIndices.y);
const float2 localTarget = __ldg((float2* )(target));
hist.AddPoint(firstBin, localTarget.x);
hist.AddPoint(secondBin, localTarget.y);
indices += stripe;
target += stripe;
}
__syncthreads();
hist.Reduce();
}
}
}
template <int BlockSize,
int InnerHistBitsCount,
bool Use64BitLoads>
__forceinline__ __device__ void ComputeSplitPropertiesPass(int BlocksPerFeature, const TCFeature* __restrict__ feature,
const ui32* __restrict__ cindex,
const float* __restrict__ target,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition, int fCount,
float* binSumsForPart,
float* __restrict__ smem) {
using THist = TPointHistOneByte<InnerHistBitsCount, BlockSize>;
if (Use64BitLoads) {
#if __CUDA_ARCH__ < 300
const int outerUnroll = 2;
#elif __CUDA_ARCH__ <= 350
const int outerUnroll = 2;
#else
const int outerUnroll = InnerHistBitsCount == 0 ? 4 : 2;
#endif
const ui32 size = partition->Size;
const ui32 offset = partition->Offset;
ComputeHistogram64BitLoads < BlockSize, outerUnroll, THist > (BlocksPerFeature,
indices,
offset,
size,
target,
cindex,
smem);
} else {
#if __CUDA_ARCH__ < 300
const int innerUnroll = InnerHistBitsCount == 0 ? 4 : 2;
const int outerUnroll = 2;
#elif __CUDA_ARCH__ <= 350
const int innerUnroll = InnerHistBitsCount == 0 ? 8 : 4;
const int outerUnroll = 2;
#else
const int innerUnroll = 4;
const int outerUnroll = 2;
#endif
ComputeHistogram<BlockSize, outerUnroll, innerUnroll, THist>(BlocksPerFeature,
indices,
partition->Offset,
partition->Size,
target,
cindex,
smem);
}
__syncthreads();
const ui32 fold = threadIdx.x;
const ui32 histSize = 1 << (5 + InnerHistBitsCount);
#pragma unroll 4
for (int fid = 0; fid < fCount; ++fid) {
if (fold < feature[fid].Folds) {
const float val = smem[fid * histSize + fold];
if (abs(val) > 1e-20f) {
if (BlocksPerFeature > 1) {
atomicAdd(binSumsForPart + (feature[fid].FirstFoldIndex + fold), val);
} else {
WriteThrough(binSumsForPart + (feature[fid].FirstFoldIndex + fold), val);
}
}
}
}
}
#define DECLARE_PASS(I, M, USE_64_BIT_LOAD) \
ComputeSplitPropertiesPass<BlockSize, I, USE_64_BIT_LOAD>(M, feature, cindex, target, indices, partition, fCount, binSums, &counters[0]);
template <int BlockSize, bool IsFullPass>
#if __CUDA_ARCH__ == 600
__launch_bounds__(BlockSize, 1)
#elif __CUDA_ARCH__ >= 520
__launch_bounds__(BlockSize, 2)
#else
__launch_bounds__(BlockSize, 1)
#endif
__global__ void ComputeSplitPropertiesNBImpl(int M, const TCFeature* __restrict__ feature, int fCount,
const ui32* __restrict__ cindex,
const float* __restrict__ target,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition,
float* __restrict__ binSums,
const int totalFeatureCount) {
TPointwisePartOffsetsHelper helper(gridDim.z);
helper.ShiftPartAndBinSumsPtr(partition, binSums, totalFeatureCount, IsFullPass, 1);
feature += (blockIdx.x / M) * 4;
cindex += feature->Offset;
fCount = min(fCount - (blockIdx.x / M) * 4, 4);
__shared__ float counters[32 * BlockSize];
const ui32 maxBinCount = GetMaxBinCount(feature, fCount, (ui32*) &counters[0]);
__syncthreads();
//CatBoost always uses direct loads on the first pass of histogram calculation, and for this step 64-bit loads are almost 2x faster
#if __CUDA_ARCH__ > 350
const bool use64BitLoad = IsFullPass;// float2 for target/indices/weights
#else
const bool use64BitLoad = false;
#endif
if (partition->Size) {
if (maxBinCount <= 32) {
DECLARE_PASS(0, M, use64BitLoad);
} else if (maxBinCount <= 64) {
DECLARE_PASS(1, M, false);
} else if (maxBinCount <= 128) {
DECLARE_PASS(2, M, false);
} else {
DECLARE_PASS(3, M, false);
}
}
}
template <int BlockSize, bool IsFullPass>
#if __CUDA_ARCH__ >= 520
__launch_bounds__(BlockSize, 2)
#else
__launch_bounds__(BlockSize, 1)
#endif
__global__ void ComputeSplitPropertiesBImpl(int M,
const TCFeature* __restrict__ feature, int fCount, const ui32* __restrict__ cindex,
const float* __restrict__ target,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition,
float* __restrict__ binSums,
int totalFeatureCount) {
TPointwisePartOffsetsHelper helper(gridDim.z);
helper.ShiftPartAndBinSumsPtr(partition, binSums, totalFeatureCount, IsFullPass, 1);
feature += (blockIdx.x / M) * 32;
cindex += feature->Offset;
fCount = min(fCount - (blockIdx.x / M) * 32, 32);
__shared__ float counters[16 * BlockSize];
if (partition->Size) {
using THist = TPointHistHalfByte<BlockSize>;
#if __CUDA_ARCH__ > 350
const bool use64bitLoad = IsFullPass;
#else
const bool use64bitLoad = false;
#endif
if (use64bitLoad) {
//full pass
#if __CUDA_ARCH__ <= 350
const int outerUnroll = 1;
#else
const int outerUnroll = 1;
#endif
ComputeHistogram64BitLoads < BlockSize, outerUnroll, THist > (M, indices, partition->Offset, partition->Size, target, cindex, &counters[0]);
} else {
#if __CUDA_ARCH__ <= 300
const int innerUnroll = 2;
const int outerUnroll = 1;
#elif __CUDA_ARCH__ <= 350
const int innerUnroll = 4;
const int outerUnroll = 1;
#else
const int innerUnroll = 1;
const int outerUnroll = 1;
#endif
ComputeHistogram < BlockSize, outerUnroll, innerUnroll, THist > (M, indices,
partition->Offset,
partition->Size,
target,
cindex,
&counters[0]);
}
ui32 fid = threadIdx.x;
if (fid < fCount) {
const ui32 groupId = fid / 4;
const ui32 fMask = 1 << (3 - (fid & 3));
float sum = 0.f;
#pragma unroll
for (int i = 0; i < 16; i++) {
if (!(i & fMask)) {
sum += counters[8 * i + groupId];
}
}
if (abs(sum) > 1e-20f) {
if (M > 1) {
atomicAdd(binSums + feature[fid].FirstFoldIndex, sum);
} else {
binSums[feature[fid].FirstFoldIndex] = sum;
}
}
}
}
}
template <int BlockSize,
int BlocksPerFeatureCount>
inline void RunComputeHist1NonBinaryKernel(const TCFeature* nbFeatures, int nbCount,
const ui32* cindex,
const float* target, const ui32* indices,
const TDataPartition* partition,
float* binSums,
const int binFeatureCount,
bool fullPass,
TCudaStream stream,
dim3 numBlocks)
{
if (fullPass) {
ComputeSplitPropertiesNBImpl < BlockSize, true > << <numBlocks, BlockSize, 0, stream>>>(BlocksPerFeatureCount,
nbFeatures, nbCount, cindex, target,
indices, partition, binSums, binFeatureCount
);
} else {
ComputeSplitPropertiesNBImpl < BlockSize, false > << <numBlocks, BlockSize, 0, stream>>>( BlocksPerFeatureCount,
nbFeatures, nbCount, cindex, target,
indices, partition, binSums, binFeatureCount
);
}
}
template <int BlockSize, int BlocksPerFeatureCount>
void RunComputeHist1BinaryKernel(const TCFeature* bFeatures, int bCount,
const ui32* cindex,
const float* target, const ui32* indices,
const TDataPartition* partition,
float* binSums,
int histLineSize,
bool fullPass,
TCudaStream stream,
dim3 numBlocks) {
if (fullPass) {
ComputeSplitPropertiesBImpl < BlockSize, true > << <numBlocks, BlockSize, 0, stream>>>(BlocksPerFeatureCount, bFeatures, bCount, cindex, target, indices, partition, binSums, histLineSize);
} else {
ComputeSplitPropertiesBImpl < BlockSize, false > << <numBlocks, BlockSize, 0, stream>>>(BlocksPerFeatureCount, bFeatures, bCount, cindex, target, indices, partition, binSums, histLineSize);
}
}
template <int BlockSize, bool IsFullPass>
#if __CUDA_ARCH__ >= 520
__launch_bounds__(BlockSize, 2)
#else
__launch_bounds__(BlockSize, 1)
#endif
__global__ void ComputeSplitPropertiesHalfByteImpl(
int M,
const TCFeature* __restrict__ feature, int fCount,
const ui32* __restrict__ cindex,
const float* __restrict__ target,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition,
float* __restrict__ binSums,
const int totalFeatureCount) {
TPointwisePartOffsetsHelper helper(gridDim.z);
helper.ShiftPartAndBinSumsPtr(partition, binSums, totalFeatureCount, IsFullPass, 1);
feature += (blockIdx.x / M) * 8;
cindex += feature->Offset;
fCount = min(fCount - (blockIdx.x / M) * 8, 8);
__shared__ float smem[16 * BlockSize];
if (partition->Size) {
using THist = TPointHistHalfByte<BlockSize>;
#if __CUDA_ARCH__ > 350
const bool use64BitLoad = IsFullPass;
#else
const bool use64BitLoad = false;
#endif
if (use64BitLoad) {
#if __CUDA_ARCH__ <= 350
const int outerUnroll = 2;
#else
const int outerUnroll = 1;
#endif
ComputeHistogram64BitLoads < BlockSize, outerUnroll, THist >(M, indices, partition->Offset, partition->Size, target, cindex, &smem[0]);
} else {
#if __CUDA_ARCH__ <= 300
const int innerUnroll = 2;
const int outerUnroll = 2;
#elif __CUDA_ARCH__ <= 350
const int innerUnroll = 4;
const int outerUnroll = 2;
#else
const int innerUnroll = 1;
const int outerUnroll = 1;
#endif
ComputeHistogram < BlockSize, outerUnroll, innerUnroll, THist > (M, indices, partition->Offset, partition->Size, target, cindex, &smem[0]);
}
__syncthreads();
const ui32 fid = threadIdx.x >> 4;
const ui32 fold = threadIdx.x & 15;
if (fid < fCount && fold < feature[fid].Folds) {
const float result = smem[fold * 8 + fid];
if (abs(result) > 1e-20) {
if (M > 1) {
atomicAdd(binSums + feature[fid].FirstFoldIndex + fold, result);
} else {
binSums[feature[fid].FirstFoldIndex + fold] = result;
}
}
}
}
}
template <int BlockSize,
int BlocksPerFeatureCount>
inline void RunComputeHist1HalfByteKernel(const TCFeature* nbFeatures, int nbCount,
const ui32* cindex,
const float* target,
const ui32* indices,
const TDataPartition* partition,
float* binSums,
const int binFeatureCount,
bool fullPass,
TCudaStream stream,
dim3 numBlocks)
{
if (fullPass) {
ComputeSplitPropertiesHalfByteImpl < BlockSize, true > << <numBlocks, BlockSize, 0, stream>>>(
BlocksPerFeatureCount, nbFeatures, nbCount, cindex, target, indices, partition, binSums, binFeatureCount
);
} else {
ComputeSplitPropertiesHalfByteImpl < BlockSize, false > << <numBlocks, BlockSize, 0, stream>>>(
BlocksPerFeatureCount, nbFeatures, nbCount, cindex, target, indices, partition, binSums, binFeatureCount);
}
}
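// The host-side entry points below build the launch grid as
// numBlocks.x = ceil(featureCount / featuresPerBlock) * multiplier,
// numBlocks.y = number of histogram parts, numBlocks.z = fold count.
// The blocks-per-feature multiplier has to be a compile-time template argument,
// hence the COMPUTE(k) switch over the supported powers of two (1..64).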
void ComputeHist1Binary(const TCFeature* bFeatures, ui32 bCount,
const ui32* cindex,
const float* target,
const ui32* indices,
ui32 size,
const TDataPartition* partition,
ui32 partsCount,
ui32 foldCount,
bool fullPass,
ui32 histLineSize,
float* binSums,
TCudaStream stream) {
dim3 numBlocks;
numBlocks.x = (bCount + 31) / 32;
const ui32 histCount = fullPass ? partsCount : partsCount / 2;
numBlocks.y = histCount;
numBlocks.z = foldCount;
constexpr ui32 BlockSize = 768;
const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, size), 64u);
numBlocks.x *= multiplier;
if (IsGridEmpty(numBlocks)) {
return;
}
if (bCount) {
#define COMPUTE(k) \
RunComputeHist1BinaryKernel<BlockSize, k>(bFeatures, bCount, cindex, target, indices, \
partition, binSums, histLineSize, fullPass, stream, numBlocks);
if (multiplier == 1) {
COMPUTE(1)
} else if (multiplier == 2) {
COMPUTE(2)
} else if (multiplier == 4) {
COMPUTE(4)
} else if (multiplier == 8) {
COMPUTE(8);
} else if (multiplier == 16) {
COMPUTE(16)
} else if (multiplier == 32) {
COMPUTE(32)
} else if (multiplier == 64) {
COMPUTE(64)
} else {
CB_ENSURE_INTERNAL(false, "Expected multiplier = 1, 2, 4, 8, 16, 32, or 64, not " << multiplier);
}
#undef COMPUTE
}
}
void ComputeHist1HalfByte(const TCFeature* halfByteFeatures, ui32 halfByteFeaturesCount,
const ui32* cindex,
const float* target,
const ui32* indices,
ui32 size,
const TDataPartition* partition,
ui32 partsCount,
ui32 foldCount,
bool fullPass,
ui32 histLineSize,
float* binSums,
TCudaStream stream) {
dim3 numBlocks;
numBlocks.x = static_cast<ui32>((halfByteFeaturesCount + 7) / 8);
const ui32 histCount = fullPass ? partsCount : partsCount / 2;
numBlocks.y = static_cast<ui32>(histCount);
numBlocks.z = foldCount;
constexpr ui32 BlockSize = 768;
const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, size), 64);
numBlocks.x *= multiplier;
if (IsGridEmpty(numBlocks)) {
return;
}
if (halfByteFeaturesCount) {
#define COMPUTE(k)\
RunComputeHist1HalfByteKernel<BlockSize, k>(halfByteFeatures, halfByteFeaturesCount, cindex,\
target,\
indices, partition, binSums, histLineSize,\
fullPass,\
stream, numBlocks);
if (multiplier == 1) {
COMPUTE(1)
} else if (multiplier == 2) {
COMPUTE(2)
} else if (multiplier == 4) {
COMPUTE(4)
} else if (multiplier == 8) {
COMPUTE(8)
} else if (multiplier == 16) {
COMPUTE(16)
} else if (multiplier == 32) {
COMPUTE(32)
} else if (multiplier == 64) {
COMPUTE(64)
} else {
CB_ENSURE_INTERNAL(false, "Expected multiplier = 1, 2, 4, 8, 16, 32, or 64, not " << multiplier);
}
#undef COMPUTE
}
}
void ComputeHist1NonBinary(const TCFeature* nbFeatures, ui32 nbCount,
const ui32* cindex,
const float* target,
const ui32* indices,
ui32 size,
const TDataPartition* partition,
ui32 partCount,
ui32 foldCount,
bool fullPass,
ui32 histLineSize,
float* binSums,
TCudaStream stream) {
if (nbCount) {
dim3 numBlocks;
numBlocks.x = (nbCount + 3) / 4;
const ui32 histCount = (fullPass ? partCount : partCount / 2);
numBlocks.y = histCount;
numBlocks.z = foldCount;
constexpr ui32 BlockSize = 384;
const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, size), 64);
numBlocks.x *= multiplier;
if (IsGridEmpty(numBlocks)) {
return;
}
#define COMPUTE(k) \
RunComputeHist1NonBinaryKernel<BlockSize, k>(nbFeatures, nbCount, cindex, target, indices, \
partition, binSums, histLineSize, fullPass, stream, numBlocks);
if (multiplier == 1) {
COMPUTE(1)
} else if (multiplier == 2) {
COMPUTE(2)
} else if (multiplier == 4) {
COMPUTE(4)
} else if (multiplier == 8) {
COMPUTE(8)
} else if (multiplier == 16) {
COMPUTE(16)
} else if (multiplier == 32) {
COMPUTE(32)
} else if (multiplier == 64) {
COMPUTE(64)
} else {
CB_ENSURE_INTERNAL(false, "Expected multiplier = 1, 2, 4, 8, 16, 32, or 64, not " << multiplier);
}
#undef COMPUTE
}
}
}
| 6ef71f948cb30922201c71cf6887fed746e741a2.cu | #include "pointwise_hist1.cuh"
#include "split_properties_helpers.cuh"
#include <cooperative_groups.h>
#include <library/cpp/cuda/wrappers/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
using namespace cooperative_groups;
namespace NKernel
{
template <int InnerHistBitsCount,
int BlockSize>
struct TPointHistOneByte {
volatile float* __restrict__ Buffer;
__forceinline__ __device__ int SliceOffset() {
const int warpOffset = 1024 * (threadIdx.x / 32);
const int blocks = 8 >> InnerHistBitsCount;
const int innerHistStart = (threadIdx.x & ((blocks - 1) << (InnerHistBitsCount + 2)));
return warpOffset + innerHistStart;
}
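// Layout note (as inferred from SliceOffset/AddPoint): each warp owns a private
// 1024-float slice of the shared-memory histogram, since (32 << InnerHistBitsCount)
// bins per feature * 4 features * (8 >> InnerHistBitsCount) replicas = 1024 floats.
// SliceOffset spreads groups of four lanes across the replicas to reduce shared-memory
// bank conflicts; Reduce() later collapses the per-warp slices and the replicas.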
__forceinline__ __device__ TPointHistOneByte(float* buff) {
const int HIST_SIZE = 32 * BlockSize;
#pragma unroll 8
for (int i = threadIdx.x; i < HIST_SIZE; i += BlockSize) {
buff[i] = 0;
}
Buffer = buff + SliceOffset();
__syncthreads();
}
__device__ void AddPoint(ui32 ci, const float t) {
thread_block_tile<32> syncTile = tiled_partition<32>(this_thread_block());
#pragma unroll
for (int i = 0; i < 4; i++) {
short f = (threadIdx.x + i) & 3;
int bin = bfe(ci, 24 - 8 * f, 8);
const float statToAdd = (bin >> (5 + InnerHistBitsCount)) == 0 ? t : 0;
const int mask = (1 << InnerHistBitsCount) - 1;
const int higherBin = (bin >> 5) & mask;
int offset = 4 * higherBin + f + ((bin & 31) << 5);
if (InnerHistBitsCount > 0) {
#pragma unroll
for (int k = 0; k < (1 << InnerHistBitsCount); ++k) {
const int pass = ((threadIdx.x >> 2) + k) & mask;
syncTile.sync();
if (pass == higherBin) {
Buffer[offset] += statToAdd;
}
}
} else {
syncTile.sync();
Buffer[offset] += statToAdd;
}
}
}
__forceinline__ __device__ void Reduce() {
Buffer -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024;
for (int start = threadIdx.x; start < warpHistSize; start += BlockSize) {
float sum = 0;
//BlockSize/32 iterations (12 for the BlockSize == 384 instantiation)
#pragma unroll 12
for (int i = start; i < 32 * BlockSize; i += warpHistSize) {
sum += Buffer[i];
}
Buffer[warpHistSize + start] = sum;
}
}
__syncthreads();
//now we have a single 1024-entry histogram (per-warp slices summed)
const int warpHistBlockCount = 8 >> InnerHistBitsCount;
const int fold = threadIdx.x;
const int histSize = 1 << (5 + InnerHistBitsCount);
float sum[4];
#pragma unroll
for (int i = 0; i < 4; ++i) {
sum[i] = 0.0f;
}
if (fold < histSize) {
const int warpHistSize = 1024;
const int lowerBitsOffset = (fold & 31) << 5;
const int higherBin = (fold >> 5) & ((1 << InnerHistBitsCount) - 1);
const int blockSize = 4 * (1 << InnerHistBitsCount);
const volatile float* src = Buffer + warpHistSize + lowerBitsOffset + 4 * higherBin;
#pragma unroll
for (int block = 0; block < warpHistBlockCount; ++block) {
#pragma unroll
for (int i = 0; i < 4; ++i) {
sum[i] += src[i + block * blockSize];
}
}
}
__syncthreads();
if (fold < histSize) {
for (int i = 0; i < 4; ++i) {
Buffer[histSize * i + fold] = sum[i];
}
}
__syncthreads();
}
};
template <int BlockSize>
struct TPointHistHalfByte {
volatile float* Buffer;
__forceinline__ __device__ int SliceOffset() {
const int warpOffset = 512 * (threadIdx.x / 32);
const int innerHistStart = threadIdx.x & 24;
return warpOffset + innerHistStart;
}
__forceinline__ __device__ TPointHistHalfByte(float* buff) {
const int histSize = 16 * BlockSize;
for (int i = threadIdx.x; i < histSize; i += BlockSize) {
buff[i] = 0;
}
__syncthreads();
Buffer = buff + SliceOffset();
}
__forceinline__ __device__ void AddPoint(ui32 ci, const float t) {
thread_block_tile<8> addToHistTile = tiled_partition<8>(this_thread_block());
#pragma unroll 4
for (int i = 0; i < 8; i++) {
const int f = (threadIdx.x + i) & 7;
short bin = bfe(ci, 28 - 4 * f, 4);
bin <<= 5;
bin += f;
Buffer[bin] += t;
addToHistTile.sync();
}
}
__device__ void Reduce() {
Buffer -= SliceOffset();
__syncthreads();
{
const int HIST_SIZE = 16 * BlockSize;
float sum = 0;
if (threadIdx.x < 512) {
for (int i = threadIdx.x; i < HIST_SIZE; i += 512) {
sum += Buffer[i];
}
}
__syncthreads();
if (threadIdx.x < 512) {
Buffer[threadIdx.x] = sum;
}
__syncthreads();
}
const int fold = (threadIdx.x >> 3) & 15;
float sum = 0.0f;
if (threadIdx.x < 128) {
const int featureId = threadIdx.x & 7;
#pragma unroll
for (int group = 0; group < 4; ++group) {
sum += Buffer[32 * fold + featureId + 8 * group];
}
}
__syncthreads();
if (threadIdx.x < 128) {
Buffer[threadIdx.x] = sum;
}
__syncthreads();
}
};
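// Rough outline of ComputeHistogram below: (1) only the first block of each feature
// group handles the unaligned head so that the main loop starts on a 32-element
// boundary; (2) the same block handles the unaligned tail; (3) all BlocksPerFeature
// blocks then stride over the aligned middle, gathering indices, bins and targets
// N at a time before adding them to the shared-memory histogram, and finally
// THist::Reduce() collapses the per-warp copies.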
template <int StripeSize, int OuterUnroll, int N, typename THist>
__forceinline__ __device__ void ComputeHistogram(int BlocksPerFeature, const ui32* __restrict__ indices,
ui32 offset, ui32 dsSize,
const float* __restrict__ target,
const ui32* __restrict__ cindex,
float* result) {
target += offset;
indices += offset;
THist hist(result);
ui32 i = (threadIdx.x & 31) + (threadIdx.x / 32) * 32;
//all operations should be warp-aligned
//first: first warp make memory access aligned. it load first 32 - offset % 32 elements.
{
ui32 lastId = min(dsSize, 32 - (offset & 31));
if ((blockIdx.x % BlocksPerFeature) == 0) {
const int index = i < lastId ? __ldg(indices + i) : 0;
const ui32 ci = i < lastId ? __ldg(cindex + index) : 0;
const float wt = i < lastId ? __ldg(target + i) : 0;
hist.AddPoint(ci, wt);
}
dsSize = dsSize > lastId ? dsSize - lastId : 0;
indices += lastId;
target += lastId;
}
//now let's align the end
const ui32 unalignedTail = (dsSize & 31);
if (unalignedTail != 0) {
if ((blockIdx.x % BlocksPerFeature) == 0)
{
const ui32 tailOffset = dsSize - unalignedTail;
const int index = i < unalignedTail ? __ldg(indices + tailOffset + i) : 0;
const ui32 ci = i < unalignedTail ? __ldg(cindex + index) : 0;
const float wt = i < unalignedTail ? __ldg(target + tailOffset + i) : 0;
hist.AddPoint(ci, wt);
}
}
dsSize -= unalignedTail;
if (blockIdx.x % BlocksPerFeature == 0 && dsSize <= 0) {
__syncthreads();
hist.Reduce();
return;
}
indices += (blockIdx.x % BlocksPerFeature) * StripeSize;
target += (blockIdx.x % BlocksPerFeature) * StripeSize;
dsSize = dsSize > (blockIdx.x % BlocksPerFeature) * StripeSize ? dsSize - (blockIdx.x % BlocksPerFeature) * StripeSize : 0;
const ui32 stripe = StripeSize * BlocksPerFeature;
if (dsSize) {
ui32 iteration_count = dsSize > i ? (dsSize - i + (stripe - 1)) / stripe : 0;
ui32 blocked_iteration_count = dsSize > (i | 31) ? ((dsSize - (i | 31) + (stripe - 1)) / stripe) / N : 0;
target += i;
indices += i;
#pragma unroll OuterUnroll
for (ui32 j = 0; j < blocked_iteration_count; ++j) {
ui32 local_index[N];
#pragma unroll
for (int k = 0; k < N; k++) {
local_index[k] = __ldg(indices + stripe * k);
}
ui32 local_ci[N];
float local_wt[N];
#pragma unroll
for (int k = 0; k < N; ++k) {
local_ci[k] = __ldg(cindex + local_index[k]);
local_wt[k] = __ldg(target + stripe * k);
}
#pragma unroll
for (int k = 0; k < N; ++k) {
hist.AddPoint(local_ci[k], local_wt[k]);
}
indices += stripe * N;
target += stripe * N;
}
for (ui32 k = blocked_iteration_count * N; k < iteration_count; ++k) {
const int index = __ldg(indices);
ui32 ci = __ldg(cindex + index);
float wt = __ldg(target);
hist.AddPoint(ci, wt);
indices += stripe;
target += stripe;
}
__syncthreads();
hist.Reduce();
}
}
template <int StripeSize, int OuterUnroll, typename THist>
__forceinline__ __device__ void ComputeHistogram64BitLoads(int BlocksPerFeature, const ui32* __restrict__ indices,
ui32 offset, ui32 dsSize,
const float* __restrict__ target,
const ui32* __restrict__ cindex,
float* result) {
target += offset;
indices += offset;
THist hist(result);
if (dsSize) {
//first: align memory access: the first block loads the first 128 - offset % 128 elements.
{
ui32 lastId = min(dsSize, 128 - (offset & 127));
ui32 colId = (threadIdx.x & 31) + (threadIdx.x / 32 ) * 32;
if ((blockIdx.x % BlocksPerFeature) == 0)
{
for (; (colId < 128); colId += blockDim.x)
{
const int index = colId < lastId ? __ldg(indices + colId) : 0;
const ui32 ci = colId < lastId ? __ldg(cindex + index) : 0;
const float wt = colId < lastId ? __ldg(target + colId) : 0;
hist.AddPoint(ci, wt);
}
}
dsSize = dsSize > lastId ? dsSize - lastId : 0;
indices += lastId;
target += lastId;
}
//now let's align the end
const ui32 unalignedTail = (dsSize & 63);
if (unalignedTail != 0) {
if ((blockIdx.x % BlocksPerFeature) == 0)
{
ui32 colId = (threadIdx.x & 31) + (threadIdx.x / 32 ) * 32;
const ui32 tailOffset = dsSize - unalignedTail;
for (; (colId < 64); colId += blockDim.x) {
const int index = colId < unalignedTail ? __ldg(indices + tailOffset + colId) : 0;
const ui32 ci = colId < unalignedTail ? __ldg(cindex + index) : 0;
const float wt = colId < unalignedTail ? __ldg(target + tailOffset + colId) : 0;
hist.AddPoint(ci, wt);
}
}
}
dsSize -= unalignedTail;
if (dsSize <= 0) {
if ((blockIdx.x % BlocksPerFeature) == 0) {
__syncthreads();
hist.Reduce();
}
return;
}
indices += (blockIdx.x % BlocksPerFeature) * StripeSize * 2;
target += (blockIdx.x % BlocksPerFeature) * StripeSize * 2;
const ui32 stripe = StripeSize * BlocksPerFeature * 2;
dsSize = dsSize > (blockIdx.x % BlocksPerFeature) * StripeSize * 2 ? dsSize - (blockIdx.x % BlocksPerFeature) * StripeSize * 2 : 0;
if (dsSize) {
ui32 iterCount;
{
const ui32 i = 2 * ((threadIdx.x & 31) + (threadIdx.x / 32) * 32);
target += i;
indices += i;
iterCount = dsSize > i ? (dsSize - i + (stripe - 1)) / stripe : 0;
}
#pragma unroll OuterUnroll
for (int j = 0; j < iterCount; ++j) {
const uint2 localIndices = __ldg((uint2*) indices);
const ui32 firstBin = __ldg(cindex + localIndices.x);
const ui32 secondBin = __ldg(cindex + localIndices.y);
const float2 localTarget = __ldg((float2* )(target));
hist.AddPoint(firstBin, localTarget.x);
hist.AddPoint(secondBin, localTarget.y);
indices += stripe;
target += stripe;
}
__syncthreads();
hist.Reduce();
}
}
}
template <int BlockSize,
int InnerHistBitsCount,
bool Use64BitLoads>
__forceinline__ __device__ void ComputeSplitPropertiesPass(int BlocksPerFeature, const TCFeature* __restrict__ feature,
const ui32* __restrict__ cindex,
const float* __restrict__ target,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition, int fCount,
float* binSumsForPart,
float* __restrict__ smem) {
using THist = TPointHistOneByte<InnerHistBitsCount, BlockSize>;
if (Use64BitLoads) {
#if __CUDA_ARCH__ < 300
const int outerUnroll = 2;
#elif __CUDA_ARCH__ <= 350
const int outerUnroll = 2;
#else
const int outerUnroll = InnerHistBitsCount == 0 ? 4 : 2;
#endif
const ui32 size = partition->Size;
const ui32 offset = partition->Offset;
ComputeHistogram64BitLoads < BlockSize, outerUnroll, THist > (BlocksPerFeature,
indices,
offset,
size,
target,
cindex,
smem);
} else {
#if __CUDA_ARCH__ < 300
const int innerUnroll = InnerHistBitsCount == 0 ? 4 : 2;
const int outerUnroll = 2;
#elif __CUDA_ARCH__ <= 350
const int innerUnroll = InnerHistBitsCount == 0 ? 8 : 4;
const int outerUnroll = 2;
#else
const int innerUnroll = 4;
const int outerUnroll = 2;
#endif
ComputeHistogram<BlockSize, outerUnroll, innerUnroll, THist>(BlocksPerFeature,
indices,
partition->Offset,
partition->Size,
target,
cindex,
smem);
}
__syncthreads();
const ui32 fold = threadIdx.x;
const ui32 histSize = 1 << (5 + InnerHistBitsCount);
#pragma unroll 4
for (int fid = 0; fid < fCount; ++fid) {
if (fold < feature[fid].Folds) {
const float val = smem[fid * histSize + fold];
if (abs(val) > 1e-20f) {
if (BlocksPerFeature > 1) {
atomicAdd(binSumsForPart + (feature[fid].FirstFoldIndex + fold), val);
} else {
WriteThrough(binSumsForPart + (feature[fid].FirstFoldIndex + fold), val);
}
}
}
}
}
#define DECLARE_PASS(I, M, USE_64_BIT_LOAD) \
ComputeSplitPropertiesPass<BlockSize, I, USE_64_BIT_LOAD>(M, feature, cindex, target, indices, partition, fCount, binSums, &counters[0]);
template <int BlockSize, bool IsFullPass>
#if __CUDA_ARCH__ == 600
__launch_bounds__(BlockSize, 1)
#elif __CUDA_ARCH__ >= 520
__launch_bounds__(BlockSize, 2)
#else
__launch_bounds__(BlockSize, 1)
#endif
__global__ void ComputeSplitPropertiesNBImpl(int M, const TCFeature* __restrict__ feature, int fCount,
const ui32* __restrict__ cindex,
const float* __restrict__ target,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition,
float* __restrict__ binSums,
const int totalFeatureCount) {
TPointwisePartOffsetsHelper helper(gridDim.z);
helper.ShiftPartAndBinSumsPtr(partition, binSums, totalFeatureCount, IsFullPass, 1);
feature += (blockIdx.x / M) * 4;
cindex += feature->Offset;
fCount = min(fCount - (blockIdx.x / M) * 4, 4);
__shared__ float counters[32 * BlockSize];
const ui32 maxBinCount = GetMaxBinCount(feature, fCount, (ui32*) &counters[0]);
__syncthreads();
//CatBoost always uses direct loads on the first pass of histogram calculation, and for this step 64-bit loads are almost 2x faster
#if __CUDA_ARCH__ > 350
const bool use64BitLoad = IsFullPass;// float2 for target/indices/weights
#else
const bool use64BitLoad = false;
#endif
if (partition->Size) {
if (maxBinCount <= 32) {
DECLARE_PASS(0, M, use64BitLoad);
} else if (maxBinCount <= 64) {
DECLARE_PASS(1, M, false);
} else if (maxBinCount <= 128) {
DECLARE_PASS(2, M, false);
} else {
DECLARE_PASS(3, M, false);
}
}
}
template <int BlockSize, bool IsFullPass>
#if __CUDA_ARCH__ >= 520
__launch_bounds__(BlockSize, 2)
#else
__launch_bounds__(BlockSize, 1)
#endif
__global__ void ComputeSplitPropertiesBImpl(int M,
const TCFeature* __restrict__ feature, int fCount, const ui32* __restrict__ cindex,
const float* __restrict__ target,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition,
float* __restrict__ binSums,
int totalFeatureCount) {
TPointwisePartOffsetsHelper helper(gridDim.z);
helper.ShiftPartAndBinSumsPtr(partition, binSums, totalFeatureCount, IsFullPass, 1);
feature += (blockIdx.x / M) * 32;
cindex += feature->Offset;
fCount = min(fCount - (blockIdx.x / M) * 32, 32);
__shared__ float counters[16 * BlockSize];
if (partition->Size) {
using THist = TPointHistHalfByte<BlockSize>;
#if __CUDA_ARCH__ > 350
const bool use64bitLoad = IsFullPass;
#else
const bool use64bitLoad = false;
#endif
if (use64bitLoad) {
//full pass
#if __CUDA_ARCH__ <= 350
const int outerUnroll = 1;
#else
const int outerUnroll = 1;
#endif
ComputeHistogram64BitLoads < BlockSize, outerUnroll, THist > (M, indices, partition->Offset, partition->Size, target, cindex, &counters[0]);
} else {
#if __CUDA_ARCH__ <= 300
const int innerUnroll = 2;
const int outerUnroll = 1;
#elif __CUDA_ARCH__ <= 350
const int innerUnroll = 4;
const int outerUnroll = 1;
#else
const int innerUnroll = 1;
const int outerUnroll = 1;
#endif
ComputeHistogram < BlockSize, outerUnroll, innerUnroll, THist > (M, indices,
partition->Offset,
partition->Size,
target,
cindex,
&counters[0]);
}
ui32 fid = threadIdx.x;
if (fid < fCount) {
const ui32 groupId = fid / 4;
const ui32 fMask = 1 << (3 - (fid & 3));
float sum = 0.f;
#pragma unroll
for (int i = 0; i < 16; i++) {
if (!(i & fMask)) {
sum += counters[8 * i + groupId];
}
}
if (abs(sum) > 1e-20f) {
if (M > 1) {
atomicAdd(binSums + feature[fid].FirstFoldIndex, sum);
} else {
binSums[feature[fid].FirstFoldIndex] = sum;
}
}
}
}
}
template <int BlockSize,
int BlocksPerFeatureCount>
inline void RunComputeHist1NonBinaryKernel(const TCFeature* nbFeatures, int nbCount,
const ui32* cindex,
const float* target, const ui32* indices,
const TDataPartition* partition,
float* binSums,
const int binFeatureCount,
bool fullPass,
TCudaStream stream,
dim3 numBlocks)
{
if (fullPass) {
ComputeSplitPropertiesNBImpl < BlockSize, true > << <numBlocks, BlockSize, 0, stream>>>(BlocksPerFeatureCount,
nbFeatures, nbCount, cindex, target,
indices, partition, binSums, binFeatureCount
);
} else {
ComputeSplitPropertiesNBImpl < BlockSize, false > << <numBlocks, BlockSize, 0, stream>>>( BlocksPerFeatureCount,
nbFeatures, nbCount, cindex, target,
indices, partition, binSums, binFeatureCount
);
}
}
template <int BlockSize, int BlocksPerFeatureCount>
void RunComputeHist1BinaryKernel(const TCFeature* bFeatures, int bCount,
const ui32* cindex,
const float* target, const ui32* indices,
const TDataPartition* partition,
float* binSums,
int histLineSize,
bool fullPass,
TCudaStream stream,
dim3 numBlocks) {
if (fullPass) {
ComputeSplitPropertiesBImpl < BlockSize, true > << <numBlocks, BlockSize, 0, stream>>>(BlocksPerFeatureCount, bFeatures, bCount, cindex, target, indices, partition, binSums, histLineSize);
} else {
ComputeSplitPropertiesBImpl < BlockSize, false > << <numBlocks, BlockSize, 0, stream>>>(BlocksPerFeatureCount, bFeatures, bCount, cindex, target, indices, partition, binSums, histLineSize);
}
}
template <int BlockSize, bool IsFullPass>
#if __CUDA_ARCH__ >= 520
__launch_bounds__(BlockSize, 2)
#else
__launch_bounds__(BlockSize, 1)
#endif
__global__ void ComputeSplitPropertiesHalfByteImpl(
int M,
const TCFeature* __restrict__ feature, int fCount,
const ui32* __restrict__ cindex,
const float* __restrict__ target,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition,
float* __restrict__ binSums,
const int totalFeatureCount) {
TPointwisePartOffsetsHelper helper(gridDim.z);
helper.ShiftPartAndBinSumsPtr(partition, binSums, totalFeatureCount, IsFullPass, 1);
feature += (blockIdx.x / M) * 8;
cindex += feature->Offset;
fCount = min(fCount - (blockIdx.x / M) * 8, 8);
__shared__ float smem[16 * BlockSize];
if (partition->Size) {
using THist = TPointHistHalfByte<BlockSize>;
#if __CUDA_ARCH__ > 350
const bool use64BitLoad = IsFullPass;
#else
const bool use64BitLoad = false;
#endif
if (use64BitLoad) {
#if __CUDA_ARCH__ <= 350
const int outerUnroll = 2;
#else
const int outerUnroll = 1;
#endif
ComputeHistogram64BitLoads < BlockSize, outerUnroll, THist >(M, indices, partition->Offset, partition->Size, target, cindex, &smem[0]);
} else {
#if __CUDA_ARCH__ <= 300
const int innerUnroll = 2;
const int outerUnroll = 2;
#elif __CUDA_ARCH__ <= 350
const int innerUnroll = 4;
const int outerUnroll = 2;
#else
const int innerUnroll = 1;
const int outerUnroll = 1;
#endif
ComputeHistogram < BlockSize, outerUnroll, innerUnroll, THist > (M, indices, partition->Offset, partition->Size, target, cindex, &smem[0]);
}
__syncthreads();
const ui32 fid = threadIdx.x >> 4;
const ui32 fold = threadIdx.x & 15;
if (fid < fCount && fold < feature[fid].Folds) {
const float result = smem[fold * 8 + fid];
if (abs(result) > 1e-20) {
if (M > 1) {
atomicAdd(binSums + feature[fid].FirstFoldIndex + fold, result);
} else {
binSums[feature[fid].FirstFoldIndex + fold] = result;
}
}
}
}
}
template <int BlockSize,
int BlocksPerFeatureCount>
inline void RunComputeHist1HalfByteKernel(const TCFeature* nbFeatures, int nbCount,
const ui32* cindex,
const float* target,
const ui32* indices,
const TDataPartition* partition,
float* binSums,
const int binFeatureCount,
bool fullPass,
TCudaStream stream,
dim3 numBlocks)
{
if (fullPass) {
ComputeSplitPropertiesHalfByteImpl < BlockSize, true > << <numBlocks, BlockSize, 0, stream>>>(
BlocksPerFeatureCount, nbFeatures, nbCount, cindex, target, indices, partition, binSums, binFeatureCount
);
} else {
ComputeSplitPropertiesHalfByteImpl < BlockSize, false > << <numBlocks, BlockSize, 0, stream>>>(
BlocksPerFeatureCount, nbFeatures, nbCount, cindex, target, indices, partition, binSums, binFeatureCount);
}
}
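// The host-side entry points below build the launch grid as
// numBlocks.x = ceil(featureCount / featuresPerBlock) * multiplier,
// numBlocks.y = number of histogram parts, numBlocks.z = fold count.
// The blocks-per-feature multiplier has to be a compile-time template argument,
// hence the COMPUTE(k) switch over the supported powers of two (1..64).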
void ComputeHist1Binary(const TCFeature* bFeatures, ui32 bCount,
const ui32* cindex,
const float* target,
const ui32* indices,
ui32 size,
const TDataPartition* partition,
ui32 partsCount,
ui32 foldCount,
bool fullPass,
ui32 histLineSize,
float* binSums,
TCudaStream stream) {
dim3 numBlocks;
numBlocks.x = (bCount + 31) / 32;
const ui32 histCount = fullPass ? partsCount : partsCount / 2;
numBlocks.y = histCount;
numBlocks.z = foldCount;
constexpr ui32 BlockSize = 768;
const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, size), 64u);
numBlocks.x *= multiplier;
if (IsGridEmpty(numBlocks)) {
return;
}
if (bCount) {
#define COMPUTE(k) \
RunComputeHist1BinaryKernel<BlockSize, k>(bFeatures, bCount, cindex, target, indices, \
partition, binSums, histLineSize, fullPass, stream, numBlocks);
if (multiplier == 1) {
COMPUTE(1)
} else if (multiplier == 2) {
COMPUTE(2)
} else if (multiplier == 4) {
COMPUTE(4)
} else if (multiplier == 8) {
COMPUTE(8);
} else if (multiplier == 16) {
COMPUTE(16)
} else if (multiplier == 32) {
COMPUTE(32)
} else if (multiplier == 64) {
COMPUTE(64)
} else {
CB_ENSURE_INTERNAL(false, "Expected multiplier = 1, 2, 4, 8, 16, 32, or 64, not " << multiplier);
}
#undef COMPUTE
}
}
void ComputeHist1HalfByte(const TCFeature* halfByteFeatures, ui32 halfByteFeaturesCount,
const ui32* cindex,
const float* target,
const ui32* indices,
ui32 size,
const TDataPartition* partition,
ui32 partsCount,
ui32 foldCount,
bool fullPass,
ui32 histLineSize,
float* binSums,
TCudaStream stream) {
dim3 numBlocks;
numBlocks.x = static_cast<ui32>((halfByteFeaturesCount + 7) / 8);
const ui32 histCount = fullPass ? partsCount : partsCount / 2;
numBlocks.y = static_cast<ui32>(histCount);
numBlocks.z = foldCount;
constexpr ui32 BlockSize = 768;
const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, size), 64);
numBlocks.x *= multiplier;
if (IsGridEmpty(numBlocks)) {
return;
}
if (halfByteFeaturesCount) {
#define COMPUTE(k)\
RunComputeHist1HalfByteKernel<BlockSize, k>(halfByteFeatures, halfByteFeaturesCount, cindex,\
target,\
indices, partition, binSums, histLineSize,\
fullPass,\
stream, numBlocks);
if (multiplier == 1) {
COMPUTE(1)
} else if (multiplier == 2) {
COMPUTE(2)
} else if (multiplier == 4) {
COMPUTE(4)
} else if (multiplier == 8) {
COMPUTE(8)
} else if (multiplier == 16) {
COMPUTE(16)
} else if (multiplier == 32) {
COMPUTE(32)
} else if (multiplier == 64) {
COMPUTE(64)
} else {
CB_ENSURE_INTERNAL(false, "Expected multiplier = 1, 2, 4, 8, 16, 32, or 64, not " << multiplier);
}
#undef COMPUTE
}
}
void ComputeHist1NonBinary(const TCFeature* nbFeatures, ui32 nbCount,
const ui32* cindex,
const float* target,
const ui32* indices,
ui32 size,
const TDataPartition* partition,
ui32 partCount,
ui32 foldCount,
bool fullPass,
ui32 histLineSize,
float* binSums,
TCudaStream stream) {
if (nbCount) {
dim3 numBlocks;
numBlocks.x = (nbCount + 3) / 4;
const ui32 histCount = (fullPass ? partCount : partCount / 2);
numBlocks.y = histCount;
numBlocks.z = foldCount;
constexpr ui32 BlockSize = 384;
const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, size), 64);
numBlocks.x *= multiplier;
if (IsGridEmpty(numBlocks)) {
return;
}
#define COMPUTE(k) \
RunComputeHist1NonBinaryKernel<BlockSize, k>(nbFeatures, nbCount, cindex, target, indices, \
partition, binSums, histLineSize, fullPass, stream, numBlocks);
if (multiplier == 1) {
COMPUTE(1)
} else if (multiplier == 2) {
COMPUTE(2)
} else if (multiplier == 4) {
COMPUTE(4)
} else if (multiplier == 8) {
COMPUTE(8)
} else if (multiplier == 16) {
COMPUTE(16)
} else if (multiplier == 32) {
COMPUTE(32)
} else if (multiplier == 64) {
COMPUTE(64)
} else {
CB_ENSURE_INTERNAL(false, "Expected multiplier = 1, 2, 4, 8, 16, 32, or 64, not " << multiplier);
}
#undef COMPUTE
}
}
}
|
fc4e79f5e94e52168b47ff153d5e4a0f83c64f1e.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Example of how to use the mxGPUArray API in a MEX file. This example shows
* how to write a MEX function that takes a gpuArray input and returns a
* gpuArray output, e.g. B=mexFunction(A).
*
* Copyright 2012 The MathWorks, Inc.
*/
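/*
 * Minimal shape of that pattern as used in this file (sketch only; names such as
 * d_result, dims and nbytes are illustrative):
 *
 *   mxGPUArray const *A = mxGPUCreateFromMxArray(prhs[0]);           // wrap gpuArray input
 *   float const *d_A = (float const *) mxGPUGetDataReadOnly(A);      // raw device pointer
 *   ... allocate work buffers with hipMalloc and launch kernels ...
 *   plhs[0] = mxCreateNumericArray(2, dims, mxSINGLE_CLASS, mxREAL); // host output
 *   hipMemcpy(mxGetData(plhs[0]), d_result, nbytes, hipMemcpyDeviceToHost);
 *   mxGPUDestroyGPUArray(A);
 */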
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <stdint.h>
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cstdlib>
#include <algorithm>
#include <iostream>
using namespace std;
const int nt0 = 61, Nthreads = 1024, lockout = nt0-1, NchanMax = 128, block = 32, NrankMax = 6;
//////////////////////////////////////////////////////////////////////////////////////////
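// Conv1D: one block per filter (bid). For a sliding window of Nthreads samples it
// accumulates the dot product of the nt0-tap filter (summed over the Params[6] "rank"
// components cached in sW) with the data trace for that filter, and writes an NT-long
// convolution signal per filter into conv_sig.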
__global__ void Conv1D(const double *Params, const float *data, const float *W, float *conv_sig){
__shared__ float sW[nt0*NrankMax], sdata[Nthreads+nt0];
float x;
int tid, tid0, bid, i, NT, Nfilt, NfiltALL;
tid = threadIdx.x;
bid = blockIdx.x;
Nfilt = (int) Params[1];
NT = (int) Params[0];
NfiltALL = Nfilt * ((int) Params[6]);
if(tid<nt0*((int) Params[6]))
sW[tid]= W[tid%nt0 + (bid + Nfilt * (tid/nt0))* nt0];
__syncthreads();
tid0 = 0;
while (tid0<NT-Nthreads-nt0+1){
if (tid<nt0) sdata[tid] = data[tid0 + tid+ NT*bid];
sdata[nt0+tid] = data[nt0+tid0 + tid+ NT*bid];
__syncthreads();
x = 0.0f;
while(bid<NfiltALL){
for(i=0;i<nt0;i++)
x += sW[i + (bid/Nfilt)*nt0] * sdata[i+tid];
bid+=Nfilt;
}
bid = blockIdx.x;
conv_sig[tid0 + tid + NT*bid] = x;
tid0+=Nthreads;
__syncthreads();
}
}
//////////////////////////////////////////////////////////////////////////////////////////
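// bestFilter: for every time sample, scans all Nfilt filters and keeps the one with the
// largest masked squared projection (Cf = Ci*Ci, accepted only while |Ci - mu| < lam*mu);
// if the best score exceeds Th*Th, the score, the winning amplitude and the filter index
// are stored for that sample.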
__global__ void bestFilter(const double *Params, const float *data,
const float *mu, const float *lam, float *xbest, float *err, int *ftype){
int tid, tid0, i, bid, NT, Nfilt, ibest = 0;
float Th, Cf, Ci, xb, Cbest = 0.0f;
tid = threadIdx.x;
bid = blockIdx.x;
NT = (int) Params[0];
Nfilt = (int) Params[1];
Th = (float) Params[2];
tid0 = tid + bid * Nthreads;
if (tid0<NT){
for (i=0; i<Nfilt;i++){
Ci = data[tid0 + NT * i];
Cf = 0.0f;
if (abs(Ci - mu[i]) < lam[i]*mu[i])
Cf = Ci * Ci;
if (Cf > Cbest){
Cbest = Cf;
xb = data[tid0 + NT * i]; //Ci / (lam[i] + 1);
ibest = i;
}
}
if (Cbest > Th*Th){
err[tid0] = Cbest;
xbest[tid0] = xb;
ftype[tid0] = ibest;
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////
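// cleanup_spikes: keeps only samples whose score is a local maximum within +/-lockout
// (= nt0-1) samples; each surviving peak is appended atomically, up to maxFR entries,
// to the output lists: sample index st, filter id, amplitude x and score C.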
__global__ void cleanup_spikes(const double *Params, const float *xbest, const float *err,
const int *ftype, int *st, int *id, float *x, float *C, int *counter){
int indx, maxFR, NTOT, tid, bid, NT, tid0, j;
volatile __shared__ float sdata[Nthreads+2*lockout+1];
bool flag=0;
float err0;
tid = threadIdx.x;
bid = blockIdx.x;
NT = (int) Params[0];
maxFR = (int) Params[3];
tid0 = bid * Nthreads;
if(tid0<NT-Nthreads-2*lockout-1){
if (tid<2*lockout)
sdata[tid] = err[tid0 + tid];
sdata[tid+2*lockout] = err[2*lockout + tid0 + tid];
__syncthreads();
err0 = sdata[tid+lockout];
if(err0>1e-10){
flag = 0;
for(j=-lockout;j<=lockout;j++)
if(sdata[tid+lockout+j]>err0){
flag = 1;
break;
}
if(flag==0){
indx = atomicAdd(&counter[0], 1);
if (indx<maxFR){
st[indx] = tid+lockout + tid0;
id[indx] = ftype[tid+lockout + tid0];
x[indx] = xbest[tid+lockout + tid0];
C[indx] = err0;
}
}
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////
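// subSpikes: for every spike detected since the previous pass, subtracts amplitude x
// times the corresponding row of the filter cross-correlation matrix WtW from the
// convolution signals in a 2*nt0-1 sample window around the spike time, so that
// overlapping spikes can be detected on the next iteration.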
__global__ void subSpikes(const double *Params, const int *st, const int *id, const float *x, const int *counter, float *dout, const float *WtW){
int tid, bid, NT, ind, tcurr, Nfilt;
tid = threadIdx.x;
bid = blockIdx.x;
NT = (int) Params[0];
Nfilt = (int) Params[1];
for(ind=counter[1]; ind<counter[0];ind++){
tcurr = tid + st[ind]-nt0+1;
if (tcurr>=0 && tcurr<NT)
dout[tcurr + bid*NT] -= x[ind] * WtW[tid + id[ind]*(2*nt0-1) + (2*nt0-1)*Nfilt*bid];
}
}
//////////////////////////////////////////////////////////////////////////////////////////
/*
* Host code
*/
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
/* Declare input variables*/
double *Params, *d_Params;
int blocksPerGrid, NT, maxFR, Nchan;
int const threadsPerBlock = Nthreads;
/* Initialize the MathWorks GPU API. */
mxInitGPU();
/* read Params and copy to GPU */
Params = (double*) mxGetData(prhs[0]);
NT = (int) Params[0];
blocksPerGrid = (int) Params[1];
maxFR = (int) Params[3];
Nchan = (int) Params[5];
hipMalloc(&d_Params, sizeof(double)*mxGetNumberOfElements(prhs[0]));
hipMemcpy(d_Params,Params,sizeof(double)*mxGetNumberOfElements(prhs[0]),hipMemcpyHostToDevice);
/* collect input GPU variables*/
mxGPUArray const *W, *data, *WtW, *mu, *lam;
const float *d_W, *d_data, *d_WtW, *d_mu, *d_lam;
data = mxGPUCreateFromMxArray(prhs[1]);
d_data = (float const *)(mxGPUGetDataReadOnly(data));
W = mxGPUCreateFromMxArray(prhs[2]);
d_W = (float const *)(mxGPUGetDataReadOnly(W));
WtW = mxGPUCreateFromMxArray(prhs[3]);
d_WtW = (float const *)(mxGPUGetDataReadOnly(WtW));
mu = mxGPUCreateFromMxArray(prhs[4]);
d_mu = (float const *)(mxGPUGetDataReadOnly(mu));
lam = mxGPUCreateFromMxArray(prhs[5]);
d_lam = (float const *)(mxGPUGetDataReadOnly(lam));
/* allocate new GPU variables*/
float *d_err,*d_C, *d_xbest, *d_x, *d_dout;
int *d_st, *d_ftype, *d_id, *d_counter;
hipMalloc(&d_dout, NT * blocksPerGrid* sizeof(float));
hipMalloc(&d_err, NT * sizeof(float));
hipMalloc(&d_xbest, NT * sizeof(float));
hipMalloc(&d_ftype, NT * sizeof(int));
hipMalloc(&d_st, maxFR * sizeof(int));
hipMalloc(&d_id, maxFR * sizeof(int));
hipMalloc(&d_x, maxFR * sizeof(float));
hipMalloc(&d_C, maxFR * sizeof(float));
hipMalloc(&d_counter, 2*sizeof(int));
hipMemset(d_dout, 0, NT * blocksPerGrid * sizeof(float));
hipMemset(d_counter, 0, 2*sizeof(int));
hipMemset(d_st, 0, maxFR * sizeof(int));
hipMemset(d_id, 0, maxFR * sizeof(int));
hipMemset(d_x, 0, maxFR * sizeof(float));
hipMemset(d_C, 0, maxFR * sizeof(float));
int *counter;
counter = (int*) calloc(1,sizeof(int));
hipLaunchKernelGGL(( Conv1D), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, d_Params, d_data, d_W, d_dout);
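// Iterative matched-pursuit loop: Params[4] bounds the number of passes. Each pass
// re-scores the residual convolution signal, extracts non-overlapping peaks and
// subtracts the detected spikes from d_dout, stopping early once maxFR spikes
// have been collected.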
for(int k=0;k<(int) Params[4];k++){
hipMemset(d_err, 0, NT * sizeof(float));
hipMemset(d_ftype, 0, NT * sizeof(int));
hipMemset(d_xbest, 0, NT * sizeof(float));
hipLaunchKernelGGL(( bestFilter), dim3(NT/Nthreads),dim3(threadsPerBlock), 0, 0, d_Params, d_dout, d_mu, d_lam, d_xbest, d_err, d_ftype);
hipLaunchKernelGGL(( cleanup_spikes), dim3(NT/Nthreads),dim3(threadsPerBlock), 0, 0, d_Params, d_xbest, d_err, d_ftype, d_st, d_id, d_x, d_C, d_counter);
hipMemcpy(counter, d_counter, sizeof(int), hipMemcpyDeviceToHost);
if (counter[0]>maxFR){
counter[0] = maxFR;
hipMemcpy(d_counter, counter, sizeof(int), hipMemcpyHostToDevice);
}
hipLaunchKernelGGL(( subSpikes), dim3(blocksPerGrid), dim3(2*nt0-1), 0, 0, d_Params, d_st, d_id, d_x, d_counter, d_dout, d_WtW);
hipMemcpy(d_counter+1, d_counter, sizeof(int), hipMemcpyDeviceToDevice); // both pointers are device memory
if(counter[0]==maxFR)
break;
}
float *x, *C;
int *st, *id;
int minSize;
if (counter[0]<maxFR) minSize = counter[0];
else minSize = maxFR;
const mwSize dimst[] = {minSize,1};
plhs[0] = mxCreateNumericArray(2, dimst, mxINT32_CLASS, mxREAL);
st = (int*) mxGetData(plhs[0]);
plhs[1] = mxCreateNumericArray(2, dimst, mxINT32_CLASS, mxREAL);
id = (int*) mxGetData(plhs[1]);
plhs[2] = mxCreateNumericArray(2, dimst, mxSINGLE_CLASS, mxREAL);
x = (float*) mxGetData(plhs[2]);
plhs[3] = mxCreateNumericArray(2, dimst, mxSINGLE_CLASS, mxREAL);
C = (float*) mxGetData(plhs[3]);
hipMemcpy(st, d_st, minSize * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(id, d_id, minSize * sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(x, d_x, minSize * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(C, d_C, minSize * sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_ftype);
hipFree(d_err);
hipFree(d_xbest);
hipFree(d_st);
hipFree(d_id);
hipFree(d_x);
hipFree(d_C);
hipFree(d_counter);
hipFree(d_Params);
hipFree(d_dout);
mxGPUDestroyGPUArray(data);
mxGPUDestroyGPUArray(WtW);
mxGPUDestroyGPUArray(W);
mxGPUDestroyGPUArray(mu);
mxGPUDestroyGPUArray(lam);
}
| fc4e79f5e94e52168b47ff153d5e4a0f83c64f1e.cu | /*
* Example of how to use the mxGPUArray API in a MEX file. This example shows
* how to write a MEX function that takes a gpuArray input and returns a
* gpuArray output, e.g. B=mexFunction(A).
*
* Copyright 2012 The MathWorks, Inc.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>
#include <stdint.h>
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cstdlib>
#include <algorithm>
#include <iostream>
using namespace std;
const int nt0 = 61, Nthreads = 1024, lockout = nt0-1, NchanMax = 128, block = 32, NrankMax = 6;
//////////////////////////////////////////////////////////////////////////////////////////
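// Conv1D: one block per filter (bid). For a sliding window of Nthreads samples it
// accumulates the dot product of the nt0-tap filter (summed over the Params[6] "rank"
// components cached in sW) with the data trace for that filter, and writes an NT-long
// convolution signal per filter into conv_sig.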
__global__ void Conv1D(const double *Params, const float *data, const float *W, float *conv_sig){
__shared__ float sW[nt0*NrankMax], sdata[Nthreads+nt0];
float x;
int tid, tid0, bid, i, NT, Nfilt, NfiltALL;
tid = threadIdx.x;
bid = blockIdx.x;
Nfilt = (int) Params[1];
NT = (int) Params[0];
NfiltALL = Nfilt * ((int) Params[6]);
if(tid<nt0*((int) Params[6]))
sW[tid]= W[tid%nt0 + (bid + Nfilt * (tid/nt0))* nt0];
__syncthreads();
tid0 = 0;
while (tid0<NT-Nthreads-nt0+1){
if (tid<nt0) sdata[tid] = data[tid0 + tid+ NT*bid];
sdata[nt0+tid] = data[nt0+tid0 + tid+ NT*bid];
__syncthreads();
x = 0.0f;
while(bid<NfiltALL){
for(i=0;i<nt0;i++)
x += sW[i + (bid/Nfilt)*nt0] * sdata[i+tid];
bid+=Nfilt;
}
bid = blockIdx.x;
conv_sig[tid0 + tid + NT*bid] = x;
tid0+=Nthreads;
__syncthreads();
}
}
//////////////////////////////////////////////////////////////////////////////////////////
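// bestFilter: for every time sample, scans all Nfilt filters and keeps the one with the
// largest masked squared projection (Cf = Ci*Ci, accepted only while |Ci - mu| < lam*mu);
// if the best score exceeds Th*Th, the score, the winning amplitude and the filter index
// are stored for that sample.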
__global__ void bestFilter(const double *Params, const float *data,
const float *mu, const float *lam, float *xbest, float *err, int *ftype){
int tid, tid0, i, bid, NT, Nfilt, ibest = 0;
float Th, Cf, Ci, xb, Cbest = 0.0f;
tid = threadIdx.x;
bid = blockIdx.x;
NT = (int) Params[0];
Nfilt = (int) Params[1];
Th = (float) Params[2];
tid0 = tid + bid * Nthreads;
if (tid0<NT){
for (i=0; i<Nfilt;i++){
Ci = data[tid0 + NT * i];
Cf = 0.0f;
if (abs(Ci - mu[i]) < lam[i]*mu[i])
Cf = Ci * Ci;
if (Cf > Cbest){
Cbest = Cf;
xb = data[tid0 + NT * i]; //Ci / (lam[i] + 1);
ibest = i;
}
}
if (Cbest > Th*Th){
err[tid0] = Cbest;
xbest[tid0] = xb;
ftype[tid0] = ibest;
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////
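// cleanup_spikes: keeps only samples whose score is a local maximum within +/-lockout
// (= nt0-1) samples; each surviving peak is appended atomically, up to maxFR entries,
// to the output lists: sample index st, filter id, amplitude x and score C.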
__global__ void cleanup_spikes(const double *Params, const float *xbest, const float *err,
const int *ftype, int *st, int *id, float *x, float *C, int *counter){
int indx, maxFR, NTOT, tid, bid, NT, tid0, j;
volatile __shared__ float sdata[Nthreads+2*lockout+1];
bool flag=0;
float err0;
tid = threadIdx.x;
bid = blockIdx.x;
NT = (int) Params[0];
maxFR = (int) Params[3];
tid0 = bid * Nthreads;
if(tid0<NT-Nthreads-2*lockout-1){
if (tid<2*lockout)
sdata[tid] = err[tid0 + tid];
sdata[tid+2*lockout] = err[2*lockout + tid0 + tid];
__syncthreads();
err0 = sdata[tid+lockout];
if(err0>1e-10){
flag = 0;
for(j=-lockout;j<=lockout;j++)
if(sdata[tid+lockout+j]>err0){
flag = 1;
break;
}
if(flag==0){
indx = atomicAdd(&counter[0], 1);
if (indx<maxFR){
st[indx] = tid+lockout + tid0;
id[indx] = ftype[tid+lockout + tid0];
x[indx] = xbest[tid+lockout + tid0];
C[indx] = err0;
}
}
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////
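// subSpikes: for every spike detected since the previous pass, subtracts amplitude x
// times the corresponding row of the filter cross-correlation matrix WtW from the
// convolution signals in a 2*nt0-1 sample window around the spike time, so that
// overlapping spikes can be detected on the next iteration.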
__global__ void subSpikes(const double *Params, const int *st, const int *id, const float *x, const int *counter, float *dout, const float *WtW){
int tid, bid, NT, ind, tcurr, Nfilt;
tid = threadIdx.x;
bid = blockIdx.x;
NT = (int) Params[0];
Nfilt = (int) Params[1];
for(ind=counter[1]; ind<counter[0];ind++){
tcurr = tid + st[ind]-nt0+1;
if (tcurr>=0 && tcurr<NT)
dout[tcurr + bid*NT] -= x[ind] * WtW[tid + id[ind]*(2*nt0-1) + (2*nt0-1)*Nfilt*bid];
}
}
//////////////////////////////////////////////////////////////////////////////////////////
/*
* Host code
*/
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
/* Declare input variables*/
double *Params, *d_Params;
int blocksPerGrid, NT, maxFR, Nchan;
int const threadsPerBlock = Nthreads;
/* Initialize the MathWorks GPU API. */
mxInitGPU();
/* read Params and copy to GPU */
Params = (double*) mxGetData(prhs[0]);
NT = (int) Params[0];
blocksPerGrid = (int) Params[1];
maxFR = (int) Params[3];
Nchan = (int) Params[5];
cudaMalloc(&d_Params, sizeof(double)*mxGetNumberOfElements(prhs[0]));
cudaMemcpy(d_Params,Params,sizeof(double)*mxGetNumberOfElements(prhs[0]),cudaMemcpyHostToDevice);
/* collect input GPU variables*/
mxGPUArray const *W, *data, *WtW, *mu, *lam;
const float *d_W, *d_data, *d_WtW, *d_mu, *d_lam;
data = mxGPUCreateFromMxArray(prhs[1]);
d_data = (float const *)(mxGPUGetDataReadOnly(data));
W = mxGPUCreateFromMxArray(prhs[2]);
d_W = (float const *)(mxGPUGetDataReadOnly(W));
WtW = mxGPUCreateFromMxArray(prhs[3]);
d_WtW = (float const *)(mxGPUGetDataReadOnly(WtW));
mu = mxGPUCreateFromMxArray(prhs[4]);
d_mu = (float const *)(mxGPUGetDataReadOnly(mu));
lam = mxGPUCreateFromMxArray(prhs[5]);
d_lam = (float const *)(mxGPUGetDataReadOnly(lam));
/* allocate new GPU variables*/
float *d_err,*d_C, *d_xbest, *d_x, *d_dout;
int *d_st, *d_ftype, *d_id, *d_counter;
cudaMalloc(&d_dout, NT * blocksPerGrid* sizeof(float));
cudaMalloc(&d_err, NT * sizeof(float));
cudaMalloc(&d_xbest, NT * sizeof(float));
cudaMalloc(&d_ftype, NT * sizeof(int));
cudaMalloc(&d_st, maxFR * sizeof(int));
cudaMalloc(&d_id, maxFR * sizeof(int));
cudaMalloc(&d_x, maxFR * sizeof(float));
cudaMalloc(&d_C, maxFR * sizeof(float));
cudaMalloc(&d_counter, 2*sizeof(int));
cudaMemset(d_dout, 0, NT * blocksPerGrid * sizeof(float));
cudaMemset(d_counter, 0, 2*sizeof(int));
cudaMemset(d_st, 0, maxFR * sizeof(int));
cudaMemset(d_id, 0, maxFR * sizeof(int));
cudaMemset(d_x, 0, maxFR * sizeof(float));
cudaMemset(d_C, 0, maxFR * sizeof(float));
int *counter;
counter = (int*) calloc(1,sizeof(int));
Conv1D<<<blocksPerGrid,threadsPerBlock>>>(d_Params, d_data, d_W, d_dout);
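// Iterative matched-pursuit loop: Params[4] bounds the number of passes. Each pass
// re-scores the residual convolution signal, extracts non-overlapping peaks and
// subtracts the detected spikes from d_dout, stopping early once maxFR spikes
// have been collected.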
for(int k=0;k<(int) Params[4];k++){
cudaMemset(d_err, 0, NT * sizeof(float));
cudaMemset(d_ftype, 0, NT * sizeof(int));
cudaMemset(d_xbest, 0, NT * sizeof(float));
bestFilter<<<NT/Nthreads,threadsPerBlock>>>( d_Params, d_dout, d_mu, d_lam, d_xbest, d_err, d_ftype);
cleanup_spikes<<<NT/Nthreads,threadsPerBlock>>>(d_Params, d_xbest, d_err, d_ftype, d_st, d_id, d_x, d_C, d_counter);
cudaMemcpy(counter, d_counter, sizeof(int), cudaMemcpyDeviceToHost);
if (counter[0]>maxFR){
counter[0] = maxFR;
cudaMemcpy(d_counter, counter, sizeof(int), cudaMemcpyHostToDevice);
}
subSpikes<<<blocksPerGrid, 2*nt0-1>>>(d_Params, d_st, d_id, d_x, d_counter, d_dout, d_WtW);
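        /* record on the device how many spikes have been handled so far
           (d_counter[1] = d_counter[0]), so the next subSpikes pass only
           processes newly detected spikes */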
        cudaMemcpy(d_counter+1, d_counter, sizeof(int), cudaMemcpyDeviceToDevice);
if(counter[0]==maxFR)
break;
}
float *x, *C;
int *st, *id;
int minSize;
if (counter[0]<maxFR) minSize = counter[0];
else minSize = maxFR;
const mwSize dimst[] = {minSize,1};
plhs[0] = mxCreateNumericArray(2, dimst, mxINT32_CLASS, mxREAL);
st = (int*) mxGetData(plhs[0]);
plhs[1] = mxCreateNumericArray(2, dimst, mxINT32_CLASS, mxREAL);
id = (int*) mxGetData(plhs[1]);
plhs[2] = mxCreateNumericArray(2, dimst, mxSINGLE_CLASS, mxREAL);
x = (float*) mxGetData(plhs[2]);
plhs[3] = mxCreateNumericArray(2, dimst, mxSINGLE_CLASS, mxREAL);
C = (float*) mxGetData(plhs[3]);
cudaMemcpy(st, d_st, minSize * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(id, d_id, minSize * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(x, d_x, minSize * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(C, d_C, minSize * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_ftype);
cudaFree(d_err);
cudaFree(d_xbest);
cudaFree(d_st);
cudaFree(d_id);
cudaFree(d_x);
cudaFree(d_C);
cudaFree(d_counter);
cudaFree(d_Params);
cudaFree(d_dout);
mxGPUDestroyGPUArray(data);
mxGPUDestroyGPUArray(WtW);
mxGPUDestroyGPUArray(W);
mxGPUDestroyGPUArray(mu);
mxGPUDestroyGPUArray(lam);
}
|
85275703d530d492f9be095e3e1ed9550429c1ef.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "DeviceDefs.cuh"
#include "WarpSelectImpl_hip.cuh"
namespace faiss {
namespace gpu {
// warp Q to thread Q:
// 1, 1
// 32, 2
// 64, 3
// 128, 3
// 256, 4
// 512, 8
// 1024, 8
// 2048, 8
WARP_SELECT_DECL(float, true, 1);
WARP_SELECT_DECL(float, true, 32);
WARP_SELECT_DECL(float, true, 64);
WARP_SELECT_DECL(float, true, 128);
WARP_SELECT_DECL(float, true, 256);
WARP_SELECT_DECL(float, true, 512);
WARP_SELECT_DECL(float, true, 1024);
#if GPU_MAX_SELECTION_K >= 2048
WARP_SELECT_DECL(float, true, 2048);
#endif
WARP_SELECT_DECL(float, false, 1);
WARP_SELECT_DECL(float, false, 32);
WARP_SELECT_DECL(float, false, 64);
WARP_SELECT_DECL(float, false, 128);
WARP_SELECT_DECL(float, false, 256);
WARP_SELECT_DECL(float, false, 512);
WARP_SELECT_DECL(float, false, 1024);
#if GPU_MAX_SELECTION_K >= 2048
WARP_SELECT_DECL(float, false, 2048);
#endif
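// runWarpSelect dispatches to the smallest pre-instantiated warp-select kernel
// (declared above via WARP_SELECT_DECL) whose queue can hold k results, for either
// selection direction.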
void runWarpSelect(
Tensor<float, 2, true>& in,
Tensor<float, 2, true>& outK,
Tensor<int, 2, true>& outV,
bool dir,
int k,
hipStream_t stream) {
FAISS_ASSERT(k <= 2048);
if (dir) {
if (k == 1) {
WARP_SELECT_CALL(float, true, 1);
} else if (k <= 32) {
WARP_SELECT_CALL(float, true, 32);
} else if (k <= 64) {
WARP_SELECT_CALL(float, true, 64);
} else if (k <= 128) {
WARP_SELECT_CALL(float, true, 128);
} else if (k <= 256) {
WARP_SELECT_CALL(float, true, 256);
} else if (k <= 512) {
WARP_SELECT_CALL(float, true, 512);
} else if (k <= 1024) {
WARP_SELECT_CALL(float, true, 1024);
#if GPU_MAX_SELECTION_K >= 2048
} else if (k <= 2048) {
WARP_SELECT_CALL(float, true, 2048);
#endif
}
} else {
if (k == 1) {
WARP_SELECT_CALL(float, false, 1);
} else if (k <= 32) {
WARP_SELECT_CALL(float, false, 32);
} else if (k <= 64) {
WARP_SELECT_CALL(float, false, 64);
} else if (k <= 128) {
WARP_SELECT_CALL(float, false, 128);
} else if (k <= 256) {
WARP_SELECT_CALL(float, false, 256);
} else if (k <= 512) {
WARP_SELECT_CALL(float, false, 512);
} else if (k <= 1024) {
WARP_SELECT_CALL(float, false, 1024);
#if GPU_MAX_SELECTION_K >= 2048
} else if (k <= 2048) {
WARP_SELECT_CALL(float, false, 2048);
#endif
}
}
}
} // namespace gpu
} // namespace faiss
| 85275703d530d492f9be095e3e1ed9550429c1ef.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "DeviceDefs.cuh"
#include "WarpSelectImpl.cuh"
namespace faiss {
namespace gpu {
// warp Q to thread Q:
// 1, 1
// 32, 2
// 64, 3
// 128, 3
// 256, 4
// 512, 8
// 1024, 8
// 2048, 8
WARP_SELECT_DECL(float, true, 1);
WARP_SELECT_DECL(float, true, 32);
WARP_SELECT_DECL(float, true, 64);
WARP_SELECT_DECL(float, true, 128);
WARP_SELECT_DECL(float, true, 256);
WARP_SELECT_DECL(float, true, 512);
WARP_SELECT_DECL(float, true, 1024);
#if GPU_MAX_SELECTION_K >= 2048
WARP_SELECT_DECL(float, true, 2048);
#endif
WARP_SELECT_DECL(float, false, 1);
WARP_SELECT_DECL(float, false, 32);
WARP_SELECT_DECL(float, false, 64);
WARP_SELECT_DECL(float, false, 128);
WARP_SELECT_DECL(float, false, 256);
WARP_SELECT_DECL(float, false, 512);
WARP_SELECT_DECL(float, false, 1024);
#if GPU_MAX_SELECTION_K >= 2048
WARP_SELECT_DECL(float, false, 2048);
#endif
void runWarpSelect(
Tensor<float, 2, true>& in,
Tensor<float, 2, true>& outK,
Tensor<int, 2, true>& outV,
bool dir,
int k,
hipStream_t stream) {
FAISS_ASSERT(k <= 2048);
if (dir) {
if (k == 1) {
WARP_SELECT_CALL(float, true, 1);
} else if (k <= 32) {
WARP_SELECT_CALL(float, true, 32);
} else if (k <= 64) {
WARP_SELECT_CALL(float, true, 64);
} else if (k <= 128) {
WARP_SELECT_CALL(float, true, 128);
} else if (k <= 256) {
WARP_SELECT_CALL(float, true, 256);
} else if (k <= 512) {
WARP_SELECT_CALL(float, true, 512);
} else if (k <= 1024) {
WARP_SELECT_CALL(float, true, 1024);
#if GPU_MAX_SELECTION_K >= 2048
} else if (k <= 2048) {
WARP_SELECT_CALL(float, true, 2048);
#endif
}
} else {
if (k == 1) {
WARP_SELECT_CALL(float, false, 1);
} else if (k <= 32) {
WARP_SELECT_CALL(float, false, 32);
} else if (k <= 64) {
WARP_SELECT_CALL(float, false, 64);
} else if (k <= 128) {
WARP_SELECT_CALL(float, false, 128);
} else if (k <= 256) {
WARP_SELECT_CALL(float, false, 256);
} else if (k <= 512) {
WARP_SELECT_CALL(float, false, 512);
} else if (k <= 1024) {
WARP_SELECT_CALL(float, false, 1024);
#if GPU_MAX_SELECTION_K >= 2048
} else if (k <= 2048) {
WARP_SELECT_CALL(float, false, 2048);
#endif
}
}
}
} // namespace gpu
} // namespace faiss
|
a218cc74430a0494c3a065e1b1bc6f4c9a407bfe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2100
struct hipComplex {
float r;
float i;
__device__ hipComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ hipComplex operator*(const hipComplex& a) {
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator-(const hipComplex& a) {
return hipComplex(r-a.r, i-a.i);
}
__device__ hipComplex operator+(const hipComplex& a) {
return hipComplex(r+a.r, i+a.i);
}
__device__ hipComplex operator/(const hipComplex& a) {
return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
}
};
__device__ hipComplex conj(hipComplex m)
{
hipComplex out(m.r,-m.i);
return out;
}
__device__ hipComplex nor(hipComplex m)
{
hipComplex out(m.r*m.r+m.i*m.i,0.0);
return out;
}
__device__ float norg(hipComplex m)
{
return sqrtf(m.r*m.r+m.i*m.i);
}
__device__ hipComplex qpoch(hipComplex a, hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex unity(1.0,0.0);
int i = 0;
hipComplex Q = q;
if(q.magnitude2()>1.0)
{
return hipComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
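    // i.e. a truncated form of (a; q)_inf = prod_{k>=0} (1 - a q^k); since the loop
    // starts at Q = q, the k = 0 factor (1 - a) is not included here.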
for(i=1;i<80;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ hipComplex qp(hipComplex a, hipComplex q, int n) {
hipComplex out(1.0,0.0);
hipComplex unity(1.0,0.0);
int i = 0;
hipComplex Q = q;
if(q.magnitude2()>1.0)
{
return hipComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<n;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ hipComplex ramphi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,mq)/qpoch(q,mq);
}
__device__ hipComplex rampsi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,q)*qpoch(q*q,q*q);
}
__device__ hipComplex ramchi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,q*q);
}
__device__ hipComplex ramf(hipComplex a, hipComplex b) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex ma = mone*a;
hipComplex mb = mone*b;
return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}
// complex exponential
__device__ hipComplex expc(hipComplex m)
{
hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i));
return out;
}
__device__ hipComplex powc(hipComplex ag, hipComplex bg)
{
hipComplex out(0.0,0.0);
hipComplex mesp(0.0,0.0);
hipComplex frim(0.0,0.0);
double radiu, thet;
/* get the proper polar form of the complex number */
radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
thet = atan2f(ag.i,ag.r);
/* mesp gives R^(c+di) */
mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
/* frim gives e^(i theta (c+di)) */
/* now since we already have the machinery
for performing complex exponentiation (just exp), we
can just call that here */
frim.r = -1.0 * bg.i * thet;
frim.i = bg.r * thet;
frim = expc(frim);
out = mesp*frim;
return out;
}
// cosine (nothing algorithmically clean)
__device__ hipComplex cosc(hipComplex m)
{
hipComplex ai(0.0,1.0);
hipComplex ot(0.5,0.0);
hipComplex mone(-1.0,0.0);
hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai));
return out;
}
__device__ hipComplex sins(hipComplex m)
{
hipComplex ai(0.0,1.0);
hipComplex ot(0.0,0.5);
hipComplex mone(-1.0,0.0);
hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
return out;
}
__device__ hipComplex tans(hipComplex m)
{
return sins(m)/cosc(m);
}
__device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z)
{
hipComplex out(0.0,0.0);
hipComplex ai(0.0,1.0);
hipComplex unity(1.0,0.0);
out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
return out;
}
__device__ hipComplex mob(hipComplex a, hipComplex z)
{
hipComplex out(0.0,0.0);
hipComplex ai(0.0,1.0);
hipComplex unity(1.0,0.0);
out = (z-a)/(unity-conj(a)*z);
return out;
}
__device__ hipComplex bnewt(hipComplex z) {
hipComplex three(3.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex out(0.0,0.0);
hipComplex Z =z;
hipComplex L(0.0,0.0);
hipComplex R(0.62348980185873359,0.7818314824680298);
hipComplex v(0.62348980185873359,0.7818314824680298);
int i;
for(i=0;i<100;i++)
{
L = sins(expc(Z)-cosc(Z))-Z;
out = out + v*L;
v = R * v;
Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
}
return out;
}
__device__ hipComplex they3(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + powc(q,enn*enn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ hipComplex wahi(hipComplex z)
{
int u;
hipComplex un(1.0,0.0);
hipComplex ne(1.0,0.0);
hipComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne);
ne = ne + un;
}
out = out + un;
return out;
}
__device__ hipComplex dwahi(hipComplex z)
{
int u;
hipComplex un(1.0,0.0);
hipComplex ne(1.0,0.0);
hipComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne-un);
ne = ne + un;
}
return out;
}
__device__ hipComplex they3p(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ hipComplex h3ey3p(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex aut(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
hipComplex vel(0.0,0.0);
hipComplex rav(0.0,0.0);
for(u=-40;u<40;u++)
{
vel = expc(dui*enn*z);
rav = powc(q,enn*enn);
aut = aut + (enn*enn)*rav/q*vel;
out = out + rav*vel;
enn = enn + onn;
}
return out/aut;
}
__device__ hipComplex thess(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
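// the1..the4 below are truncated (10-factor) triple-product style expansions used here
// as the four Jacobi theta functions; the1 and the2 carry the extra 2*q^(1/4)*sin(z),
// resp. 2*q^(1/4)*cos(z), prefactor.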
__device__ hipComplex the1(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*sins(z);
}
__device__ hipComplex the2(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*cosc(z);
}
__device__ hipComplex the3(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex the4(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
/* routine to generate q-integers */
__device__ hipComplex qin(hipComplex a, hipComplex q)
{
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
out = (unity - powc(q, a))/(unity-q);
return out;
}
/* generating function for n^2 */
__device__ hipComplex geffa(hipComplex z, hipComplex q)
{
hipComplex out(0.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex wu(0.0,0.0);
hipComplex Z=unity;
int v;
for(v=0;v<20;v++)
{
out = out + qin(wu*wu,q)* Z;
wu = wu + unity;
Z = z * Z;
}
return out;
}
__device__ hipComplex thratd(hipComplex z, hipComplex q)
{
int n;
hipComplex fau(4.0,0.0);
hipComplex too(2.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex ennn(1.0,0.0);
hipComplex ni(-1.0,0.0);
hipComplex noo(-1.0,0.0);
hipComplex out(0.0,0.0);
hipComplex loo = q;
hipComplex qoo =q*q;
for(n=0;n<80;n++)
{
out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z);
qoo = qoo * q*q;
loo = loo * q;
ennn = ennn +unity;
noo = ni * noo;
}
return out*fau;
}
__device__ hipComplex thess4(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex roo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
roo = roo * r * r ;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
}
return out;
}
__device__ hipComplex thass(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex rogers( hipComplex q)
{
hipComplex onf(0.2,0.0);
hipComplex Q5 = q*q*q*q*q;
hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
return out;
}
__device__ hipComplex flat(hipComplex m)
{
float ua = sqrtf(m.r*m.r + m.i*m.i);
hipComplex out(m.r/ua,m.i/ua);
return out;
}
__device__ hipComplex eff(hipComplex z, hipComplex lambda)
{
return z*z*z*z+ lambda/(z*z*z*z);
}
__device__ hipComplex thete(float R, hipComplex tau, hipComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
hipComplex A(0.0,0.0);
/* miscellaneous setup */
hipComplex pai(3.14159265353898,0.0);
hipComplex ai(0.0,1.0);
hipComplex oo(1.0,0.0);
hipComplex oot(2.0,0.0);
hipComplex nini(9.0,0.0);
hipComplex eigh(-18.0,0.0);
/* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
hipComplex frann(1.0,0.0);
frann = pai * ai * tau ;
hipComplex shenn(1.0,0.0);
shenn = oot * ai * z;
hipComplex plenn(1.0,0.0);
hipComplex enn(1.0,0.0);
hipComplex ann(1.0,0.0);
hipComplex bnn(1.0,0.0);
hipComplex scrunn(1.0,0.0);
float ca, cb,cc;
int a, b;
for(a=-10;a<10;a++)
{
ann.r = a;
for(b=-10;b<10;b++)
{
bnn.r = b;
if(((a+b)%2)==0)
{
scrunn.r = a*a + b*b;
A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn));
}
else
{
ca = 5.0 + a*a + b*b;
cb = 2*(a * cos(R)- b * sin(R));
cc = 4*(b * cos(R)+a*sin(R));
scrunn.r = ca + cb + cc;
A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn));
}
}
}
return A;
}
__device__ hipComplex thetta(hipComplex tau, hipComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
hipComplex A(0.0,0.0);
/* miscellaneous setup */
hipComplex pai(3.14159265353898,0.0);
hipComplex ai(0.0,1.0);
hipComplex oo(1.0,0.0);
hipComplex oot(2.0,0.0);
hipComplex nini(9.0,0.0);
hipComplex eigh(-18.0,0.0);
/* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
hipComplex frann(1.0,0.0);
frann = pai * ai * tau ;
hipComplex shenn(1.0,0.0);
shenn = oot * ai * z;
hipComplex plenn(1.0,0.0);
hipComplex enn(1.0,0.0);
int n;
for(n=-10;n<10;n++)
{
enn.r = n;
plenn = enn * enn;
/* this get the hipComplex out of the event loop */
A = A + expc(frann* plenn) * expc(shenn* enn);
}
return A;
}
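// mitlef: truncated series of Mittag-Leffler type, sum_v z^v / Gamma(c.r*v + c.i),
// evaluated with real gamma arguments only (tgammaf).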
__device__ hipComplex mitlef(hipComplex z,hipComplex c)
{
hipComplex out(0.0,0.0);
hipComplex Z(1.0,0.0);
hipComplex frove(0.0,0.0);
int v;
for(v=0;v<20;v++)
{
frove.r = tgammaf(c.r*v+c.i);
out = out + Z/frove;
Z = Z * z;
}
return out;
}
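// helva, hylva, hilva, halva, hinva, henga, holva, aliva, ariva: componentwise maps that
// apply real Bessel and related special functions (j0f, j1f, y0f, y1f, cyl_bessel_i1f, ...)
// to the real and imaginary parts separately; some of them feed the product formulas
// below (arago, irigo, urigo).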
__device__ hipComplex helva(hipComplex z)
{
hipComplex out(j0f(z.r),j1f(z.i));
return out;
}
__device__ hipComplex hylva(hipComplex z)
{
hipComplex out(j1f(1/j0f(z.r)),j1f(1/j0f(z.i)));
return out;
}
__device__ hipComplex hilva(hipComplex z)
{
hipComplex out(j1f(z.r),j0f(z.i));
return out;
}
__device__ hipComplex halva(hipComplex z)
{
hipComplex out(j0f(z.r),j0f(z.i));
return out;
}
__device__ hipComplex hinva(hipComplex z)
{
hipComplex out(j1f(z.r),j1f(z.i));
return out;
}
__device__ hipComplex henga(hipComplex z)
{
hipComplex out(acoshf(z.r),asinhf(z.i));
return out;
}
__device__ hipComplex holva(hipComplex z)
{
hipComplex out(y0f(z.r),y1f(z.i));
return out;
}
__device__ hipComplex aliva(hipComplex z)
{
hipComplex out(j1f(z.r),cyl_bessel_i1f(z.i));
return out;
}
__device__ hipComplex ariva(hipComplex z)
{
hipComplex out(sinf(z.i),cbrtf(z.r));
return out;
}
__device__ hipComplex arago(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex irigo(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex urigo(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex arreg(hipComplex q, hipComplex r, hipComplex z)
{
/* arreg implements the representation of theta3'(z)/theta(z) I don't know if these are
derivatives with respect to z or q, we'll see */
hipComplex out(0.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex roo(1.0,0.0);
hipComplex morra(-1.0,0.0);
hipComplex tla(1.0,0.0);
hipComplex vnn(0.0,0.0);
hipComplex fou(4.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex run(1.0,0.0);
int v;
for(v=0;v<20;v++)
{
qoo = qoo * q;
roo = roo * r * r;
tla = tla * morra;
vnn = vnn + run;
out = out + morra*qoo*sins(tw*z*run)/(run-roo);
}
return fou*out;
}
__device__ hipComplex urreg(hipComplex q, hipComplex r, hipComplex z)
{
/* arreg implements the representation of theta3'(z)/theta(z) I don't know if these are
derivatives with respect to z or q, we'll see */
hipComplex out(0.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex roo(1.0,0.0);
hipComplex morra(-1.0,0.0);
hipComplex tla(1.0,0.0);
hipComplex vnn(0.0,0.0);
hipComplex fou(4.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex run(1.0,0.0);
int v;
for(v=0;v<10;v++)
{
qoo = qoo * q;
roo = roo * r * r;
tla = tla * morra;
vnn = vnn + run;
out = out + morra*qoo*the3(tw*z*run,r)/(run-roo);
}
return fou*out;
}
// * small q-exponential
__device__ hipComplex qexp(hipComplex z, hipComplex q)
{
hipComplex mone(-1.0,0.0);
hipComplex une(1.0,0.0);
return une/qpoch(z,q);
}
//* large q exponential is just qpoch(-z,q)
__device__ hipComplex qExp(hipComplex z, hipComplex q)
{
hipComplex mone(-1.0,0.0);
hipComplex une(1.0,0.0);
return qpoch(mone*z,q);
}
__device__ hipComplex sinq(hipComplex z, hipComplex q)
{
hipComplex aie(0.0,1.0);
hipComplex out(0.0,0.0);
hipComplex doo(2.0,0.0);
out = (qexp(z*aie,q) -qexp(z*aie,q))/doo;
return out;
}
__device__ hipComplex cosq(hipComplex z, hipComplex q)
{
hipComplex aie(0.0,1.0);
hipComplex out(0.0,0.0);
hipComplex doo(2.0,0.0);
out = (qexp(z*aie,q) +qexp(z*aie,q))/doo;
return out;
}
__device__ hipComplex Sinq(hipComplex z, hipComplex q)
{
hipComplex aie(0.0,1.0);
hipComplex out(0.0,0.0);
hipComplex doo(2.0,0.0);
out = (qExp(z*aie,q) -qExp(z*aie,q))/doo;
return out;
}
__device__ hipComplex Cosq(hipComplex z, hipComplex q)
{
hipComplex aie(0.0,1.0);
hipComplex out(0.0,0.0);
hipComplex doo(2.0,0.0);
out = (qExp(z*aie,q) +qExp(z*aie,q))/doo;
return out;
}
__device__ hipComplex asins(hipComplex z)
{
float alp = 0.5 * (sqrtf((z.r+1)*(z.r+1) + z.i*z.i) + sqrtf((z.r-1)*(z.r-1) + z.i*z.i));
float bet = 0.5 * (sqrtf((z.r+1)*(z.r+1) + z.i*z.i) - sqrtf((z.r-1)*(z.r-1) + z.i*z.i));
float fla = z.i/abs(z.i);
// *signum, but without a comparison, probably a saner way to do this? //
hipComplex out(0.0,0.0);
out.r = asinf(bet);
out.i = fla * logf(alp + sqrtf(alp*alp-1));
return out;
}
__device__ int gcd(int a, int b)
{
int remainder = a % b;
if (remainder == 0) {
return b;
}
return gcd(b, remainder);
}
/* Real Analytic Eisenstein Series */
__device__ hipComplex reis(hipComplex s, hipComplex z)
{
// see en.wikipedia.org/wiki/Real_analytic_Eisenstein_series
hipComplex out(0.0,0.0);
hipComplex hav(0.5,0.0);
hipComplex xu=out;
hipComplex yu=out;
yu.r = z.i;
int m,n;
hipComplex ema=out;
hipComplex ena=out;
hipComplex den=out;
for(m=-20;m<20;m++)
{
for(n=-20;n<20;n++)
{
if((m!=0)&&(n!=0))
{
if((gcd(m,n)==1))
{
ena.r = n;
ema.r = m;
den.r = norg(ema*z+ena);
out = out + powc(yu,s)/powc(den,s/hav);
}
}
}
}
return out;
}
__device__ hipComplex thu3(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * asins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex trev(hipComplex lav, hipComplex mel, hipComplex rel)
{
hipComplex out(0.0,0.0);
hipComplex V(0.739085133215160641655312087674,0.0);
int v;
for(v=0;v<3;v++)
{
lav = lav - rel*(cosc(lav)-powc(V,rel))/cosc(lav);
out = out + mel*(cosc(lav)-powc(V,mel));
}
return out;
}
__device__ hipComplex polylog(hipComplex z, hipComplex s)
{
hipComplex out(0.0,0.0);
hipComplex oom(1.0,0.0);
hipComplex flag=oom;
int v;
for(v=0;v<30;v++)
{
flag = flag + oom;
out = out + powc(z,flag)/powc(flag,s);
}
return out;
}
__device__
unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r= blockIdx.y*blockDim.y + threadIdx.y;
const int i = c + r*w; // 1D indexing
float pi = 3.1415926535898;
hipComplex ip(pi,0.0);
const float scale =
14.3;
float fx = scale * (float)(DIM/2 - c)/(DIM/2);
float fy = scale * (float)(DIM/2 - r)/(DIM/2);
hipComplex effx(fx,0.0);
hipComplex effy(fy,0.0);
float LA = scale * (float)(DIM/2 - pos.x)/(DIM/2);
float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2);
hipComplex mouse(LA,LB);
hipComplex moux(LA,0.0);
hipComplex mouy(0.0,LB);
hipComplex q(fx,fy);
/* hipComplex tik(sin(ticks/40.0f),0.0);*/
/* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0));
hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024));
hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/
hipComplex fixon(.029348,.828934);
hipComplex faxon(.029348,-.828934);
hipComplex unity(1.0,0.0);
hipComplex ai(0.0,1.0);
hipComplex aon = expc(ai*moux);
hipComplex uon= expc(mouy);
hipComplex flurn(0.0,0.0);
hipComplex accume(1.0,0.0);
hipComplex eccume(1.0,0.0);
hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0);
hipComplex cue = q;
hipComplex lam(0.73736887807831963, -0.67549029426152396);
hipComplex due(3.0,0.0);
hipComplex tir(2.0,0.0);
hipComplex selga(3.5,0.0);
hipComplex vro(-1.0,0.0);
hipComplex tle(0.0,0.0);
hipComplex sle(4.0,0.0);
hipComplex cherra(0.62348980185873359, 0.7818314824680298);
hipComplex lerra = cherra*cherra;
hipComplex ferra = lerra * cherra;
hipComplex terra = ferra * cherra;
hipComplex zerra = terra * cherra;
hipComplex nerra = zerra * cherra;
hipComplex vlarv(1/3.0,0.0);
hipComplex sugna(0.70710678118654757, 0.70710678118654746);
hipComplex regna(0.99966573338968745, 0.025853848581176047);
hipComplex spa(sqrtf(2.0),0.0);
hipComplex spb(sqrtf(3.0),0.0);
hipComplex spc(sqrtf(4.0),0.0);
hipComplex spd(sqrtf(5.0),0.0);
hipComplex mrun(1/2.0,0.0);
hipComplex gloon (4.0,0.0);
hipComplex plenod(-.01,0.0);
hipComplex nue = cue;
hipComplex bor(-10.0,0.0);
hipComplex nat(0.0,-10.0);
hipComplex rhus(1.0,0.0);
hipComplex D(0.739085133215160641655312087674,0.0);
hipComplex gren(2.0,0.0);
hipComplex next=flurn;
hipComplex current = cue;
hipComplex xnext = flurn;
hipComplex xcurrent = cue;
hipComplex rue=cue;
hipComplex tinny(.0001,0.0001);
hipComplex raga(0.5,27.0);
hipComplex ruga(0.5,0.0);
hipComplex senna(2.0,0.0);
hipComplex renna(3.0,0.0);
hipComplex finch(0.001,.001);
hipComplex smenn(0.5,sqrtf(3.0)/2.0);
hipComplex lmenn(0.96592582628906831,0.25881904510252074);
hipComplex vmenn(-0.5,-sqrtf(3.0)/2.0);
float ah, ex, feig;
feig = 3.67;
ex = 2.10981;
float xa,xb,ya,yb,tta,ttb;
char va,vb,vc;
hipComplex seahorse(-0.75,0.123);
/* if ((c >= w) || (r >= h)) return; // Check if within image bounds
const int i = c + r*w; // 1D indexing
const int dist = sqrtf((c - pos.x)*(c - pos.x) +
(r - pos.y)*(r - pos.y));
const unsigned char intensity = clip(255 - dist);*/
// theta function varying on constant
// cue =thess(cue,fixon*mouse);
int v=1;
int axa=-10;
int uu;
/* encode the nome in q */
cue = mob(mouse,cue);
cue = the3(q,cue);
/*while((v<100)&&norg(cue)<2.0)
{
cue = cue*(cue-mouy)*(cue-moux) -cue * q;
v++;
}*/
// One way of describing this would be we want to perform Newton's method
//on the Mandelbrot set
/* preiterate */
//tex.stackexchange.com/questions/278843/making-a-phase-portrait-of-two-autonomous-system-of-differential-equations-with?fbclid=IwAR2Tz66CbUAq7LFVYck4uUGF5uQWnmzf5iZw3Bi8IOycvCC7czO6ZVgkz3s
// this is not terribly hard to do with cuda
// what we need:
// x' = x - y -> dx / dt = x - y
// y' = 1 - x^2 -> dy / dt = 1-x^2
// dy / dx = (dy / dt) / (dx/ dt)
// so the trick is to convert dy/dx into a unit complex number to make this work, okay that's not that difficult
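    // i.e. take w = (x - y) + i*(1 - x^2) as the field direction and normalise it to w/|w|;
    // the block below does not do that yet -- it simply colours each pixel by the phase of cue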
{
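    // colour by phase: map arg(cue) to a fraction of a full turn and use three sin^2
    // channels offset by pi/3 as the R, G and B components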
double tha;
tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
d_out[i].w = 255;
}
}
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos);
}
/*for(v=1;v<5;v++)
{
cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
}
cue = accume;*/
/*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/
/*for(v=0;v<60;v++){
cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/
/*
One for
(x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0
The other for
cos(x)-x
Eripgrunna
*/ | a218cc74430a0494c3a065e1b1bc6f4c9a407bfe.cu | #include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2100
struct cuComplex {
float r;
float i;
__device__ cuComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ cuComplex operator*(const cuComplex& a) {
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ cuComplex operator-(const cuComplex& a) {
return cuComplex(r-a.r, i-a.i);
}
__device__ cuComplex operator+(const cuComplex& a) {
return cuComplex(r+a.r, i+a.i);
}
__device__ cuComplex operator/(const cuComplex& a) {
return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
}
};
__device__ cuComplex conj(cuComplex m)
{
cuComplex out(m.r,-m.i);
return out;
}
__device__ cuComplex nor(cuComplex m)
{
cuComplex out(m.r*m.r+m.i*m.i,0.0);
return out;
}
__device__ float norg(cuComplex m)
{
return sqrtf(m.r*m.r+m.i*m.i);
}
__device__ cuComplex qpoch(cuComplex a, cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex unity(1.0,0.0);
int i = 0;
cuComplex Q = q;
if(q.magnitude2()>1.0)
{
return cuComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<80;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ cuComplex qp(cuComplex a, cuComplex q, int n) {
cuComplex out(1.0,0.0);
cuComplex unity(1.0,0.0);
int i = 0;
cuComplex Q = q;
if(q.magnitude2()>1.0)
{
return cuComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<n;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ cuComplex ramphi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,mq)/qpoch(q,mq);
}
__device__ cuComplex rampsi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,q)*qpoch(q*q,q*q);
}
__device__ cuComplex ramchi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,q*q);
}
__device__ cuComplex ramf(cuComplex a, cuComplex b) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex ma = mone*a;
cuComplex mb = mone*b;
return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}
// complex exponential
__device__ cuComplex expc(cuComplex m)
{
cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i));
return out;
}
__device__ cuComplex powc(cuComplex ag, cuComplex bg)
{
cuComplex out(0.0,0.0);
cuComplex mesp(0.0,0.0);
cuComplex frim(0.0,0.0);
double radiu, thet;
/* get the proper polar form of the complex number */
radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
thet = atan2f(ag.i,ag.r);
/* mesp gives R^(c+di) */
mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
/* frim gives e^(i theta (c+di)) */
/* now since we already have the machinery
for performing complex exponentiation (just exp), we
can just call that here */
frim.r = -1.0 * bg.i * thet;
frim.i = bg.r * thet;
frim = expc(frim);
out = mesp*frim;
return out;
}
// cosine (nothing algorithmically clean)
__device__ cuComplex cosc(cuComplex m)
{
cuComplex ai(0.0,1.0);
cuComplex ot(0.5,0.0);
cuComplex mone(-1.0,0.0);
cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai));
return out;
}
__device__ cuComplex sins(cuComplex m)
{
cuComplex ai(0.0,1.0);
cuComplex ot(0.0,0.5);
cuComplex mone(-1.0,0.0);
cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
return out;
}
__device__ cuComplex tans(cuComplex m)
{
return sins(m)/cosc(m);
}
__device__ cuComplex moeb(cuComplex t, cuComplex a, cuComplex z)
{
cuComplex out(0.0,0.0);
cuComplex ai(0.0,1.0);
cuComplex unity(1.0,0.0);
out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
return out;
}
__device__ cuComplex mob(cuComplex a, cuComplex z)
{
cuComplex out(0.0,0.0);
cuComplex ai(0.0,1.0);
cuComplex unity(1.0,0.0);
out = (z-a)/(unity-conj(a)*z);
return out;
}
__device__ cuComplex bnewt(cuComplex z) {
cuComplex three(3.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex out(0.0,0.0);
cuComplex Z =z;
cuComplex L(0.0,0.0);
cuComplex R(0.62348980185873359,0.7818314824680298);
cuComplex v(0.62348980185873359,0.7818314824680298);
int i;
for(i=0;i<100;i++)
{
L = sins(expc(Z)-cosc(Z))-Z;
out = out + v*L;
v = R * v;
Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
}
return out;
}
__device__ cuComplex they3(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + powc(q,enn*enn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ cuComplex wahi(cuComplex z)
{
int u;
cuComplex un(1.0,0.0);
cuComplex ne(1.0,0.0);
cuComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne);
ne = ne + un;
}
out = out + un;
return out;
}
__device__ cuComplex dwahi(cuComplex z)
{
int u;
cuComplex un(1.0,0.0);
cuComplex ne(1.0,0.0);
cuComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne-un);
ne = ne + un;
}
return out;
}
__device__ cuComplex they3p(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ cuComplex h3ey3p(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex aut(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
cuComplex vel(0.0,0.0);
cuComplex rav(0.0,0.0);
for(u=-40;u<40;u++)
{
vel = expc(dui*enn*z);
rav = powc(q,enn*enn);
aut = aut + (enn*enn)*rav/q*vel;
out = out + rav*vel;
enn = enn + onn;
}
return out/aut;
}
__device__ cuComplex thess(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex the1(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*sins(z);
}
__device__ cuComplex the2(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex rt(0.25,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return tw*out*powc(q,rt)*cosc(z);
}
__device__ cuComplex the3(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex the4(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
/* routine to generate q-integers */
__device__ cuComplex qin(cuComplex a, cuComplex q)
{
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
out = (unity - powc(q, a))/(unity-q);
return out;
}
/* generating function for n^2 */
__device__ cuComplex geffa(cuComplex z, cuComplex q)
{
cuComplex out(0.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex wu(0.0,0.0);
cuComplex Z=unity;
int v;
for(v=0;v<20;v++)
{
out = out + qin(wu*wu,q)* Z;
wu = wu + unity;
Z = z * Z;
}
return out;
}
__device__ cuComplex thratd(cuComplex z, cuComplex q)
{
int n;
cuComplex fau(4.0,0.0);
cuComplex too(2.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex ennn(1.0,0.0);
cuComplex ni(-1.0,0.0);
cuComplex noo(-1.0,0.0);
cuComplex out(0.0,0.0);
cuComplex loo = q;
cuComplex qoo =q*q;
for(n=0;n<80;n++)
{
out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z);
qoo = qoo * q*q;
loo = loo * q;
ennn = ennn +unity;
noo = ni * noo;
}
return out*fau;
}
__device__ cuComplex thess4(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex roo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
roo = roo * r * r ;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
}
return out;
}
__device__ cuComplex thass(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex rogers( cuComplex q)
{
cuComplex onf(0.2,0.0);
cuComplex Q5 = q*q*q*q*q;
cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
return out;
}
__device__ cuComplex flat(cuComplex m)
{
float ua = sqrtf(m.r*m.r + m.i*m.i);
cuComplex out(m.r/ua,m.i/ua);
return out;
}
__device__ cuComplex eff(cuComplex z, cuComplex lambda)
{
return z*z*z*z+ lambda/(z*z*z*z);
}
__device__ cuComplex thete(float R, cuComplex tau, cuComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
cuComplex A(0.0,0.0);
/* miscellaneous setup */
cuComplex pai(3.14159265353898,0.0);
cuComplex ai(0.0,1.0);
cuComplex oo(1.0,0.0);
cuComplex oot(2.0,0.0);
cuComplex nini(9.0,0.0);
cuComplex eigh(-18.0,0.0);
/* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
cuComplex frann(1.0,0.0);
frann = pai * ai * tau ;
cuComplex shenn(1.0,0.0);
shenn = oot * ai * z;
cuComplex plenn(1.0,0.0);
cuComplex enn(1.0,0.0);
cuComplex ann(1.0,0.0);
cuComplex bnn(1.0,0.0);
cuComplex scrunn(1.0,0.0);
float ca, cb,cc;
int a, b;
for(a=-10;a<10;a++)
{
ann.r = a;
for(b=-10;b<10;b++)
{
bnn.r = b;
if(((a+b)%2)==0)
{
scrunn.r = a*a + b*b;
A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn));
}
else
{
ca = 5.0 + a*a + b*b;
cb = 2*(a * cos(R)- b * sin(R));
cc = 4*(b * cos(R)+a*sin(R));
scrunn.r = ca + cb + cc;
A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn));
}
}
}
return A;
}
__device__ cuComplex thetta(cuComplex tau, cuComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
cuComplex A(0.0,0.0);
/* miscellaneous setup */
cuComplex pai(3.14159265353898,0.0);
cuComplex ai(0.0,1.0);
cuComplex oo(1.0,0.0);
cuComplex oot(2.0,0.0);
cuComplex nini(9.0,0.0);
cuComplex eigh(-18.0,0.0);
/* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
cuComplex frann(1.0,0.0);
frann = pai * ai * tau ;
cuComplex shenn(1.0,0.0);
shenn = oot * ai * z;
cuComplex plenn(1.0,0.0);
cuComplex enn(1.0,0.0);
int n;
for(n=-10;n<10;n++)
{
enn.r = n;
plenn = enn * enn;
/* this get the cuComplex out of the event loop */
A = A + expc(frann* plenn) * expc(shenn* enn);
}
return A;
}
__device__ cuComplex mitlef(cuComplex z,cuComplex c)
{
cuComplex out(0.0,0.0);
cuComplex Z(1.0,0.0);
cuComplex frove(0.0,0.0);
int v;
for(v=0;v<20;v++)
{
frove.r = tgammaf(c.r*v+c.i);
out = out + Z/frove;
Z = Z * z;
}
return out;
}
__device__ cuComplex helva(cuComplex z)
{
cuComplex out(j0f(z.r),j1f(z.i));
return out;
}
__device__ cuComplex hylva(cuComplex z)
{
cuComplex out(j1f(1/j0f(z.r)),j1f(1/j0f(z.i)));
return out;
}
__device__ cuComplex hilva(cuComplex z)
{
cuComplex out(j1f(z.r),j0f(z.i));
return out;
}
__device__ cuComplex halva(cuComplex z)
{
cuComplex out(j0f(z.r),j0f(z.i));
return out;
}
__device__ cuComplex hinva(cuComplex z)
{
cuComplex out(j1f(z.r),j1f(z.i));
return out;
}
__device__ cuComplex henga(cuComplex z)
{
cuComplex out(acoshf(z.r),asinhf(z.i));
return out;
}
__device__ cuComplex holva(cuComplex z)
{
cuComplex out(y0f(z.r),y1f(z.i));
return out;
}
__device__ cuComplex aliva(cuComplex z)
{
cuComplex out(j1f(z.r),cyl_bessel_i1f(z.i));
return out;
}
__device__ cuComplex ariva(cuComplex z)
{
cuComplex out(sinf(z.i),cbrtf(z.r));
return out;
}
__device__ cuComplex arago(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex irigo(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex urigo(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * powc(hilva(q*z),helva(q*z)) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex arreg(cuComplex q, cuComplex r, cuComplex z)
{
/* arreg implements the representation of theta3'(z)/theta(z) I don't know if these are
derivatives with respect to z or q, we'll see */
cuComplex out(0.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex roo(1.0,0.0);
cuComplex morra(-1.0,0.0);
cuComplex tla(1.0,0.0);
cuComplex vnn(0.0,0.0);
cuComplex fou(4.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex run(1.0,0.0);
int v;
for(v=0;v<20;v++)
{
qoo = qoo * q;
roo = roo * r * r;
tla = tla * morra;
vnn = vnn + run;
out = out + morra*qoo*sins(tw*z*run)/(run-roo);
}
return fou*out;
}
__device__ cuComplex urreg(cuComplex q, cuComplex r, cuComplex z)
{
/* arreg implements the representation of theta3'(z)/theta(z) I don't know if these are
derivatives with respect to z or q, we'll see */
cuComplex out(0.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex roo(1.0,0.0);
cuComplex morra(-1.0,0.0);
cuComplex tla(1.0,0.0);
cuComplex vnn(0.0,0.0);
cuComplex fou(4.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex run(1.0,0.0);
int v;
for(v=0;v<10;v++)
{
qoo = qoo * q;
roo = roo * r * r;
tla = tla * morra;
vnn = vnn + run;
out = out + morra*qoo*the3(tw*z*run,r)/(run-roo);
}
return fou*out;
}
// * small q-exponential
__device__ cuComplex qexp(cuComplex z, cuComplex q)
{
cuComplex mone(-1.0,0.0);
cuComplex une(1.0,0.0);
return une/qpoch(z,q);
}
//* large q exponential is just qpoch(-z,q)
__device__ cuComplex qExp(cuComplex z, cuComplex q)
{
cuComplex mone(-1.0,0.0);
cuComplex une(1.0,0.0);
return qpoch(mone*z,q);
}
__device__ cuComplex sinq(cuComplex z, cuComplex q)
{
cuComplex aie(0.0,1.0);
cuComplex out(0.0,0.0);
cuComplex doo(2.0,0.0);
out = (qexp(z*aie,q) -qexp(z*aie,q))/doo;
return out;
}
__device__ cuComplex cosq(cuComplex z, cuComplex q)
{
cuComplex aie(0.0,1.0);
cuComplex out(0.0,0.0);
cuComplex doo(2.0,0.0);
out = (qexp(z*aie,q) +qexp(z*aie,q))/doo;
return out;
}
__device__ cuComplex Sinq(cuComplex z, cuComplex q)
{
cuComplex aie(0.0,1.0);
cuComplex out(0.0,0.0);
cuComplex doo(2.0,0.0);
out = (qExp(z*aie,q) -qExp(z*aie,q))/doo;
return out;
}
__device__ cuComplex Cosq(cuComplex z, cuComplex q)
{
cuComplex aie(0.0,1.0);
cuComplex out(0.0,0.0);
cuComplex doo(2.0,0.0);
out = (qExp(z*aie,q) +qExp(z*aie,q))/doo;
return out;
}
__device__ cuComplex asins(cuComplex z)
{
float alp = 0.5 * (sqrtf((z.r+1)*(z.r+1) + z.i*z.i) + sqrtf((z.r-1)*(z.r-1) + z.i*z.i));
float bet = 0.5 * (sqrtf((z.r+1)*(z.r+1) + z.i*z.i) - sqrtf((z.r-1)*(z.r-1) + z.i*z.i));
float fla = z.i/abs(z.i);
// *signum, but without a comparison, probably a saner way to do this? //
cuComplex out(0.0,0.0);
out.r = asinf(bet);
out.i = fla * logf(alp + sqrtf(alp*alp-1));
return out;
}
__device__ int gcd(int a, int b)
{
int remainder = a % b;
if (remainder == 0) {
return b;
}
return gcd(b, remainder);
}
/* Real Analytic Eisenstein Series */
__device__ cuComplex reis(cuComplex s, cuComplex z)
{
// see en.wikipedia.org/wiki/Real_analytic_Eisenstein_series
cuComplex out(0.0,0.0);
cuComplex hav(0.5,0.0);
cuComplex xu=out;
cuComplex yu=out;
yu.r = z.i;
int m,n;
cuComplex ema=out;
cuComplex ena=out;
cuComplex den=out;
for(m=-20;m<20;m++)
{
for(n=-20;n<20;n++)
{
if((m!=0)&&(n!=0))
{
if((gcd(m,n)==1))
{
ena.r = n;
ema.r = m;
den.r = norg(ema*z+ena);
out = out + powc(yu,s)/powc(den,s/hav);
}
}
}
}
return out;
}
__device__ cuComplex thu3(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * asins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex trev(cuComplex lav, cuComplex mel, cuComplex rel)
{
cuComplex out(0.0,0.0);
cuComplex V(0.739085133215160641655312087674,0.0);
int v;
for(v=0;v<3;v++)
{
lav = lav - rel*(cosc(lav)-powc(V,rel))/cosc(lav);
out = out + mel*(cosc(lav)-powc(V,mel));
}
return out;
}
__device__ cuComplex polylog(cuComplex z, cuComplex s)
{
cuComplex out(0.0,0.0);
cuComplex oom(1.0,0.0);
cuComplex flag=oom;
int v;
for(v=0;v<30;v++)
{
flag = flag + oom;
out = out + powc(z,flag)/powc(flag,s);
}
return out;
}
__device__
unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r= blockIdx.y*blockDim.y + threadIdx.y;
const int i = c + r*w; // 1D indexing
float pi = 3.1415926535898;
cuComplex ip(pi,0.0);
const float scale =
14.3;
float fx = scale * (float)(DIM/2 - c)/(DIM/2);
float fy = scale * (float)(DIM/2 - r)/(DIM/2);
cuComplex effx(fx,0.0);
cuComplex effy(fy,0.0);
float LA = scale * (float)(DIM/2 - pos.x)/(DIM/2);
float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2);
cuComplex mouse(LA,LB);
cuComplex moux(LA,0.0);
cuComplex mouy(0.0,LB);
cuComplex q(fx,fy);
/* cuComplex tik(sin(ticks/40.0f),0.0);*/
/* cuComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0));
cuComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024));
cuComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/
cuComplex fixon(.029348,.828934);
cuComplex faxon(.029348,-.828934);
cuComplex unity(1.0,0.0);
cuComplex ai(0.0,1.0);
cuComplex aon = expc(ai*moux);
cuComplex uon= expc(mouy);
cuComplex flurn(0.0,0.0);
cuComplex accume(1.0,0.0);
cuComplex eccume(1.0,0.0);
cuComplex rhun(1.02871376821872462237195122725097462534904479,0.0);
cuComplex cue = q;
cuComplex lam(0.73736887807831963, -0.67549029426152396);
cuComplex due(3.0,0.0);
cuComplex tir(2.0,0.0);
cuComplex selga(3.5,0.0);
cuComplex vro(-1.0,0.0);
cuComplex tle(0.0,0.0);
cuComplex sle(4.0,0.0);
cuComplex cherra(0.62348980185873359, 0.7818314824680298);
cuComplex lerra = cherra*cherra;
cuComplex ferra = lerra * cherra;
cuComplex terra = ferra * cherra;
cuComplex zerra = terra * cherra;
cuComplex nerra = zerra * cherra;
cuComplex vlarv(1/3.0,0.0);
cuComplex sugna(0.70710678118654757, 0.70710678118654746);
cuComplex regna(0.99966573338968745, 0.025853848581176047);
cuComplex spa(sqrtf(2.0),0.0);
cuComplex spb(sqrtf(3.0),0.0);
cuComplex spc(sqrtf(4.0),0.0);
cuComplex spd(sqrtf(5.0),0.0);
cuComplex mrun(1/2.0,0.0);
cuComplex gloon (4.0,0.0);
cuComplex plenod(-.01,0.0);
cuComplex nue = cue;
cuComplex bor(-10.0,0.0);
cuComplex nat(0.0,-10.0);
cuComplex rhus(1.0,0.0);
cuComplex D(0.739085133215160641655312087674,0.0);
cuComplex gren(2.0,0.0);
cuComplex next=flurn;
cuComplex current = cue;
cuComplex xnext = flurn;
cuComplex xcurrent = cue;
cuComplex rue=cue;
cuComplex tinny(.0001,0.0001);
cuComplex raga(0.5,27.0);
cuComplex ruga(0.5,0.0);
cuComplex senna(2.0,0.0);
cuComplex renna(3.0,0.0);
cuComplex finch(0.001,.001);
cuComplex smenn(0.5,sqrtf(3.0)/2.0);
cuComplex lmenn(0.96592582628906831,0.25881904510252074);
cuComplex vmenn(-0.5,-sqrtf(3.0)/2.0);
float ah, ex, feig;
feig = 3.67;
ex = 2.10981;
float xa,xb,ya,yb,tta,ttb;
char va,vb,vc;
cuComplex seahorse(-0.75,0.123);
/* if ((c >= w) || (r >= h)) return; // Check if within image bounds
const int i = c + r*w; // 1D indexing
const int dist = sqrtf((c - pos.x)*(c - pos.x) +
(r - pos.y)*(r - pos.y));
const unsigned char intensity = clip(255 - dist);*/
// theta function varying on constant
// cue =thess(cue,fixon*mouse);
int v=1;
int axa=-10;
int uu;
/* encode the nome in q */
cue = mob(mouse,cue);
cue = the3(q,cue);
/*while((v<100)&&norg(cue)<2.0)
{
cue = cue*(cue-mouy)*(cue-moux) -cue * q;
v++;
}*/
// One way of describing this would be we want to perform Newton's method
//on the Mandelbrot set
/* preiterate */
//tex.stackexchange.com/questions/278843/making-a-phase-portrait-of-two-autonomous-system-of-differential-equations-with?fbclid=IwAR2Tz66CbUAq7LFVYck4uUGF5uQWnmzf5iZw3Bi8IOycvCC7czO6ZVgkz3s
// this is not terribly hard to do with cuda
// what we need:
// x' = x - y -> dx / dt = x - y
// y' = 1 - x^2 -> dy / dt = 1-x^2
// dy / dx = (dy / dt) / (dx/ dt)
// so the trick is to convert dy/dx into a unit complex number to make this work, okay that's not that difficult
{
double tha;
tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
d_out[i].w = 255;
}
}
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos);
}
/*for(v=1;v<5;v++)
{
cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
}
cue = accume;*/
/*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/
/*for(v=0;v<60;v++){
cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/
/*
One for
(x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0
The other for
cos(x)-x
Eripgrunna
*/ |
f1585d42bcab0e7f83679259dcd9f862b6b80e71.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
// TODO
int k = blockIdx.x*blockDim.x+threadIdx.x;
if(k>=n)
return;
            if(idata[k]!=0)
                bools[k]=1;
            else
                bools[k]=0;
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
// TODO
int k = blockIdx.x*blockDim.x+threadIdx.x;
if(k>=n)
return;
if(bools[k]==1)
odata[indices[k]] = idata[k];
}
}
}
| f1585d42bcab0e7f83679259dcd9f862b6b80e71.cu | #include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
// TODO
int k = blockIdx.x*blockDim.x+threadIdx.x;
if(k>=n)
return;
            if(idata[k]!=0)
                bools[k]=1;
            else
                bools[k]=0;
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
// TODO
int k = blockIdx.x*blockDim.x+threadIdx.x;
if(k>=n)
return;
if(bools[k]==1)
odata[indices[k]] = idata[k];
}
}
}
|
f7d04352ee9cd1b45a222eaed9f33e84ff9cd33e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//xfail:BOOGIE_ERROR
//--blockDim=16 --gridDim=16 --no-inline
//a = 12
//b = 36
//c = 48
__global__ void example(unsigned a, unsigned b, unsigned c) {
__requires(a == 12);
__requires(b == 36);
__assert(a + b != c);
}
| f7d04352ee9cd1b45a222eaed9f33e84ff9cd33e.cu | //xfail:BOOGIE_ERROR
//--blockDim=16 --gridDim=16 --no-inline
//a = 12
//b = 36
//c = 48
__global__ void example(unsigned a, unsigned b, unsigned c) {
__requires(a == 12);
__requires(b == 36);
__assert(a + b != c);
}
|
b8ea27c8561615b65c66493bef66e98836c3e335.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
#include"utils.h"
__device__
unsigned long int getOffset_DEVICE(int * indices, int * dimensions, int num_dimensions){
/*
* Function: getOffset
* -------------------
* compute the offset from the base address of an array for given indices corresponding
* to dimensions.
* indices: pointer to array of indices e.g A[1,2,3]
* dimensions: pointer to array of dimensions e.g dimensionality of A = 2x3x4
* num_dimensions: number of dimensions e.g dimensions(A) = 3
*
* returns:
* -------
* offset: the offset from the base memory address. (NOTE: independant of datatype)
*/
    unsigned long int offset = 0;
for(int indexIdx=0; indexIdx<num_dimensions; indexIdx++){
unsigned long int product = indices[indexIdx];
for(int dimIdx=indexIdx+1; dimIdx<num_dimensions; dimIdx ++){
product = product * dimensions[dimIdx];
}
offset = offset + product;
}
return offset;
}
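// KERNEL_max: channel-wise max pooling -- each (row, col) thread scans the depth
// dimension of d_in and writes the maximum value into the 2-D output d_out.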
__global__
void KERNEL_max(float * d_out, int * d_out_DIMS, float * d_in, int * d_in_DIMS)
{
//printf("Block x: %d , y %d\n",blockIdx.x, blockIdx.y);
int col = blockIdx.x*blockDim.x + threadIdx.x;
int row = blockIdx.y*blockDim.y + threadIdx.y;
int indices[] = {0,row,col};
// bounds checking
if ( (indices[1]>=d_in_DIMS[1]) || (indices[2]>=d_in_DIMS[2]) ){
return;
}
float max_value = *(d_in + getOffset_DEVICE(indices, d_in_DIMS, 3));
int DEPTH = d_in_DIMS[0];
for(int i=0; i<DEPTH; i++){
int indices2[] = {i, row, col};
if ( max_value < *(d_in+getOffset_DEVICE(indices2, d_in_DIMS, 3)) ){
max_value = *(d_in+getOffset_DEVICE(indices2, d_in_DIMS, 3));
}
}
// store the max value in the (row,col) location of d_out
int indices3[] = {row,col};
*(d_out + getOffset_DEVICE(indices3, d_out_DIMS, 2)) = max_value;
}
int main()
{
srand(time(0));
const int HEIGHT=6;
const int WIDTH=7;
const int DEPTH=2;
// input array (host)
int h_in_DIMS[] = {DEPTH, HEIGHT, WIDTH};
float * h_in = new3dArray(DEPTH, HEIGHT, WIDTH);
randFill3d(h_in, h_in_DIMS);
printArr3d(h_in, h_in_DIMS);
// output array (host)
int h_out_DIMS[] = {HEIGHT, WIDTH};
float * h_out = new2dArray(HEIGHT, WIDTH);
randFill2d(h_out, h_out_DIMS);
// max value
//KERNEL_max(h_out, h_out_DIMS, h_in, h_in_DIMS, 2,3);
// Split the computation up into multiple tiles
const int MAX_TILE_DIM = 4;
int NUM_TILES_X = ceil(float(WIDTH) / MAX_TILE_DIM);
int NUM_TILES_Y = ceil(float(HEIGHT) / MAX_TILE_DIM);
dim3 grid(NUM_TILES_X, NUM_TILES_Y);
printf("\nX_TILES:%d Y_TILEs: %d \n",NUM_TILES_X, NUM_TILES_Y);
float * d_in;
float * d_out;
int * d_out_DIMS;
int * d_in_DIMS;
hipMalloc((void **) &d_in, DEPTH*HEIGHT*WIDTH*sizeof(float));
hipMalloc((void **) &d_out, HEIGHT*WIDTH*sizeof(float));
hipMalloc((void **) &d_out_DIMS, 2*sizeof(int));
hipMalloc((void **) &d_in_DIMS, 3*sizeof(int));
hipMemcpy(d_in, h_in, DEPTH*HEIGHT*WIDTH*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_in_DIMS, h_in_DIMS, 3*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_out_DIMS, h_out_DIMS, 2*sizeof(int), hipMemcpyHostToDevice);
//dim3 block(WIDTH, HEIGHT);
dim3 block(MAX_TILE_DIM, MAX_TILE_DIM);
hipLaunchKernelGGL(( KERNEL_max), dim3(grid), dim3(block), 0, 0, d_out, d_out_DIMS, d_in, d_in_DIMS);
hipMemcpy(h_out, d_out, HEIGHT*WIDTH*sizeof(float), hipMemcpyDeviceToHost);
printf("\n Final channel pooled output -->\n");
printArr2d(h_out, h_out_DIMS);
    hipFree(d_in);
    hipFree(d_out);
    hipFree(d_in_DIMS);
    hipFree(d_out_DIMS);
free(h_in);
free(h_out);
return 0;
}
| b8ea27c8561615b65c66493bef66e98836c3e335.cu | #include<stdio.h>
#include<stdlib.h>
#include<time.h>
#include"utils.h"
__device__
unsigned long int getOffset_DEVICE(int * indices, int * dimensions, int num_dimensions){
/*
* Function: getOffset
* -------------------
* compute the offset from the base address of an array for given indices corresponding
* to dimensions.
* indices: pointer to array of indices e.g A[1,2,3]
* dimensions: pointer to array of dimensions e.g dimensionality of A = 2x3x4
* num_dimensions: number of dimensions e.g dimensions(A) = 3
*
* returns:
* -------
     * offset: the offset from the base memory address. (NOTE: independent of datatype)
*/
    unsigned long int offset = 0;
for(int indexIdx=0; indexIdx<num_dimensions; indexIdx++){
unsigned long int product = indices[indexIdx];
for(int dimIdx=indexIdx+1; dimIdx<num_dimensions; dimIdx ++){
product = product * dimensions[dimIdx];
}
offset = offset + product;
}
return offset;
}
__global__
void KERNEL_max(float * d_out, int * d_out_DIMS, float * d_in, int * d_in_DIMS)
{
//printf("Block x: %d , y %d\n",blockIdx.x, blockIdx.y);
int col = blockIdx.x*blockDim.x + threadIdx.x;
int row = blockIdx.y*blockDim.y + threadIdx.y;
int indices[] = {0,row,col};
// bounds checking
if ( (indices[1]>=d_in_DIMS[1]) || (indices[2]>=d_in_DIMS[2]) ){
return;
}
float max_value = *(d_in + getOffset_DEVICE(indices, d_in_DIMS, 3));
int DEPTH = d_in_DIMS[0];
for(int i=0; i<DEPTH; i++){
int indices2[] = {i, row, col};
if ( max_value < *(d_in+getOffset_DEVICE(indices2, d_in_DIMS, 3)) ){
max_value = *(d_in+getOffset_DEVICE(indices2, d_in_DIMS, 3));
}
}
// store the max value in the (row,col) location of d_out
int indices3[] = {row,col};
*(d_out + getOffset_DEVICE(indices3, d_out_DIMS, 2)) = max_value;
}
int main()
{
srand(time(0));
const int HEIGHT=6;
const int WIDTH=7;
const int DEPTH=2;
// input array (host)
int h_in_DIMS[] = {DEPTH, HEIGHT, WIDTH};
float * h_in = new3dArray(DEPTH, HEIGHT, WIDTH);
randFill3d(h_in, h_in_DIMS);
printArr3d(h_in, h_in_DIMS);
// output array (host)
int h_out_DIMS[] = {HEIGHT, WIDTH};
float * h_out = new2dArray(HEIGHT, WIDTH);
randFill2d(h_out, h_out_DIMS);
// max value
//KERNEL_max(h_out, h_out_DIMS, h_in, h_in_DIMS, 2,3);
// Split the computation up into multiple tiles
const int MAX_TILE_DIM = 4;
int NUM_TILES_X = ceil(float(WIDTH) / MAX_TILE_DIM);
int NUM_TILES_Y = ceil(float(HEIGHT) / MAX_TILE_DIM);
dim3 grid(NUM_TILES_X, NUM_TILES_Y);
printf("\nX_TILES:%d Y_TILEs: %d \n",NUM_TILES_X, NUM_TILES_Y);
float * d_in;
float * d_out;
int * d_out_DIMS;
int * d_in_DIMS;
cudaMalloc((void **) &d_in, DEPTH*HEIGHT*WIDTH*sizeof(float));
cudaMalloc((void **) &d_out, HEIGHT*WIDTH*sizeof(float));
cudaMalloc((void **) &d_out_DIMS, 2*sizeof(int));
cudaMalloc((void **) &d_in_DIMS, 3*sizeof(int));
cudaMemcpy(d_in, h_in, DEPTH*HEIGHT*WIDTH*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_in_DIMS, h_in_DIMS, 3*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_out_DIMS, h_out_DIMS, 2*sizeof(int), cudaMemcpyHostToDevice);
//dim3 block(WIDTH, HEIGHT);
dim3 block(MAX_TILE_DIM, MAX_TILE_DIM);
KERNEL_max<<<grid, block>>>(d_out, d_out_DIMS, d_in, d_in_DIMS);
cudaMemcpy(h_out, d_out, HEIGHT*WIDTH*sizeof(float), cudaMemcpyDeviceToHost);
printf("\n Final channel pooled output -->\n");
printArr2d(h_out, h_out_DIMS);
    cudaFree(d_in);
    cudaFree(d_out);
    cudaFree(d_in_DIMS);
    cudaFree(d_out_DIMS);
free(h_in);
free(h_out);
return 0;
}
|
8d10979b9c3313c769a32886cdc2a4f51b4cf679.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <iostream>
#include <algorithm>
/* Every thread gets exactly one value in the unsorted array. */
#define THREADS 1024 // 2^10
#define BLOCKS 32768 // 2^15
#define NUM_VALS THREADS*BLOCKS
using namespace std;
void print_elapsed(clock_t start, clock_t stop) {
double elapsed = ((double)(stop - start)) / CLOCKS_PER_SEC;
printf("Elapsed time: %.3fs\n", elapsed);
}
void array_print(int* arr, int length) {
int i;
for (i = 0; i < length; ++i) {
printf("%1.3f ", arr[i]);
}
printf("\n");
}
void array_fill(int* arr, int length) {
srand(time(NULL));
int i;
for (i = 0; i < length; ++i) {
arr[i] = rand();
}
}
bool comparison_arrays(int* arr1, int* arr2, int length) {
for (int i = 0; i < length; i++) {
if (arr1[i] != arr2[i]) {
return false;
}
}
return true;
}
int* get_copy_array(int* sourse, int length) {
int* dest = new int[length];
for (int i = 0; i < length; i++) {
dest[i] = sourse[i];
}
return dest;
}
int power_ceil(int x) {
if (x <= 1) return 1;
int power = 2;
x--;
while (x >>= 1) power <<= 1;
return power;
}
__global__ void bitonic_sort_step(int* dev_values, int j, int k) {
unsigned int i, ixj;
i = threadIdx.x + blockDim.x * blockIdx.x;
ixj = i ^ j;
if ((ixj) > i) {
if ((i & k) == 0) {
/* Sort ascending */
if (dev_values[i] > dev_values[ixj]) {
/* exchange(i,ixj); */
int temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
if ((i & k) != 0) {
/* Sort descending */
if (dev_values[i] < dev_values[ixj]) {
/* exchange(i,ixj); */
int temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
}
}
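/* Illustrative note (not part of the original file): how the XOR partnering above
 * behaves. For j = 2, thread i = 5 (0b101) computes ixj = 5 ^ 2 = 7; since 7 > 5,
 * thread 5 performs the compare-exchange while thread 7 (whose ixj is 5) does nothing.
 * The (i & k) test then selects the direction: with k = 4, bit 2 of i = 5 is set, so
 * that pair is ordered descending. */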
void bitonic_sort(int* values) {
int* dev_values;
size_t size = NUM_VALS * sizeof(int);
hipMalloc((void**)&dev_values, size);
hipMemcpy(dev_values, values, size, hipMemcpyHostToDevice);
dim3 blocks(BLOCKS, 1);
dim3 threads(THREADS, 1);
int j, k;
for (k = 2; k <= NUM_VALS; k <<= 1) {
for (j = k >> 1; j > 0; j = j >> 1) {
hipLaunchKernelGGL(( bitonic_sort_step) , dim3(blocks), dim3(threads), 0, 0, dev_values, j, k);
}
}
hipMemcpy(values, dev_values, size, hipMemcpyDeviceToHost);
hipFree(dev_values);
}
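/* Illustrative note (not part of the original file): with THREADS*BLOCKS = 2^25 values,
 * the outer k loop runs 25 times and the inner j loop runs m times when k = 2^m, so the
 * host issues 1 + 2 + ... + 25 = 325 kernel launches, each touching all 2^25 elements. */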
bool is_bitonic(int*v, int length) {
bool was_decreasing = v[length - 1] > v[0];
int num_inflections = 0;
for (int i = 0; i < length && num_inflections <= 2; i++) {
bool is_decreasing = v[i] > v[(i + 1) % length];
// Check if this element and next one are an inflection.
if (was_decreasing != is_decreasing) {
num_inflections++;
was_decreasing = is_decreasing;
}
}
return 2 == num_inflections;
}
int main(void)
{
clock_t start, stop;
int length = 0;
cout << "Enter length of the array: ";
cin >> length;
int* values = (int*)malloc(NUM_VALS * sizeof(int));
array_fill(values, NUM_VALS);
int* temp = get_copy_array(values, NUM_VALS);
sort(temp, temp + NUM_VALS);
start = clock();
bitonic_sort(values);
stop = clock();
cout << "is_bitonic: " << is_bitonic(values, NUM_VALS) << endl;;
cout << "is equals: " << comparison_arrays(values, temp, NUM_VALS) << endl;
    print_elapsed(start, stop);
    free(values);
    delete[] temp;
}
| 8d10979b9c3313c769a32886cdc2a4f51b4cf679.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <iostream>
#include <algorithm>
/* Every thread gets exactly one value in the unsorted array. */
#define THREADS 1024 // 2^10
#define BLOCKS 32768 // 2^15
#define NUM_VALS THREADS*BLOCKS
using namespace std;
void print_elapsed(clock_t start, clock_t stop) {
double elapsed = ((double)(stop - start)) / CLOCKS_PER_SEC;
printf("Elapsed time: %.3fs\n", elapsed);
}
void array_print(int* arr, int length) {
int i;
for (i = 0; i < length; ++i) {
printf("%1.3f ", arr[i]);
}
printf("\n");
}
void array_fill(int* arr, int length) {
srand(time(NULL));
int i;
for (i = 0; i < length; ++i) {
arr[i] = rand();
}
}
bool comparison_arrays(int* arr1, int* arr2, int length) {
for (int i = 0; i < length; i++) {
if (arr1[i] != arr2[i]) {
return false;
}
}
return true;
}
int* get_copy_array(int* sourse, int length) {
int* dest = new int[length];
for (int i = 0; i < length; i++) {
dest[i] = sourse[i];
}
return dest;
}
int power_ceil(int x) {
if (x <= 1) return 1;
int power = 2;
x--;
while (x >>= 1) power <<= 1;
return power;
}
__global__ void bitonic_sort_step(int* dev_values, int j, int k) {
unsigned int i, ixj;
i = threadIdx.x + blockDim.x * blockIdx.x;
ixj = i ^ j;
if ((ixj) > i) {
if ((i & k) == 0) {
/* Sort ascending */
if (dev_values[i] > dev_values[ixj]) {
/* exchange(i,ixj); */
int temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
if ((i & k) != 0) {
/* Sort descending */
if (dev_values[i] < dev_values[ixj]) {
/* exchange(i,ixj); */
int temp = dev_values[i];
dev_values[i] = dev_values[ixj];
dev_values[ixj] = temp;
}
}
}
}
void bitonic_sort(int* values) {
int* dev_values;
size_t size = NUM_VALS * sizeof(int);
cudaMalloc((void**)&dev_values, size);
cudaMemcpy(dev_values, values, size, cudaMemcpyHostToDevice);
dim3 blocks(BLOCKS, 1);
dim3 threads(THREADS, 1);
int j, k;
for (k = 2; k <= NUM_VALS; k <<= 1) {
for (j = k >> 1; j > 0; j = j >> 1) {
bitonic_sort_step <<<blocks, threads>>> (dev_values, j, k);
}
}
cudaMemcpy(values, dev_values, size, cudaMemcpyDeviceToHost);
cudaFree(dev_values);
}
bool is_bitonic(int*v, int length) {
bool was_decreasing = v[length - 1] > v[0];
int num_inflections = 0;
for (int i = 0; i < length && num_inflections <= 2; i++) {
bool is_decreasing = v[i] > v[(i + 1) % length];
// Check if this element and next one are an inflection.
if (was_decreasing != is_decreasing) {
num_inflections++;
was_decreasing = is_decreasing;
}
}
return 2 == num_inflections;
}
int main(void)
{
clock_t start, stop;
int length = 0;
cout << "Enter length of the array: ";
cin >> length;
int* values = (int*)malloc(NUM_VALS * sizeof(int));
array_fill(values, NUM_VALS);
int* temp = get_copy_array(values, NUM_VALS);
sort(temp, temp + NUM_VALS);
start = clock();
bitonic_sort(values);
stop = clock();
cout << "is_bitonic: " << is_bitonic(values, NUM_VALS) << endl;;
cout << "is equals: " << comparison_arrays(values, temp, NUM_VALS) << endl;
    print_elapsed(start, stop);
    free(values);
    delete[] temp;
}
|
34a9dba5fa506ea9ba5dc152e64d5fa37a9cc283.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel3_plus_4_front;
int xdim0_update_halo_kernel3_plus_4_front_h = -1;
__constant__ int ydim0_update_halo_kernel3_plus_4_front;
int ydim0_update_halo_kernel3_plus_4_front_h = -1;
__constant__ int xdim1_update_halo_kernel3_plus_4_front;
int xdim1_update_halo_kernel3_plus_4_front_h = -1;
__constant__ int ydim1_update_halo_kernel3_plus_4_front;
int ydim1_update_halo_kernel3_plus_4_front_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel3_plus_4_front * (y) + \
xdim0_update_halo_kernel3_plus_4_front * \
ydim0_update_halo_kernel3_plus_4_front * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel3_plus_4_front * (y) + \
xdim1_update_halo_kernel3_plus_4_front * \
ydim1_update_halo_kernel3_plus_4_front * (z))
// user function
__device__
inline void
update_halo_kernel3_plus_4_front(double *vol_flux_x, double *mass_flux_x,
const int *fields) {
if (fields[FIELD_VOL_FLUX_X] == 1)
vol_flux_x[OPS_ACC0(0, 0, 0)] = vol_flux_x[OPS_ACC0(0, 0, -4)];
if (fields[FIELD_MASS_FLUX_X] == 1)
mass_flux_x[OPS_ACC1(0, 0, 0)] = mass_flux_x[OPS_ACC1(0, 0, -4)];
}
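// Illustrative note (not part of the original file): OPS_ACC0(x,y,z) linearizes to
// x + xdim0*y + xdim0*ydim0*z relative to the current point, so OPS_ACC0(0,0,-4)
// reaches back exactly four z-planes; the assignments above copy each flux value
// from the plane four layers behind the one being updated, which is the "+4 front"
// halo fill the kernel name refers to.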
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel3_plus_4_front(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel3_plus_4_front +
idx_z * 1 * 1 * xdim0_update_halo_kernel3_plus_4_front *
ydim0_update_halo_kernel3_plus_4_front;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel3_plus_4_front +
idx_z * 1 * 1 * xdim1_update_halo_kernel3_plus_4_front *
ydim1_update_halo_kernel3_plus_4_front;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel3_plus_4_front(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel3_plus_4_front(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 115))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(115, "update_halo_kernel3_plus_4_front");
OPS_kernels[115].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel3_plus_4_front_h ||
ydim0 != ydim0_update_halo_kernel3_plus_4_front_h ||
xdim1 != xdim1_update_halo_kernel3_plus_4_front_h ||
ydim1 != ydim1_update_halo_kernel3_plus_4_front_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel3_plus_4_front, &xdim0,
sizeof(int));
xdim0_update_halo_kernel3_plus_4_front_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel3_plus_4_front, &ydim0,
sizeof(int));
ydim0_update_halo_kernel3_plus_4_front_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel3_plus_4_front, &xdim1,
sizeof(int));
xdim1_update_halo_kernel3_plus_4_front_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel3_plus_4_front, &ydim1,
sizeof(int));
ydim1_update_halo_kernel3_plus_4_front_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[115].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel3_plus_4_front), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[115].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[115].mpi_time += t2 - t1;
OPS_kernels[115].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[115].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
| 34a9dba5fa506ea9ba5dc152e64d5fa37a9cc283.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel3_plus_4_front;
int xdim0_update_halo_kernel3_plus_4_front_h = -1;
__constant__ int ydim0_update_halo_kernel3_plus_4_front;
int ydim0_update_halo_kernel3_plus_4_front_h = -1;
__constant__ int xdim1_update_halo_kernel3_plus_4_front;
int xdim1_update_halo_kernel3_plus_4_front_h = -1;
__constant__ int ydim1_update_halo_kernel3_plus_4_front;
int ydim1_update_halo_kernel3_plus_4_front_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel3_plus_4_front * (y) + \
xdim0_update_halo_kernel3_plus_4_front * \
ydim0_update_halo_kernel3_plus_4_front * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel3_plus_4_front * (y) + \
xdim1_update_halo_kernel3_plus_4_front * \
ydim1_update_halo_kernel3_plus_4_front * (z))
// user function
__device__
inline void
update_halo_kernel3_plus_4_front(double *vol_flux_x, double *mass_flux_x,
const int *fields) {
if (fields[FIELD_VOL_FLUX_X] == 1)
vol_flux_x[OPS_ACC0(0, 0, 0)] = vol_flux_x[OPS_ACC0(0, 0, -4)];
if (fields[FIELD_MASS_FLUX_X] == 1)
mass_flux_x[OPS_ACC1(0, 0, 0)] = mass_flux_x[OPS_ACC1(0, 0, -4)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel3_plus_4_front(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel3_plus_4_front +
idx_z * 1 * 1 * xdim0_update_halo_kernel3_plus_4_front *
ydim0_update_halo_kernel3_plus_4_front;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel3_plus_4_front +
idx_z * 1 * 1 * xdim1_update_halo_kernel3_plus_4_front *
ydim1_update_halo_kernel3_plus_4_front;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel3_plus_4_front(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel3_plus_4_front(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 115))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(115, "update_halo_kernel3_plus_4_front");
OPS_kernels[115].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel3_plus_4_front_h ||
ydim0 != ydim0_update_halo_kernel3_plus_4_front_h ||
xdim1 != xdim1_update_halo_kernel3_plus_4_front_h ||
ydim1 != ydim1_update_halo_kernel3_plus_4_front_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel3_plus_4_front, &xdim0,
sizeof(int));
xdim0_update_halo_kernel3_plus_4_front_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel3_plus_4_front, &ydim0,
sizeof(int));
ydim0_update_halo_kernel3_plus_4_front_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel3_plus_4_front, &xdim1,
sizeof(int));
xdim1_update_halo_kernel3_plus_4_front_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel3_plus_4_front, &ydim1,
sizeof(int));
ydim1_update_halo_kernel3_plus_4_front_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[115].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel3_plus_4_front<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[115].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[115].mpi_time += t2 - t1;
OPS_kernels[115].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[115].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
785c188fb415fe928836acfaf8d0ef3f8230ac3a.hip | // !!! This is a file automatically generated by hipify!!!
#include <opendnn.h>
#include <iostream>
#include <cstring>
using namespace std;
int main () {
// Declaration
opendnnHandle_t handle;
opendnnTensorDescriptor_t input_desc, output_desc, bias_desc;
opendnnFilterDescriptor_t filter_desc;
opendnnConvolutionDescriptor_t conv_desc;
// Test dimensions
int n=1; // batch
int c=1, h=4, w=4;
int oc=1, oh=3, ow=3;
int kh=2, kw=2;
  int ngroup = 1; // Grouped convolution for AlexNet, MobileNet, etc.
// Initialization
opendnnCreate(&handle);
opendnnCreateTensorDescriptor(&input_desc);
opendnnCreateFilterDescriptor(&filter_desc);
opendnnCreateTensorDescriptor(&output_desc);
opendnnCreateTensorDescriptor(&bias_desc);
opendnnCreateConvolutionDescriptor(&conv_desc);
opendnnSetTensor4dDescriptor(input_desc, n, c, h, w);
opendnnSetFilter4dDescriptor(filter_desc, oc, c, kh, kw);
opendnnSetTensor4dDescriptor(output_desc, n, oc, oh, ow);
opendnnSetTensor4dDescriptor(bias_desc, 1, oc, 1, 1);
opendnnSetConvolution2dDescriptor(conv_desc, /*pad_h=*/0,/*pad_w=*/0,
/*str_h=*/1,/*str_w=*/1,
/*dir_h=*/0,/*dir_w=*/0);
opendnnSetConvolutionGroupCount(conv_desc, ngroup);
const float input[16] = {1,2,3,4,1,2,3,4,1,2,3,4,1,2,3,4};
const float filter[4] = {1,0,0,1};
float output[9] = {0,};
float bias[1] = {1};
// GPU device memory allocation and init
float *input_dev, *filter_dev, *output_dev, *bias_dev = NULL;
hipMalloc(&input_dev, sizeof(float)*n*c*h*w);
hipMalloc(&filter_dev, sizeof(float)*oc*c*kh*kw);
hipMalloc(&output_dev, sizeof(float)*n*oc*oh*ow);
hipMalloc(&bias_dev, sizeof(float)*c);
hipMemcpy(input_dev, input, sizeof(float)*n*c*h*w, hipMemcpyHostToDevice);
hipMemcpy(filter_dev, filter, sizeof(float)*oc*c*kh*kw, hipMemcpyHostToDevice);
hipMemcpy(output_dev, output, sizeof(float)*n*oc*oh*ow, hipMemcpyHostToDevice);
hipMemcpy(bias_dev, bias, sizeof(float)*c, hipMemcpyHostToDevice);
// For now, workspace is needed to save im2col transposed input tensor
// opendnnGetConvolutionForwardWorkspaceSize returns just n*oc*kh*kw*oh*ow*sizeof(float)
size_t size_in_bytes;
float* workspace;
opendnnGetConvolutionForwardWorkspaceSize(handle,
input_desc,
filter_desc, conv_desc,
output_desc, &size_in_bytes);
hipMalloc(&workspace, size_in_bytes);
// Perform convolution
opendnnConvolutionForward(handle,
input_desc, input_dev,
filter_desc, filter_dev, conv_desc,
workspace, size_in_bytes,
output_desc, output_dev
);
// Perform bias addition
opendnnAddTensor(handle, bias_desc, bias_dev, output_desc, output_dev);
// Get results
hipMemcpy(output, output_dev, sizeof(float)*n*oc*oh*ow, hipMemcpyDeviceToHost);
for (int i = 0; i < 9; i++) {
cout << output[i] << '\n';
}
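  // Illustrative sanity check (not part of the original file): assuming the usual
  // cross-correlation semantics (equivalent to true convolution here, since this
  // filter is symmetric under 180-degree rotation), each output adds an input value
  // to its lower-right neighbour, so every row should print 4 6 8 once the +1 bias
  // from opendnnAddTensor is applied (e.g. out[0][0] = 1 + 2 + 1 = 4).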
cout << "Done" << endl;
  hipFree(input_dev);
  hipFree(filter_dev);
  hipFree(output_dev);
  hipFree(bias_dev);
  hipFree(workspace);
  opendnnDestroy(handle);
  return 0;
}
| 785c188fb415fe928836acfaf8d0ef3f8230ac3a.cu | #include <opendnn.h>
#include <iostream>
#include <cstring>
using namespace std;
int main () {
// Declaration
opendnnHandle_t handle;
opendnnTensorDescriptor_t input_desc, output_desc, bias_desc;
opendnnFilterDescriptor_t filter_desc;
opendnnConvolutionDescriptor_t conv_desc;
// Test dimensions
int n=1; // batch
int c=1, h=4, w=4;
int oc=1, oh=3, ow=3;
int kh=2, kw=2;
  int ngroup = 1; // Grouped convolution for AlexNet, MobileNet, etc.
// Initialization
opendnnCreate(&handle);
opendnnCreateTensorDescriptor(&input_desc);
opendnnCreateFilterDescriptor(&filter_desc);
opendnnCreateTensorDescriptor(&output_desc);
opendnnCreateTensorDescriptor(&bias_desc);
opendnnCreateConvolutionDescriptor(&conv_desc);
opendnnSetTensor4dDescriptor(input_desc, n, c, h, w);
opendnnSetFilter4dDescriptor(filter_desc, oc, c, kh, kw);
opendnnSetTensor4dDescriptor(output_desc, n, oc, oh, ow);
opendnnSetTensor4dDescriptor(bias_desc, 1, oc, 1, 1);
opendnnSetConvolution2dDescriptor(conv_desc, /*pad_h=*/0,/*pad_w=*/0,
/*str_h=*/1,/*str_w=*/1,
/*dir_h=*/0,/*dir_w=*/0);
opendnnSetConvolutionGroupCount(conv_desc, ngroup);
const float input[16] = {1,2,3,4,1,2,3,4,1,2,3,4,1,2,3,4};
const float filter[4] = {1,0,0,1};
float output[9] = {0,};
float bias[1] = {1};
// GPU device memory allocation and init
float *input_dev, *filter_dev, *output_dev, *bias_dev = NULL;
cudaMalloc(&input_dev, sizeof(float)*n*c*h*w);
cudaMalloc(&filter_dev, sizeof(float)*oc*c*kh*kw);
cudaMalloc(&output_dev, sizeof(float)*n*oc*oh*ow);
cudaMalloc(&bias_dev, sizeof(float)*c);
cudaMemcpy(input_dev, input, sizeof(float)*n*c*h*w, cudaMemcpyHostToDevice);
cudaMemcpy(filter_dev, filter, sizeof(float)*oc*c*kh*kw, cudaMemcpyHostToDevice);
cudaMemcpy(output_dev, output, sizeof(float)*n*oc*oh*ow, cudaMemcpyHostToDevice);
cudaMemcpy(bias_dev, bias, sizeof(float)*c, cudaMemcpyHostToDevice);
// For now, workspace is needed to save im2col transposed input tensor
// opendnnGetConvolutionForwardWorkspaceSize returns just n*oc*kh*kw*oh*ow*sizeof(float)
size_t size_in_bytes;
float* workspace;
opendnnGetConvolutionForwardWorkspaceSize(handle,
input_desc,
filter_desc, conv_desc,
output_desc, &size_in_bytes);
cudaMalloc(&workspace, size_in_bytes);
// Perform convolution
opendnnConvolutionForward(handle,
input_desc, input_dev,
filter_desc, filter_dev, conv_desc,
workspace, size_in_bytes,
output_desc, output_dev
);
// Perform bias addition
opendnnAddTensor(handle, bias_desc, bias_dev, output_desc, output_dev);
// Get results
cudaMemcpy(output, output_dev, sizeof(float)*n*oc*oh*ow, cudaMemcpyDeviceToHost);
for (int i = 0; i < 9; i++) {
cout << output[i] << '\n';
}
cout << "Done" << endl;
  cudaFree(input_dev);
  cudaFree(filter_dev);
  cudaFree(output_dev);
  cudaFree(bias_dev);
  cudaFree(workspace);
  opendnnDestroy(handle);
  return 0;
}
|
ba70c186e20ea110b55e419322f8af442f9bbada.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "box3d3r-16x16-1-128_kernel.hu"
__device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; }
__global__ void kernel0_1(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 3 - 3);
const AN5D_TYPE __c1Pad = (3);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 3 - 3);
const AN5D_TYPE __c2Pad = (3);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 3 - 3);
const AN5D_TYPE __c3Pad = (3);
#define __c3 c3
const AN5D_TYPE __halo1 = 3;
const AN5D_TYPE __halo2 = 3;
const AN5D_TYPE __halo3 = 3;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 10;
const AN5D_TYPE __side3Len = 10;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_1_5;
float __reg_1_6;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-0.176f) * (__REGREF(__a, 0, 0))) + (0.0010f * (__SBREF(__a_sb, -3, -3)))) + (0.0020f * (__SBREF(__a_sb, -3, -2)))) + (0.0030f * (__SBREF(__a_sb, -3, -1)))) + (0.0040f * (__SBREF(__a_sb, -3, 0)))) + (0.0050f * (__SBREF(__a_sb, -3, 1)))) + (0.0060f * (__SBREF(__a_sb, -3, 2)))) + (0.0070f * (__SBREF(__a_sb, -3, 3)))) + (0.0080f * (__SBREF(__a_sb, -2, -3)))) + (0.0090f * (__SBREF(__a_sb, -2, -2)))) + (0.0100f * (__SBREF(__a_sb, -2, -1)))) + (0.0110f * (__SBREF(__a_sb, -2, 0)))) + (0.0120f * (__SBREF(__a_sb, -2, 1)))) + (0.0130f * (__SBREF(__a_sb, -2, 2)))) + (0.0140f * (__SBREF(__a_sb, -2, 3)))) + (0.0150f * (__SBREF(__a_sb, -1, -3)))) + (0.0160f * (__SBREF(__a_sb, -1, -2)))) + (0.0170f * (__SBREF(__a_sb, -1, -1)))) + (0.0180f * (__SBREF(__a_sb, -1, 0)))) + (0.0190f * (__SBREF(__a_sb, -1, 1)))) + (0.0200f * (__SBREF(__a_sb, -1, 2)))) + (0.0210f * (__SBREF(__a_sb, -1, 3)))) + (0.0220f * (__SBREF(__a_sb, 0, -3)))) + (0.0230f * (__SBREF(__a_sb, 0, -2)))) + (0.0240f * (__SBREF(__a_sb, 0, -1)))) + (0.0250f * (__SBREF(__a_sb, 0, 1)))) + (0.0260f * (__SBREF(__a_sb, 0, 2)))) + (0.0270f * (__SBREF(__a_sb, 0, 3)))) + (0.0280f * (__SBREF(__a_sb, 1, -3)))) + (0.0290f * (__SBREF(__a_sb, 1, -2)))) + (0.0300f * (__SBREF(__a_sb, 1, -1)))) + (0.0310f * (__SBREF(__a_sb, 1, 0)))) + (0.0320f * (__SBREF(__a_sb, 1, 1)))) + (0.0330f * (__SBREF(__a_sb, 1, 2)))) + (0.0340f * (__SBREF(__a_sb, 1, 3)))) + (0.0350f * (__SBREF(__a_sb, 2, -3)))) + (0.0360f * (__SBREF(__a_sb, 2, -2)))) + (0.0370f * (__SBREF(__a_sb, 2, -1)))) + (0.0380f * (__SBREF(__a_sb, 2, 0)))) + (0.0390f * (__SBREF(__a_sb, 2, 1)))) + (0.0400f * (__SBREF(__a_sb, 2, 2)))) + (0.0410f * (__SBREF(__a_sb, 2, 3)))) + (0.0420f * (__SBREF(__a_sb, 3, -3)))) + (0.0430f * (__SBREF(__a_sb, 3, -2)))) + (0.0440f * (__SBREF(__a_sb, 3, -1)))) + (0.0450f * (__SBREF(__a_sb, 3, 0)))) + (0.0460f * (__SBREF(__a_sb, 3, 1)))) + (0.0470f * (__SBREF(__a_sb, 3, 2)))) + (0.0480f * (__SBREF(__a_sb, 3, 3)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.1808f * (__REGREF(__a, 0, 0)))) - (0.0011f * (__SBREF(__a_sb, -3, -3)))) - (0.0021f * (__SBREF(__a_sb, -3, -2)))) - (0.0031f * (__SBREF(__a_sb, -3, -1)))) - (0.0041f * (__SBREF(__a_sb, -3, 0)))) - (0.0051f * (__SBREF(__a_sb, -3, 1)))) - (0.0061f * (__SBREF(__a_sb, -3, 2)))) - (0.0071f * (__SBREF(__a_sb, -3, 3)))) - (0.0081f * (__SBREF(__a_sb, -2, -3)))) - (0.0091f * (__SBREF(__a_sb, -2, -2)))) - (0.0101f * (__SBREF(__a_sb, -2, -1)))) - (0.0111f * (__SBREF(__a_sb, -2, 0)))) - (0.0121f * (__SBREF(__a_sb, -2, 1)))) - (0.0131f * (__SBREF(__a_sb, -2, 2)))) - (0.0141f * (__SBREF(__a_sb, -2, 3)))) - (0.0151f * (__SBREF(__a_sb, -1, -3)))) - (0.0161f * (__SBREF(__a_sb, -1, -2)))) - (0.0171f * (__SBREF(__a_sb, -1, -1)))) - (0.0181f * (__SBREF(__a_sb, -1, 0)))) - (0.0191f * (__SBREF(__a_sb, -1, 1)))) - (0.0201f * (__SBREF(__a_sb, -1, 2)))) - (0.0211f * (__SBREF(__a_sb, -1, 3)))) - (0.0221f * (__SBREF(__a_sb, 0, -3)))) - (0.0231f * (__SBREF(__a_sb, 0, -2)))) - (0.0241f * (__SBREF(__a_sb, 0, -1)))) - (0.0251f * (__SBREF(__a_sb, 0, 1)))) - (0.0261f * (__SBREF(__a_sb, 0, 2)))) - (0.0271f * (__SBREF(__a_sb, 0, 3)))) - (0.0281f * (__SBREF(__a_sb, 1, -3)))) - (0.0291f * (__SBREF(__a_sb, 1, -2)))) - (0.0301f * (__SBREF(__a_sb, 1, -1)))) - (0.0311f * (__SBREF(__a_sb, 1, 0)))) - (0.0321f * (__SBREF(__a_sb, 1, 1)))) - (0.0331f * (__SBREF(__a_sb, 1, 2)))) - (0.0341f * (__SBREF(__a_sb, 1, 3)))) - (0.0351f * (__SBREF(__a_sb, 2, -3)))) - (0.0361f * (__SBREF(__a_sb, 2, -2)))) - (0.0371f * (__SBREF(__a_sb, 2, -1)))) - (0.0381f * (__SBREF(__a_sb, 2, 0)))) - (0.0391f * (__SBREF(__a_sb, 2, 1)))) - (0.0401f * (__SBREF(__a_sb, 2, 2)))) - (0.0411f * (__SBREF(__a_sb, 2, 3)))) - (0.0421f * (__SBREF(__a_sb, 3, -3)))) - (0.0431f * (__SBREF(__a_sb, 3, -2)))) - (0.0441f * (__SBREF(__a_sb, 3, -1)))) - (0.0451f * (__SBREF(__a_sb, 3, 0)))) - (0.0461f * (__SBREF(__a_sb, 3, 1)))) - (0.0471f * (__SBREF(__a_sb, 3, 2)))) - (0.0481f * (__SBREF(__a_sb, 3, 3))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-0.1856f) * (__REGREF(__a, 0, 0)))) + (0.0012f * (__SBREF(__a_sb, -3, -3)))) + (0.0022f * (__SBREF(__a_sb, -3, -2)))) + (0.0032f * (__SBREF(__a_sb, -3, -1)))) + (0.0042f * (__SBREF(__a_sb, -3, 0)))) + (0.0052f * (__SBREF(__a_sb, -3, 1)))) + (0.0062f * (__SBREF(__a_sb, -3, 2)))) + (0.0072f * (__SBREF(__a_sb, -3, 3)))) + (0.0082f * (__SBREF(__a_sb, -2, -3)))) + (0.0092f * (__SBREF(__a_sb, -2, -2)))) + (0.0102f * (__SBREF(__a_sb, -2, -1)))) + (0.0112f * (__SBREF(__a_sb, -2, 0)))) + (0.0122f * (__SBREF(__a_sb, -2, 1)))) + (0.0132f * (__SBREF(__a_sb, -2, 2)))) + (0.0142f * (__SBREF(__a_sb, -2, 3)))) + (0.0152f * (__SBREF(__a_sb, -1, -3)))) + (0.0162f * (__SBREF(__a_sb, -1, -2)))) + (0.0172f * (__SBREF(__a_sb, -1, -1)))) + (0.0182f * (__SBREF(__a_sb, -1, 0)))) + (0.0192f * (__SBREF(__a_sb, -1, 1)))) + (0.0202f * (__SBREF(__a_sb, -1, 2)))) + (0.0212f * (__SBREF(__a_sb, -1, 3)))) + (0.0222f * (__SBREF(__a_sb, 0, -3)))) + (0.0232f * (__SBREF(__a_sb, 0, -2)))) + (0.0242f * (__SBREF(__a_sb, 0, -1)))) + (0.0252f * (__SBREF(__a_sb, 0, 1)))) + (0.0262f * (__SBREF(__a_sb, 0, 2)))) + (0.0272f * (__SBREF(__a_sb, 0, 3)))) + (0.0282f * (__SBREF(__a_sb, 1, -3)))) + (0.0292f * (__SBREF(__a_sb, 1, -2)))) + (0.0302f * (__SBREF(__a_sb, 1, -1)))) + (0.0312f * (__SBREF(__a_sb, 1, 0)))) + (0.0322f * (__SBREF(__a_sb, 1, 1)))) + (0.0332f * (__SBREF(__a_sb, 1, 2)))) + (0.0342f * (__SBREF(__a_sb, 1, 3)))) + (0.0352f * (__SBREF(__a_sb, 2, -3)))) + (0.0362f * (__SBREF(__a_sb, 2, -2)))) + (0.0372f * (__SBREF(__a_sb, 2, -1)))) + (0.0382f * (__SBREF(__a_sb, 2, 0)))) + (0.0392f * (__SBREF(__a_sb, 2, 1)))) + (0.0402f * (__SBREF(__a_sb, 2, 2)))) + (0.0412f * (__SBREF(__a_sb, 2, 3)))) + (0.0422f * (__SBREF(__a_sb, 3, -3)))) + (0.0432f * (__SBREF(__a_sb, 3, -2)))) + (0.0442f * (__SBREF(__a_sb, 3, -1)))) + (0.0452f * (__SBREF(__a_sb, 3, 0)))) + (0.0462f * (__SBREF(__a_sb, 3, 1)))) + (0.0472f * (__SBREF(__a_sb, 3, 2)))) + (0.0482f * (__SBREF(__a_sb, 3, 3)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-0.1904f) * (__REGREF(__a, 0, 0)))) + (0.0013f * (__SBREF(__a_sb, -3, -3)))) + (0.0023f * (__SBREF(__a_sb, -3, -2)))) + (0.0033f * (__SBREF(__a_sb, -3, -1)))) + (0.0043f * (__SBREF(__a_sb, -3, 0)))) + (0.0053f * (__SBREF(__a_sb, -3, 1)))) + (0.0063f * (__SBREF(__a_sb, -3, 2)))) + (0.0073f * (__SBREF(__a_sb, -3, 3)))) + (0.0083f * (__SBREF(__a_sb, -2, -3)))) + (0.0093f * (__SBREF(__a_sb, -2, -2)))) + (0.0103f * (__SBREF(__a_sb, -2, -1)))) + (0.0113f * (__SBREF(__a_sb, -2, 0)))) + (0.0123f * (__SBREF(__a_sb, -2, 1)))) + (0.0133f * (__SBREF(__a_sb, -2, 2)))) + (0.0143f * (__SBREF(__a_sb, -2, 3)))) + (0.0153f * (__SBREF(__a_sb, -1, -3)))) + (0.0163f * (__SBREF(__a_sb, -1, -2)))) + (0.0173f * (__SBREF(__a_sb, -1, -1)))) + (0.0183f * (__SBREF(__a_sb, -1, 0)))) + (0.0193f * (__SBREF(__a_sb, -1, 1)))) + (0.0203f * (__SBREF(__a_sb, -1, 2)))) + (0.0213f * (__SBREF(__a_sb, -1, 3)))) + (0.0223f * (__SBREF(__a_sb, 0, -3)))) + (0.0233f * (__SBREF(__a_sb, 0, -2)))) + (0.0243f * (__SBREF(__a_sb, 0, -1)))) + (0.0253f * (__SBREF(__a_sb, 0, 1)))) + (0.0263f * (__SBREF(__a_sb, 0, 2)))) + (0.0273f * (__SBREF(__a_sb, 0, 3)))) + (0.0283f * (__SBREF(__a_sb, 1, -3)))) + (0.0293f * (__SBREF(__a_sb, 1, -2)))) + (0.0303f * (__SBREF(__a_sb, 1, -1)))) + (0.0313f * (__SBREF(__a_sb, 1, 0)))) + (0.0323f * (__SBREF(__a_sb, 1, 1)))) + (0.0333f * (__SBREF(__a_sb, 1, 2)))) + (0.0343f * (__SBREF(__a_sb, 1, 3)))) + (0.0353f * (__SBREF(__a_sb, 2, -3)))) + (0.0363f * (__SBREF(__a_sb, 2, -2)))) + (0.0373f * (__SBREF(__a_sb, 2, -1)))) + (0.0383f * (__SBREF(__a_sb, 2, 0)))) + (0.0393f * (__SBREF(__a_sb, 2, 1)))) + (0.0403f * (__SBREF(__a_sb, 2, 2)))) + (0.0413f * (__SBREF(__a_sb, 2, 3)))) + (0.0423f * (__SBREF(__a_sb, 3, -3)))) + (0.0433f * (__SBREF(__a_sb, 3, -2)))) + (0.0443f * (__SBREF(__a_sb, 3, -1)))) + (0.0453f * (__SBREF(__a_sb, 3, 0)))) + (0.0463f * (__SBREF(__a_sb, 3, 1)))) + (0.0473f * (__SBREF(__a_sb, 3, 2)))) + (0.0483f * (__SBREF(__a_sb, 3, 3))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.1952f * (__REGREF(__a, 0, 0)))) - (0.0014f * (__SBREF(__a_sb, -3, -3)))) - (0.0024f * (__SBREF(__a_sb, -3, -2)))) - (0.0034f * (__SBREF(__a_sb, -3, -1)))) - (0.0044f * (__SBREF(__a_sb, -3, 0)))) - (0.0054f * (__SBREF(__a_sb, -3, 1)))) - (0.0064f * (__SBREF(__a_sb, -3, 2)))) - (0.0074f * (__SBREF(__a_sb, -3, 3)))) - (0.0084f * (__SBREF(__a_sb, -2, -3)))) - (0.0094f * (__SBREF(__a_sb, -2, -2)))) - (0.0104f * (__SBREF(__a_sb, -2, -1)))) - (0.0114f * (__SBREF(__a_sb, -2, 0)))) - (0.0124f * (__SBREF(__a_sb, -2, 1)))) - (0.0134f * (__SBREF(__a_sb, -2, 2)))) - (0.0144f * (__SBREF(__a_sb, -2, 3)))) - (0.0154f * (__SBREF(__a_sb, -1, -3)))) - (0.0164f * (__SBREF(__a_sb, -1, -2)))) - (0.0174f * (__SBREF(__a_sb, -1, -1)))) - (0.0184f * (__SBREF(__a_sb, -1, 0)))) - (0.0194f * (__SBREF(__a_sb, -1, 1)))) - (0.0204f * (__SBREF(__a_sb, -1, 2)))) - (0.0214f * (__SBREF(__a_sb, -1, 3)))) - (0.0224f * (__SBREF(__a_sb, 0, -3)))) - (0.0234f * (__SBREF(__a_sb, 0, -2)))) - (0.0244f * (__SBREF(__a_sb, 0, -1)))) - (0.0254f * (__SBREF(__a_sb, 0, 1)))) - (0.0264f * (__SBREF(__a_sb, 0, 2)))) - (0.0274f * (__SBREF(__a_sb, 0, 3)))) - (0.0284f * (__SBREF(__a_sb, 1, -3)))) - (0.0294f * (__SBREF(__a_sb, 1, -2)))) - (0.0304f * (__SBREF(__a_sb, 1, -1)))) - (0.0314f * (__SBREF(__a_sb, 1, 0)))) - (0.0324f * (__SBREF(__a_sb, 1, 1)))) - (0.0334f * (__SBREF(__a_sb, 1, 2)))) - (0.0344f * (__SBREF(__a_sb, 1, 3)))) - (0.0354f * (__SBREF(__a_sb, 2, -3)))) - (0.0364f * (__SBREF(__a_sb, 2, -2)))) - (0.0374f * (__SBREF(__a_sb, 2, -1)))) - (0.0384f * (__SBREF(__a_sb, 2, 0)))) - (0.0394f * (__SBREF(__a_sb, 2, 1)))) - (0.0404f * (__SBREF(__a_sb, 2, 2)))) - (0.0414f * (__SBREF(__a_sb, 2, 3)))) - (0.0424f * (__SBREF(__a_sb, 3, -3)))) - (0.0434f * (__SBREF(__a_sb, 3, -2)))) - (0.0444f * (__SBREF(__a_sb, 3, -1)))) - (0.0454f * (__SBREF(__a_sb, 3, 0)))) - (0.0464f * (__SBREF(__a_sb, 3, 1)))) - (0.0474f * (__SBREF(__a_sb, 3, 2)))) - (0.0484f * (__SBREF(__a_sb, 3, 3)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_5_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-((-0.300f) * (__REGREF(__a, 0, 0)))) + (0.0015f * (__SBREF(__a_sb, -3, -3)))) + (0.0025f * (__SBREF(__a_sb, -3, -2)))) + (0.0035f * (__SBREF(__a_sb, -3, -1)))) + (0.0045f * (__SBREF(__a_sb, -3, 0)))) + (0.0055f * (__SBREF(__a_sb, -3, 1)))) + (0.0065f * (__SBREF(__a_sb, -3, 2)))) + (0.0075f * (__SBREF(__a_sb, -3, 3)))) + (0.0085f * (__SBREF(__a_sb, -2, -3)))) + (0.0095f * (__SBREF(__a_sb, -2, -2)))) + (0.0105f * (__SBREF(__a_sb, -2, -1)))) + (0.0115f * (__SBREF(__a_sb, -2, 0)))) + (0.0125f * (__SBREF(__a_sb, -2, 1)))) + (0.0135f * (__SBREF(__a_sb, -2, 2)))) + (0.0145f * (__SBREF(__a_sb, -2, 3)))) + (0.0155f * (__SBREF(__a_sb, -1, -3)))) + (0.0165f * (__SBREF(__a_sb, -1, -2)))) + (0.0175f * (__SBREF(__a_sb, -1, -1)))) + (0.0185f * (__SBREF(__a_sb, -1, 0)))) + (0.0195f * (__SBREF(__a_sb, -1, 1)))) + (0.0205f * (__SBREF(__a_sb, -1, 2)))) + (0.0215f * (__SBREF(__a_sb, -1, 3)))) + (0.0225f * (__SBREF(__a_sb, 0, -3)))) + (0.0235f * (__SBREF(__a_sb, 0, -2)))) + (0.0245f * (__SBREF(__a_sb, 0, -1)))) + (0.0255f * (__SBREF(__a_sb, 0, 1)))) + (0.0265f * (__SBREF(__a_sb, 0, 2)))) + (0.0275f * (__SBREF(__a_sb, 0, 3)))) + (0.0285f * (__SBREF(__a_sb, 1, -3)))) + (0.0295f * (__SBREF(__a_sb, 1, -2)))) + (0.0305f * (__SBREF(__a_sb, 1, -1)))) + (0.0315f * (__SBREF(__a_sb, 1, 0)))) + (0.0325f * (__SBREF(__a_sb, 1, 1)))) + (0.0335f * (__SBREF(__a_sb, 1, 2)))) + (0.0345f * (__SBREF(__a_sb, 1, 3)))) + (0.0355f * (__SBREF(__a_sb, 2, -3)))) + (0.0365f * (__SBREF(__a_sb, 2, -2)))) + (0.0375f * (__SBREF(__a_sb, 2, -1)))) + (0.0385f * (__SBREF(__a_sb, 2, 0)))) + (0.0395f * (__SBREF(__a_sb, 2, 1)))) + (0.0405f * (__SBREF(__a_sb, 2, 2)))) + (0.0415f * (__SBREF(__a_sb, 2, 3)))) + (0.0425f * (__SBREF(__a_sb, 3, -3)))) + (0.0435f * (__SBREF(__a_sb, 3, -2)))) + (0.0445f * (__SBREF(__a_sb, 3, -1)))) + (0.0455f * (__SBREF(__a_sb, 3, 0)))) + (0.0465f * (__SBREF(__a_sb, 3, 1)))) + (0.0475f * (__SBREF(__a_sb, 3, 2)))) + (0.1485f * (__SBREF(__a_sb, 3, 3))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_5(out, a) do { float etmp; __CALCEXPR_5_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_6_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((0.2048f * (__REGREF(__a, 0, 0)))) - (0.0016f * (__SBREF(__a_sb, -3, -3)))) - (0.0026f * (__SBREF(__a_sb, -3, -2)))) - (0.0036f * (__SBREF(__a_sb, -3, -1)))) - (0.0046f * (__SBREF(__a_sb, -3, 0)))) - (0.0056f * (__SBREF(__a_sb, -3, 1)))) - (0.0066f * (__SBREF(__a_sb, -3, 2)))) - (0.0076f * (__SBREF(__a_sb, -3, 3)))) - (0.0086f * (__SBREF(__a_sb, -2, -3)))) - (0.0096f * (__SBREF(__a_sb, -2, -2)))) - (0.0106f * (__SBREF(__a_sb, -2, -1)))) - (0.0116f * (__SBREF(__a_sb, -2, 0)))) - (0.0126f * (__SBREF(__a_sb, -2, 1)))) - (0.0136f * (__SBREF(__a_sb, -2, 2)))) - (0.0146f * (__SBREF(__a_sb, -2, 3)))) - (0.0156f * (__SBREF(__a_sb, -1, -3)))) - (0.0166f * (__SBREF(__a_sb, -1, -2)))) - (0.0176f * (__SBREF(__a_sb, -1, -1)))) - (0.0186f * (__SBREF(__a_sb, -1, 0)))) - (0.0196f * (__SBREF(__a_sb, -1, 1)))) - (0.0206f * (__SBREF(__a_sb, -1, 2)))) - (0.0216f * (__SBREF(__a_sb, -1, 3)))) - (0.0226f * (__SBREF(__a_sb, 0, -3)))) - (0.0236f * (__SBREF(__a_sb, 0, -2)))) - (0.0246f * (__SBREF(__a_sb, 0, -1)))) - (0.0256f * (__SBREF(__a_sb, 0, 1)))) - (0.0266f * (__SBREF(__a_sb, 0, 2)))) - (0.0276f * (__SBREF(__a_sb, 0, 3)))) - (0.0286f * (__SBREF(__a_sb, 1, -3)))) - (0.0296f * (__SBREF(__a_sb, 1, -2)))) - (0.0306f * (__SBREF(__a_sb, 1, -1)))) - (0.0316f * (__SBREF(__a_sb, 1, 0)))) - (0.0326f * (__SBREF(__a_sb, 1, 1)))) - (0.0336f * (__SBREF(__a_sb, 1, 2)))) - (0.0346f * (__SBREF(__a_sb, 1, 3)))) - (0.0356f * (__SBREF(__a_sb, 2, -3)))) - (0.0366f * (__SBREF(__a_sb, 2, -2)))) - (0.0376f * (__SBREF(__a_sb, 2, -1)))) - (0.0386f * (__SBREF(__a_sb, 2, 0)))) - (0.0396f * (__SBREF(__a_sb, 2, 1)))) - (0.0406f * (__SBREF(__a_sb, 2, 2)))) - (0.0416f * (__SBREF(__a_sb, 2, 3)))) - (0.0426f * (__SBREF(__a_sb, 3, -3)))) - (0.0436f * (__SBREF(__a_sb, 3, -2)))) - (0.0446f * (__SBREF(__a_sb, 3, -1)))) - (0.0456f * (__SBREF(__a_sb, 3, 0)))) - (0.0466f * (__SBREF(__a_sb, 3, 1)))) - (0.0476f * (__SBREF(__a_sb, 3, 2)))) - (0.0486f * (__SBREF(__a_sb, 3, 3)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_6(out, a) do { float etmp; __CALCEXPR_6_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); __CALCEXPR_5(out5, reg); __CALCEXPR_6(out6, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, out5, out6, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, reg); } else out3 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(3, __reg_1_3);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(3, __reg_1_3);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 10;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 3, __reg_1_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 3, __reg_1_5);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 3, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 3, __reg_1_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 3, __reg_1_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 3, __reg_1_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 3, __reg_1_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 2, __reg_1_5);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_6, __reg_0);
__STORE(__h - 1, __reg_1_6);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 3, __reg_1_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 2, __reg_1_5);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_0, __reg_1_6, __reg_0);
__STORE(__h - 1, __reg_1_6);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 3, __reg_1_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 2, __reg_1_5);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0);
__STORE(__h - 1, __reg_1_6);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 3, __reg_1_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 2, __reg_1_5);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0);
__STORE(__h - 1, __reg_1_6);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
}
else if (__h + 7 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 3, __reg_1_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 2, __reg_1_5);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0);
__STORE(__h - 1, __reg_1_6);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
}
else if (__h + 8 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 3, __reg_1_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 2, __reg_1_5);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0);
__STORE(__h - 1, __reg_1_6);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__STORE(__h + 4, __reg_1_4);
}
else if (__h + 9 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 3, __reg_1_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 2, __reg_1_5);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0);
__STORE(__h - 1, __reg_1_6);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h + 4, __reg_1_4);
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_5, __reg_0);
__STORE(__h + 5, __reg_1_5);
}
}
else
{
for (__h = 7; __h <= __side1LenOl - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 3, __reg_1_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 3, __reg_1_5);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 3, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 3, __reg_1_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 3, __reg_1_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 3, __reg_1_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 3, __reg_1_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 3, __reg_1_5);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 3, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 3, __reg_1_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 3, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 3, __reg_1_3);
__h++;
}
}
| ba70c186e20ea110b55e419322f8af442f9bbada.cu | #include "box3d3r-16x16-1-128_kernel.hu"
__device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; }
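// kernel0_1 advances one time step (__side0Len == 1) of a radius-3 3D box stencil
// ("box3d3r"). Each thread block owns a 10x10 output tile in the (c2, c3) plane, padded
// by the halo to a 16x16 thread block, and streams up to 128 planes along c1 through a
// double-buffered shared-memory slice (__a_sb_double). A holds two time copies of the
// grid: __LOAD reads the (c0 % 2) copy and __DEST writes the ((c0 + 1) % 2) copy.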
__global__ void kernel0_1(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 3 - 3);
const AN5D_TYPE __c1Pad = (3);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 3 - 3);
const AN5D_TYPE __c2Pad = (3);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 3 - 3);
const AN5D_TYPE __c3Pad = (3);
#define __c3 c3
const AN5D_TYPE __halo1 = 3;
const AN5D_TYPE __halo2 = 3;
const AN5D_TYPE __halo3 = 3;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 10;
const AN5D_TYPE __side3Len = 10;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_1_3;
float __reg_1_4;
float __reg_1_5;
float __reg_1_6;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-0.176f) * (__REGREF(__a, 0, 0))) + (0.0010f * (__SBREF(__a_sb, -3, -3)))) + (0.0020f * (__SBREF(__a_sb, -3, -2)))) + (0.0030f * (__SBREF(__a_sb, -3, -1)))) + (0.0040f * (__SBREF(__a_sb, -3, 0)))) + (0.0050f * (__SBREF(__a_sb, -3, 1)))) + (0.0060f * (__SBREF(__a_sb, -3, 2)))) + (0.0070f * (__SBREF(__a_sb, -3, 3)))) + (0.0080f * (__SBREF(__a_sb, -2, -3)))) + (0.0090f * (__SBREF(__a_sb, -2, -2)))) + (0.0100f * (__SBREF(__a_sb, -2, -1)))) + (0.0110f * (__SBREF(__a_sb, -2, 0)))) + (0.0120f * (__SBREF(__a_sb, -2, 1)))) + (0.0130f * (__SBREF(__a_sb, -2, 2)))) + (0.0140f * (__SBREF(__a_sb, -2, 3)))) + (0.0150f * (__SBREF(__a_sb, -1, -3)))) + (0.0160f * (__SBREF(__a_sb, -1, -2)))) + (0.0170f * (__SBREF(__a_sb, -1, -1)))) + (0.0180f * (__SBREF(__a_sb, -1, 0)))) + (0.0190f * (__SBREF(__a_sb, -1, 1)))) + (0.0200f * (__SBREF(__a_sb, -1, 2)))) + (0.0210f * (__SBREF(__a_sb, -1, 3)))) + (0.0220f * (__SBREF(__a_sb, 0, -3)))) + (0.0230f * (__SBREF(__a_sb, 0, -2)))) + (0.0240f * (__SBREF(__a_sb, 0, -1)))) + (0.0250f * (__SBREF(__a_sb, 0, 1)))) + (0.0260f * (__SBREF(__a_sb, 0, 2)))) + (0.0270f * (__SBREF(__a_sb, 0, 3)))) + (0.0280f * (__SBREF(__a_sb, 1, -3)))) + (0.0290f * (__SBREF(__a_sb, 1, -2)))) + (0.0300f * (__SBREF(__a_sb, 1, -1)))) + (0.0310f * (__SBREF(__a_sb, 1, 0)))) + (0.0320f * (__SBREF(__a_sb, 1, 1)))) + (0.0330f * (__SBREF(__a_sb, 1, 2)))) + (0.0340f * (__SBREF(__a_sb, 1, 3)))) + (0.0350f * (__SBREF(__a_sb, 2, -3)))) + (0.0360f * (__SBREF(__a_sb, 2, -2)))) + (0.0370f * (__SBREF(__a_sb, 2, -1)))) + (0.0380f * (__SBREF(__a_sb, 2, 0)))) + (0.0390f * (__SBREF(__a_sb, 2, 1)))) + (0.0400f * (__SBREF(__a_sb, 2, 2)))) + (0.0410f * (__SBREF(__a_sb, 2, 3)))) + (0.0420f * (__SBREF(__a_sb, 3, -3)))) + (0.0430f * (__SBREF(__a_sb, 3, -2)))) + (0.0440f * (__SBREF(__a_sb, 3, -1)))) + (0.0450f * (__SBREF(__a_sb, 3, 0)))) + (0.0460f * (__SBREF(__a_sb, 3, 1)))) + (0.0470f * (__SBREF(__a_sb, 3, 2)))) + (0.0480f * (__SBREF(__a_sb, 3, 3)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.1808f * (__REGREF(__a, 0, 0)))) - (0.0011f * (__SBREF(__a_sb, -3, -3)))) - (0.0021f * (__SBREF(__a_sb, -3, -2)))) - (0.0031f * (__SBREF(__a_sb, -3, -1)))) - (0.0041f * (__SBREF(__a_sb, -3, 0)))) - (0.0051f * (__SBREF(__a_sb, -3, 1)))) - (0.0061f * (__SBREF(__a_sb, -3, 2)))) - (0.0071f * (__SBREF(__a_sb, -3, 3)))) - (0.0081f * (__SBREF(__a_sb, -2, -3)))) - (0.0091f * (__SBREF(__a_sb, -2, -2)))) - (0.0101f * (__SBREF(__a_sb, -2, -1)))) - (0.0111f * (__SBREF(__a_sb, -2, 0)))) - (0.0121f * (__SBREF(__a_sb, -2, 1)))) - (0.0131f * (__SBREF(__a_sb, -2, 2)))) - (0.0141f * (__SBREF(__a_sb, -2, 3)))) - (0.0151f * (__SBREF(__a_sb, -1, -3)))) - (0.0161f * (__SBREF(__a_sb, -1, -2)))) - (0.0171f * (__SBREF(__a_sb, -1, -1)))) - (0.0181f * (__SBREF(__a_sb, -1, 0)))) - (0.0191f * (__SBREF(__a_sb, -1, 1)))) - (0.0201f * (__SBREF(__a_sb, -1, 2)))) - (0.0211f * (__SBREF(__a_sb, -1, 3)))) - (0.0221f * (__SBREF(__a_sb, 0, -3)))) - (0.0231f * (__SBREF(__a_sb, 0, -2)))) - (0.0241f * (__SBREF(__a_sb, 0, -1)))) - (0.0251f * (__SBREF(__a_sb, 0, 1)))) - (0.0261f * (__SBREF(__a_sb, 0, 2)))) - (0.0271f * (__SBREF(__a_sb, 0, 3)))) - (0.0281f * (__SBREF(__a_sb, 1, -3)))) - (0.0291f * (__SBREF(__a_sb, 1, -2)))) - (0.0301f * (__SBREF(__a_sb, 1, -1)))) - (0.0311f * (__SBREF(__a_sb, 1, 0)))) - (0.0321f * (__SBREF(__a_sb, 1, 1)))) - (0.0331f * (__SBREF(__a_sb, 1, 2)))) - (0.0341f * (__SBREF(__a_sb, 1, 3)))) - (0.0351f * (__SBREF(__a_sb, 2, -3)))) - (0.0361f * (__SBREF(__a_sb, 2, -2)))) - (0.0371f * (__SBREF(__a_sb, 2, -1)))) - (0.0381f * (__SBREF(__a_sb, 2, 0)))) - (0.0391f * (__SBREF(__a_sb, 2, 1)))) - (0.0401f * (__SBREF(__a_sb, 2, 2)))) - (0.0411f * (__SBREF(__a_sb, 2, 3)))) - (0.0421f * (__SBREF(__a_sb, 3, -3)))) - (0.0431f * (__SBREF(__a_sb, 3, -2)))) - (0.0441f * (__SBREF(__a_sb, 3, -1)))) - (0.0451f * (__SBREF(__a_sb, 3, 0)))) - (0.0461f * (__SBREF(__a_sb, 3, 1)))) - (0.0471f * (__SBREF(__a_sb, 3, 2)))) - (0.0481f * (__SBREF(__a_sb, 3, 3))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-0.1856f) * (__REGREF(__a, 0, 0)))) + (0.0012f * (__SBREF(__a_sb, -3, -3)))) + (0.0022f * (__SBREF(__a_sb, -3, -2)))) + (0.0032f * (__SBREF(__a_sb, -3, -1)))) + (0.0042f * (__SBREF(__a_sb, -3, 0)))) + (0.0052f * (__SBREF(__a_sb, -3, 1)))) + (0.0062f * (__SBREF(__a_sb, -3, 2)))) + (0.0072f * (__SBREF(__a_sb, -3, 3)))) + (0.0082f * (__SBREF(__a_sb, -2, -3)))) + (0.0092f * (__SBREF(__a_sb, -2, -2)))) + (0.0102f * (__SBREF(__a_sb, -2, -1)))) + (0.0112f * (__SBREF(__a_sb, -2, 0)))) + (0.0122f * (__SBREF(__a_sb, -2, 1)))) + (0.0132f * (__SBREF(__a_sb, -2, 2)))) + (0.0142f * (__SBREF(__a_sb, -2, 3)))) + (0.0152f * (__SBREF(__a_sb, -1, -3)))) + (0.0162f * (__SBREF(__a_sb, -1, -2)))) + (0.0172f * (__SBREF(__a_sb, -1, -1)))) + (0.0182f * (__SBREF(__a_sb, -1, 0)))) + (0.0192f * (__SBREF(__a_sb, -1, 1)))) + (0.0202f * (__SBREF(__a_sb, -1, 2)))) + (0.0212f * (__SBREF(__a_sb, -1, 3)))) + (0.0222f * (__SBREF(__a_sb, 0, -3)))) + (0.0232f * (__SBREF(__a_sb, 0, -2)))) + (0.0242f * (__SBREF(__a_sb, 0, -1)))) + (0.0252f * (__SBREF(__a_sb, 0, 1)))) + (0.0262f * (__SBREF(__a_sb, 0, 2)))) + (0.0272f * (__SBREF(__a_sb, 0, 3)))) + (0.0282f * (__SBREF(__a_sb, 1, -3)))) + (0.0292f * (__SBREF(__a_sb, 1, -2)))) + (0.0302f * (__SBREF(__a_sb, 1, -1)))) + (0.0312f * (__SBREF(__a_sb, 1, 0)))) + (0.0322f * (__SBREF(__a_sb, 1, 1)))) + (0.0332f * (__SBREF(__a_sb, 1, 2)))) + (0.0342f * (__SBREF(__a_sb, 1, 3)))) + (0.0352f * (__SBREF(__a_sb, 2, -3)))) + (0.0362f * (__SBREF(__a_sb, 2, -2)))) + (0.0372f * (__SBREF(__a_sb, 2, -1)))) + (0.0382f * (__SBREF(__a_sb, 2, 0)))) + (0.0392f * (__SBREF(__a_sb, 2, 1)))) + (0.0402f * (__SBREF(__a_sb, 2, 2)))) + (0.0412f * (__SBREF(__a_sb, 2, 3)))) + (0.0422f * (__SBREF(__a_sb, 3, -3)))) + (0.0432f * (__SBREF(__a_sb, 3, -2)))) + (0.0442f * (__SBREF(__a_sb, 3, -1)))) + (0.0452f * (__SBREF(__a_sb, 3, 0)))) + (0.0462f * (__SBREF(__a_sb, 3, 1)))) + (0.0472f * (__SBREF(__a_sb, 3, 2)))) + (0.0482f * (__SBREF(__a_sb, 3, 3)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-0.1904f) * (__REGREF(__a, 0, 0)))) + (0.0013f * (__SBREF(__a_sb, -3, -3)))) + (0.0023f * (__SBREF(__a_sb, -3, -2)))) + (0.0033f * (__SBREF(__a_sb, -3, -1)))) + (0.0043f * (__SBREF(__a_sb, -3, 0)))) + (0.0053f * (__SBREF(__a_sb, -3, 1)))) + (0.0063f * (__SBREF(__a_sb, -3, 2)))) + (0.0073f * (__SBREF(__a_sb, -3, 3)))) + (0.0083f * (__SBREF(__a_sb, -2, -3)))) + (0.0093f * (__SBREF(__a_sb, -2, -2)))) + (0.0103f * (__SBREF(__a_sb, -2, -1)))) + (0.0113f * (__SBREF(__a_sb, -2, 0)))) + (0.0123f * (__SBREF(__a_sb, -2, 1)))) + (0.0133f * (__SBREF(__a_sb, -2, 2)))) + (0.0143f * (__SBREF(__a_sb, -2, 3)))) + (0.0153f * (__SBREF(__a_sb, -1, -3)))) + (0.0163f * (__SBREF(__a_sb, -1, -2)))) + (0.0173f * (__SBREF(__a_sb, -1, -1)))) + (0.0183f * (__SBREF(__a_sb, -1, 0)))) + (0.0193f * (__SBREF(__a_sb, -1, 1)))) + (0.0203f * (__SBREF(__a_sb, -1, 2)))) + (0.0213f * (__SBREF(__a_sb, -1, 3)))) + (0.0223f * (__SBREF(__a_sb, 0, -3)))) + (0.0233f * (__SBREF(__a_sb, 0, -2)))) + (0.0243f * (__SBREF(__a_sb, 0, -1)))) + (0.0253f * (__SBREF(__a_sb, 0, 1)))) + (0.0263f * (__SBREF(__a_sb, 0, 2)))) + (0.0273f * (__SBREF(__a_sb, 0, 3)))) + (0.0283f * (__SBREF(__a_sb, 1, -3)))) + (0.0293f * (__SBREF(__a_sb, 1, -2)))) + (0.0303f * (__SBREF(__a_sb, 1, -1)))) + (0.0313f * (__SBREF(__a_sb, 1, 0)))) + (0.0323f * (__SBREF(__a_sb, 1, 1)))) + (0.0333f * (__SBREF(__a_sb, 1, 2)))) + (0.0343f * (__SBREF(__a_sb, 1, 3)))) + (0.0353f * (__SBREF(__a_sb, 2, -3)))) + (0.0363f * (__SBREF(__a_sb, 2, -2)))) + (0.0373f * (__SBREF(__a_sb, 2, -1)))) + (0.0383f * (__SBREF(__a_sb, 2, 0)))) + (0.0393f * (__SBREF(__a_sb, 2, 1)))) + (0.0403f * (__SBREF(__a_sb, 2, 2)))) + (0.0413f * (__SBREF(__a_sb, 2, 3)))) + (0.0423f * (__SBREF(__a_sb, 3, -3)))) + (0.0433f * (__SBREF(__a_sb, 3, -2)))) + (0.0443f * (__SBREF(__a_sb, 3, -1)))) + (0.0453f * (__SBREF(__a_sb, 3, 0)))) + (0.0463f * (__SBREF(__a_sb, 3, 1)))) + (0.0473f * (__SBREF(__a_sb, 3, 2)))) + (0.0483f * (__SBREF(__a_sb, 3, 3))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_3(out, a) do { float etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((0.1952f * (__REGREF(__a, 0, 0)))) - (0.0014f * (__SBREF(__a_sb, -3, -3)))) - (0.0024f * (__SBREF(__a_sb, -3, -2)))) - (0.0034f * (__SBREF(__a_sb, -3, -1)))) - (0.0044f * (__SBREF(__a_sb, -3, 0)))) - (0.0054f * (__SBREF(__a_sb, -3, 1)))) - (0.0064f * (__SBREF(__a_sb, -3, 2)))) - (0.0074f * (__SBREF(__a_sb, -3, 3)))) - (0.0084f * (__SBREF(__a_sb, -2, -3)))) - (0.0094f * (__SBREF(__a_sb, -2, -2)))) - (0.0104f * (__SBREF(__a_sb, -2, -1)))) - (0.0114f * (__SBREF(__a_sb, -2, 0)))) - (0.0124f * (__SBREF(__a_sb, -2, 1)))) - (0.0134f * (__SBREF(__a_sb, -2, 2)))) - (0.0144f * (__SBREF(__a_sb, -2, 3)))) - (0.0154f * (__SBREF(__a_sb, -1, -3)))) - (0.0164f * (__SBREF(__a_sb, -1, -2)))) - (0.0174f * (__SBREF(__a_sb, -1, -1)))) - (0.0184f * (__SBREF(__a_sb, -1, 0)))) - (0.0194f * (__SBREF(__a_sb, -1, 1)))) - (0.0204f * (__SBREF(__a_sb, -1, 2)))) - (0.0214f * (__SBREF(__a_sb, -1, 3)))) - (0.0224f * (__SBREF(__a_sb, 0, -3)))) - (0.0234f * (__SBREF(__a_sb, 0, -2)))) - (0.0244f * (__SBREF(__a_sb, 0, -1)))) - (0.0254f * (__SBREF(__a_sb, 0, 1)))) - (0.0264f * (__SBREF(__a_sb, 0, 2)))) - (0.0274f * (__SBREF(__a_sb, 0, 3)))) - (0.0284f * (__SBREF(__a_sb, 1, -3)))) - (0.0294f * (__SBREF(__a_sb, 1, -2)))) - (0.0304f * (__SBREF(__a_sb, 1, -1)))) - (0.0314f * (__SBREF(__a_sb, 1, 0)))) - (0.0324f * (__SBREF(__a_sb, 1, 1)))) - (0.0334f * (__SBREF(__a_sb, 1, 2)))) - (0.0344f * (__SBREF(__a_sb, 1, 3)))) - (0.0354f * (__SBREF(__a_sb, 2, -3)))) - (0.0364f * (__SBREF(__a_sb, 2, -2)))) - (0.0374f * (__SBREF(__a_sb, 2, -1)))) - (0.0384f * (__SBREF(__a_sb, 2, 0)))) - (0.0394f * (__SBREF(__a_sb, 2, 1)))) - (0.0404f * (__SBREF(__a_sb, 2, 2)))) - (0.0414f * (__SBREF(__a_sb, 2, 3)))) - (0.0424f * (__SBREF(__a_sb, 3, -3)))) - (0.0434f * (__SBREF(__a_sb, 3, -2)))) - (0.0444f * (__SBREF(__a_sb, 3, -1)))) - (0.0454f * (__SBREF(__a_sb, 3, 0)))) - (0.0464f * (__SBREF(__a_sb, 3, 1)))) - (0.0474f * (__SBREF(__a_sb, 3, 2)))) - (0.0484f * (__SBREF(__a_sb, 3, 3)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_4(out, a) do { float etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_5_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-((-0.300f) * (__REGREF(__a, 0, 0)))) + (0.0015f * (__SBREF(__a_sb, -3, -3)))) + (0.0025f * (__SBREF(__a_sb, -3, -2)))) + (0.0035f * (__SBREF(__a_sb, -3, -1)))) + (0.0045f * (__SBREF(__a_sb, -3, 0)))) + (0.0055f * (__SBREF(__a_sb, -3, 1)))) + (0.0065f * (__SBREF(__a_sb, -3, 2)))) + (0.0075f * (__SBREF(__a_sb, -3, 3)))) + (0.0085f * (__SBREF(__a_sb, -2, -3)))) + (0.0095f * (__SBREF(__a_sb, -2, -2)))) + (0.0105f * (__SBREF(__a_sb, -2, -1)))) + (0.0115f * (__SBREF(__a_sb, -2, 0)))) + (0.0125f * (__SBREF(__a_sb, -2, 1)))) + (0.0135f * (__SBREF(__a_sb, -2, 2)))) + (0.0145f * (__SBREF(__a_sb, -2, 3)))) + (0.0155f * (__SBREF(__a_sb, -1, -3)))) + (0.0165f * (__SBREF(__a_sb, -1, -2)))) + (0.0175f * (__SBREF(__a_sb, -1, -1)))) + (0.0185f * (__SBREF(__a_sb, -1, 0)))) + (0.0195f * (__SBREF(__a_sb, -1, 1)))) + (0.0205f * (__SBREF(__a_sb, -1, 2)))) + (0.0215f * (__SBREF(__a_sb, -1, 3)))) + (0.0225f * (__SBREF(__a_sb, 0, -3)))) + (0.0235f * (__SBREF(__a_sb, 0, -2)))) + (0.0245f * (__SBREF(__a_sb, 0, -1)))) + (0.0255f * (__SBREF(__a_sb, 0, 1)))) + (0.0265f * (__SBREF(__a_sb, 0, 2)))) + (0.0275f * (__SBREF(__a_sb, 0, 3)))) + (0.0285f * (__SBREF(__a_sb, 1, -3)))) + (0.0295f * (__SBREF(__a_sb, 1, -2)))) + (0.0305f * (__SBREF(__a_sb, 1, -1)))) + (0.0315f * (__SBREF(__a_sb, 1, 0)))) + (0.0325f * (__SBREF(__a_sb, 1, 1)))) + (0.0335f * (__SBREF(__a_sb, 1, 2)))) + (0.0345f * (__SBREF(__a_sb, 1, 3)))) + (0.0355f * (__SBREF(__a_sb, 2, -3)))) + (0.0365f * (__SBREF(__a_sb, 2, -2)))) + (0.0375f * (__SBREF(__a_sb, 2, -1)))) + (0.0385f * (__SBREF(__a_sb, 2, 0)))) + (0.0395f * (__SBREF(__a_sb, 2, 1)))) + (0.0405f * (__SBREF(__a_sb, 2, 2)))) + (0.0415f * (__SBREF(__a_sb, 2, 3)))) + (0.0425f * (__SBREF(__a_sb, 3, -3)))) + (0.0435f * (__SBREF(__a_sb, 3, -2)))) + (0.0445f * (__SBREF(__a_sb, 3, -1)))) + (0.0455f * (__SBREF(__a_sb, 3, 0)))) + (0.0465f * (__SBREF(__a_sb, 3, 1)))) + (0.0475f * (__SBREF(__a_sb, 3, 2)))) + (0.1485f * (__SBREF(__a_sb, 3, 3))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_5(out, a) do { float etmp; __CALCEXPR_5_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_6_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((0.2048f * (__REGREF(__a, 0, 0)))) - (0.0016f * (__SBREF(__a_sb, -3, -3)))) - (0.0026f * (__SBREF(__a_sb, -3, -2)))) - (0.0036f * (__SBREF(__a_sb, -3, -1)))) - (0.0046f * (__SBREF(__a_sb, -3, 0)))) - (0.0056f * (__SBREF(__a_sb, -3, 1)))) - (0.0066f * (__SBREF(__a_sb, -3, 2)))) - (0.0076f * (__SBREF(__a_sb, -3, 3)))) - (0.0086f * (__SBREF(__a_sb, -2, -3)))) - (0.0096f * (__SBREF(__a_sb, -2, -2)))) - (0.0106f * (__SBREF(__a_sb, -2, -1)))) - (0.0116f * (__SBREF(__a_sb, -2, 0)))) - (0.0126f * (__SBREF(__a_sb, -2, 1)))) - (0.0136f * (__SBREF(__a_sb, -2, 2)))) - (0.0146f * (__SBREF(__a_sb, -2, 3)))) - (0.0156f * (__SBREF(__a_sb, -1, -3)))) - (0.0166f * (__SBREF(__a_sb, -1, -2)))) - (0.0176f * (__SBREF(__a_sb, -1, -1)))) - (0.0186f * (__SBREF(__a_sb, -1, 0)))) - (0.0196f * (__SBREF(__a_sb, -1, 1)))) - (0.0206f * (__SBREF(__a_sb, -1, 2)))) - (0.0216f * (__SBREF(__a_sb, -1, 3)))) - (0.0226f * (__SBREF(__a_sb, 0, -3)))) - (0.0236f * (__SBREF(__a_sb, 0, -2)))) - (0.0246f * (__SBREF(__a_sb, 0, -1)))) - (0.0256f * (__SBREF(__a_sb, 0, 1)))) - (0.0266f * (__SBREF(__a_sb, 0, 2)))) - (0.0276f * (__SBREF(__a_sb, 0, 3)))) - (0.0286f * (__SBREF(__a_sb, 1, -3)))) - (0.0296f * (__SBREF(__a_sb, 1, -2)))) - (0.0306f * (__SBREF(__a_sb, 1, -1)))) - (0.0316f * (__SBREF(__a_sb, 1, 0)))) - (0.0326f * (__SBREF(__a_sb, 1, 1)))) - (0.0336f * (__SBREF(__a_sb, 1, 2)))) - (0.0346f * (__SBREF(__a_sb, 1, 3)))) - (0.0356f * (__SBREF(__a_sb, 2, -3)))) - (0.0366f * (__SBREF(__a_sb, 2, -2)))) - (0.0376f * (__SBREF(__a_sb, 2, -1)))) - (0.0386f * (__SBREF(__a_sb, 2, 0)))) - (0.0396f * (__SBREF(__a_sb, 2, 1)))) - (0.0406f * (__SBREF(__a_sb, 2, 2)))) - (0.0416f * (__SBREF(__a_sb, 2, 3)))) - (0.0426f * (__SBREF(__a_sb, 3, -3)))) - (0.0436f * (__SBREF(__a_sb, 3, -2)))) - (0.0446f * (__SBREF(__a_sb, 3, -1)))) - (0.0456f * (__SBREF(__a_sb, 3, 0)))) - (0.0466f * (__SBREF(__a_sb, 3, 1)))) - (0.0476f * (__SBREF(__a_sb, 3, 2)))) - (0.0486f * (__SBREF(__a_sb, 3, 3)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_6(out, a) do { float etmp; __CALCEXPR_6_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); __CALCEXPR_5(out5, reg); __CALCEXPR_6(out6, reg); } while (0);
#define __CALC1(out0, out1, out2, out3, out4, out5, out6, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, reg); } else out3 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(3, __reg_1_3);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(3, __reg_1_3);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 10;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 3, __reg_1_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 3, __reg_1_5);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 3, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 3, __reg_1_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 3, __reg_1_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 3, __reg_1_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 3, __reg_1_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 2, __reg_1_5);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_6, __reg_0);
__STORE(__h - 1, __reg_1_6);
}
else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 3, __reg_1_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 2, __reg_1_5);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_0, __reg_1_6, __reg_0);
__STORE(__h - 1, __reg_1_6);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
}
else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 3, __reg_1_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 2, __reg_1_5);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0);
__STORE(__h - 1, __reg_1_6);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
}
else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 3, __reg_1_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 2, __reg_1_5);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0);
__STORE(__h - 1, __reg_1_6);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
}
else if (__h + 7 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 3, __reg_1_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 2, __reg_1_5);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0);
__STORE(__h - 1, __reg_1_6);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
}
else if (__h + 8 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 3, __reg_1_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 2, __reg_1_5);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0);
__STORE(__h - 1, __reg_1_6);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0);
__STORE(__h + 4, __reg_1_4);
}
else if (__h + 9 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 3, __reg_1_4);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 2, __reg_1_5);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0);
__STORE(__h - 1, __reg_1_6);
__LOAD(__reg_0, __h + 3);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 4);
__CALC1(__reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
__LOAD(__reg_0, __h + 5);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h + 2, __reg_1_2);
__LOAD(__reg_0, __h + 6);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h + 3, __reg_1_3);
__LOAD(__reg_0, __h + 7);
__CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h + 4, __reg_1_4);
__LOAD(__reg_0, __h + 8);
__CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_5, __reg_0);
__STORE(__h + 5, __reg_1_5);
}
}
else
{
for (__h = 7; __h <= __side1LenOl - 7;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 3, __reg_1_4);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 3, __reg_1_5);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 3, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 3, __reg_1_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 3, __reg_1_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 3, __reg_1_3);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0);
__STORE(__h - 3, __reg_1_4);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_0);
__STORE(__h - 3, __reg_1_5);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_0);
__STORE(__h - 3, __reg_1_6);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 3, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 3, __reg_1_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0);
__STORE(__h - 3, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0);
__STORE(__h - 3, __reg_1_3);
__h++;
}
}
|
72759b66accd8fcdbc6be2a347f85e6dfae991f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#define MAT_ROW 4
#define MAT_COL 6
#define CHANNELS 1
#define MASK_WIDTH 3
#define MASK_RADIUS MASK_WIDTH/2
#define O_TILE_WIDTH 12
#define BLOCK_WIDTH (O_TILE_WIDTH + (MASK_WIDTH-1))
void print_matrix(float* a,int n,int m)
{
int i,j;
for(i=0;i<n;i++)
{
for(j=0;j<m;j++)
{
printf("%f ",a[i*m+j]);
}
printf("\n");
}
}
void fill_mat(float* a,int n,int m)
{
//srand(time(NULL));
int i,j;
for(i=0;i<n;i++)
{
for(j=0;j<m;j++)
{
//a[i*n+j] = (rand()%2+1)*1.0;
a[i*m+j] = 1.0;
}
}
}
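// Tiled 2D convolution: each block stages a BLOCK_WIDTH x BLOCK_WIDTH input patch
// (the output tile plus a MASK_RADIUS halo on every side) in shared memory, and only
// the first O_TILE_WIDTH x O_TILE_WIDTH threads of the block accumulate a mask-weighted
// sum and write an output pixel. Halo cells that fall outside the image load as 0.0f.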
__global__ void convolution_shared(float *in, float* out,const float* __restrict__ M,int height, int width, int channels)
{
float sum, pixel, maskVal;
__shared__ float Ns[BLOCK_WIDTH][BLOCK_WIDTH];
int tx = threadIdx.x;
int ty = threadIdx.y;
int row_o = blockIdx.y*O_TILE_WIDTH + ty;
int col_o = blockIdx.x*O_TILE_WIDTH + tx;
int row_i = row_o - MASK_RADIUS;
int col_i = col_o - MASK_RADIUS;
for (int c = 0; c < channels; c++) {
if ( (row_i >= 0) && (row_i < height) &&
(col_i >= 0) && (col_i < width) ) {
Ns[ty][tx] = in[(row_i*width + col_i)*channels + c];
}
else {
Ns[ty][tx] = 0.0f;
}
__syncthreads();
sum = 0.0;
if (ty < O_TILE_WIDTH && tx < O_TILE_WIDTH) {
for (int y = 0; y < MASK_WIDTH; y++){
for (int x = 0; x < MASK_WIDTH; x++){
pixel = Ns[ty + y][tx + x];
maskVal = M[y*MASK_WIDTH + x];
sum += pixel*maskVal;
}
}
if (row_o < height && col_o < width) {
//out[ (row_o * width + col_o) * channels + c] = min(max(0.0f,sum),1.0f);
out[ (row_o * width + col_o) * channels + c] = sum;
}
}
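// Note: with CHANNELS == 1 the channel loop runs once, so the commented-out barrier
// below is never exercised; for multi-channel inputs a __syncthreads() here would be
// needed so the next iteration does not overwrite Ns while other threads still read it.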
// __syncthreads();
}
}
int main()
{
float *mat,*d_mat;
float *mask,*d_mask;
float *result,*d_result;
//float elapsed_time=0;
//hipEvent_t start,stop;
//hipEventCreate(&start);
//hipEventCreate(&stop);
int mat_size = MAT_ROW*MAT_COL*sizeof(float);
int mask_size = MASK_WIDTH*MASK_WIDTH*sizeof(float);
mat = (float*) malloc(mat_size);
result = (float*) malloc(mat_size);
mask = (float*) malloc(mask_size);
fill_mat(mat,MAT_ROW,MAT_COL);
fill_mat(mask,MASK_WIDTH,MASK_WIDTH);
printf("Printing Matrix \n");
print_matrix(mat,MAT_ROW,MAT_COL);
printf("Printing Mask\n");
print_matrix(mask,MASK_WIDTH,MASK_WIDTH);
printf("\n");
hipMalloc((void** )&d_mat,mat_size);
hipMalloc((void** )&d_result,mat_size);
hipMalloc((void** )&d_mask,mask_size);
hipMemcpy(d_mat,mat,mat_size,hipMemcpyHostToDevice);
hipMemcpy(d_mask,mask,mask_size,hipMemcpyHostToDevice);
dim3 my_block(BLOCK_WIDTH,BLOCK_WIDTH);
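// Note: the grid below divides by the full block width rather than O_TILE_WIDTH. That is
// enough for this 4x6 example (a single block covers the whole output), but a general
// launch would use (dim + O_TILE_WIDTH - 1) / O_TILE_WIDTH blocks per axis, since each
// block only produces an O_TILE_WIDTH x O_TILE_WIDTH output tile.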
dim3 my_grid((MAT_COL + BLOCK_WIDTH-1)/my_block.x,(MAT_ROW + BLOCK_WIDTH-1)/my_block.y);
hipLaunchKernelGGL(( convolution_shared), dim3(my_grid),dim3(my_block), 0, 0, d_mat, d_result, d_mask, MAT_ROW,MAT_COL,CHANNELS);
hipMemcpy(result,d_result,mat_size,hipMemcpyDeviceToHost);
printf("Printing result\n");
print_matrix(result,MAT_ROW,MAT_COL);
//printf("Elapsed time: %f\n",elapsed_time);
return 0;
}
| 72759b66accd8fcdbc6be2a347f85e6dfae991f3.cu | #include <stdio.h>
#include <stdlib.h>
#define MAT_ROW 4
#define MAT_COL 6
#define CHANNELS 1
#define MASK_WIDTH 3
#define MASK_RADIUS MASK_WIDTH/2
#define O_TILE_WIDTH 12
#define BLOCK_WIDTH (O_TILE_WIDTH + (MASK_WIDTH-1))
void print_matrix(float* a,int n,int m)
{
int i,j;
for(i=0;i<n;i++)
{
for(j=0;j<m;j++)
{
printf("%f ",a[i*m+j]);
}
printf("\n");
}
}
void fill_mat(float* a,int n,int m)
{
//srand(time(NULL));
int i,j;
for(i=0;i<n;i++)
{
for(j=0;j<m;j++)
{
//a[i*n+j] = (rand()%2+1)*1.0;
a[i*m+j] = 1.0;
}
}
}
__global__ void convolution_shared(float *in, float* out,const float* __restrict__ M,int height, int width, int channels)
{
float sum, pixel, maskVal;
__shared__ float Ns[BLOCK_WIDTH][BLOCK_WIDTH];
int tx = threadIdx.x;
int ty = threadIdx.y;
int row_o = blockIdx.y*O_TILE_WIDTH + ty;
int col_o = blockIdx.x*O_TILE_WIDTH + tx;
int row_i = row_o - MASK_RADIUS;
int col_i = col_o - MASK_RADIUS;
for (int c = 0; c < channels; c++) {
if ( (row_i >= 0) && (row_i < height) &&
(col_i >= 0) && (col_i < width) ) {
Ns[ty][tx] = in[(row_i*width + col_i)*channels + c];
}
else {
Ns[ty][tx] = 0.0f;
}
__syncthreads();
sum = 0.0;
if (ty < O_TILE_WIDTH && tx < O_TILE_WIDTH) {
for (int y = 0; y < MASK_WIDTH; y++){
for (int x = 0; x < MASK_WIDTH; x++){
pixel = Ns[ty + y][tx + x];
maskVal = M[y*MASK_WIDTH + x];
sum += pixel*maskVal;
}
}
if (row_o < height && col_o < width) {
//out[ (row_o * width + col_o) * channels + c] = min(max(0.0f,sum),1.0f);
out[ (row_o * width + col_o) * channels + c] = sum;
}
}
// __syncthreads();
}
}
int main()
{
float *mat,*d_mat;
float *mask,*d_mask;
float *result,*d_result;
//float elapsed_time=0;
//cudaEvent_t start,stop;
//cudaEventCreate(&start);
//cudaEventCreate(&stop);
int mat_size = MAT_ROW*MAT_COL*sizeof(float);
int mask_size = MASK_WIDTH*MASK_WIDTH*sizeof(float);
mat = (float*) malloc(mat_size);
result = (float*) malloc(mat_size);
mask = (float*) malloc(mask_size);
fill_mat(mat,MAT_ROW,MAT_COL);
fill_mat(mask,MASK_WIDTH,MASK_WIDTH);
printf("Printing Matrix \n");
print_matrix(mat,MAT_ROW,MAT_COL);
printf("Printing Mask\n");
print_matrix(mask,MASK_WIDTH,MASK_WIDTH);
printf("\n");
cudaMalloc((void** )&d_mat,mat_size);
cudaMalloc((void** )&d_result,mat_size);
cudaMalloc((void** )&d_mask,mask_size);
cudaMemcpy(d_mat,mat,mat_size,cudaMemcpyHostToDevice);
cudaMemcpy(d_mask,mask,mask_size,cudaMemcpyHostToDevice);
dim3 my_block(BLOCK_WIDTH,BLOCK_WIDTH);
dim3 my_grid((MAT_COL + BLOCK_WIDTH-1)/my_block.x,(MAT_ROW + BLOCK_WIDTH-1)/my_block.y);
convolution_shared<<<my_grid,my_block>>>(d_mat, d_result, d_mask, MAT_ROW,MAT_COL,CHANNELS);
cudaMemcpy(result,d_result,mat_size,cudaMemcpyDeviceToHost);
printf("Printing result\n");
print_matrix(result,MAT_ROW,MAT_COL);
//printf("Elapsed time: %f\n",elapsed_time);
return 0;
}
|
3de3053d5932a4da1dbec878bdc72d08903d6a0c.hip | // !!! This is a file automatically generated by hipify!!!
/******************************************************************************
* FILENAME: test_einsum.cu
*
* AUTHORS: Yutong Huang
*
* LAST MODIFIED: Tue 25 May 2021 05:20:41 PM CST
*
* CONTACT: [email protected]
******************************************************************************/
#define CATCH_CONFIG_MAIN
#include <macro.h>
#include <catch2/catch.hpp>
#include "cudaop/cudaop.h"
namespace {
constexpr float epsilon = 0.001f;
}
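// "bnhw,bnchw->bchw" contracts over n: out[b,c,h,w] = sum_n A[b,n,h,w] * B[b,n,c,h,w].
// The shape argument {2, 2, 3, 4, 5} passed below is assumed to be {B, N, C, H, W},
// which matches the element counts of the bnhw, bnchw and expected test vectors.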
TEST_CASE("bnhw,bnchw->bchw", "einsum") {
auto bnhw_data = std::vector<float>{
0.98952687, 0.04176088, 0.3844953, 0.5966755, 0.6466146, 0.40169004, 0.67907494, 0.20229353, 0.61596525,
0.22915623, 0.86638665, 0.75710434, 0.7243245, 0.32166293, 0.774219, 0.08894608, 0.2995787, 0.94479555,
0.28149107, 0.15999952, 0.8603404, 0.94172996, 0.30328578, 0.7044428, 0.21471687, 0.1937418, 0.19771501,
0.15756913, 0.41445565, 0.7875617, 0.03444672, 0.36793378, 0.4880629, 0.5425712, 0.08735592, 0.07209481,
0.8772495, 0.54418015, 0.17287406, 0.2580385, 0.98952687, 0.04176088, 0.3844953, 0.5966755, 0.6466146,
0.40169004, 0.67907494, 0.20229353, 0.61596525, 0.22915623, 0.86638665, 0.75710434, 0.7243245, 0.32166293,
0.774219, 0.08894608, 0.2995787, 0.94479555, 0.28149107, 0.15999952, 0.8603404, 0.94172996, 0.30328578,
0.7044428, 0.21471687, 0.1937418, 0.19771501, 0.15756913, 0.41445565, 0.7875617, 0.03444672, 0.36793378,
0.4880629, 0.5425712, 0.08735592, 0.07209481, 0.8772495, 0.54418015, 0.17287406, 0.2580385};
auto bnchw_data = std::vector<float>{
0.04820577, 0.82978696, 0.72180736, 0.6911125, 0.11814678, 0.01312762, 0.39848906, 0.86195713, 0.00259904,
0.26014462, 0.49165586, 0.07201776, 0.24074587, 0.56283444, 0.02860511, 0.8491378, 0.4759101, 0.85784125,
0.7365546, 0.8434943, 0.8852057, 0.36914098, 0.8211008, 0.23839638, 0.759618, 0.42733106, 0.49844787,
0.15379295, 0.89953774, 0.7949956, 0.37237963, 0.8193815, 0.7005051, 0.98153377, 0.27582574, 0.69045866,
0.47930536, 0.20860094, 0.08432359, 0.58424246, 0.5635306, 0.49006575, 0.07225095, 0.26734546, 0.6466548,
0.50442284, 0.05602058, 0.8262543, 0.5982453, 0.36856824, 0.66195023, 0.4691895, 0.4828491, 0.5522322,
0.45805427, 0.5461386, 0.68157166, 0.6302215, 0.74814206, 0.01727323, 0.82130766, 0.5274686, 0.057183,
0.79636645, 0.16983485, 0.6055268, 0.34731758, 0.9611698, 0.14894867, 0.7943162, 0.647746, 0.9030135,
0.6342851, 0.5670319, 0.5948853, 0.6032726, 0.73637587, 0.77164334, 0.19135143, 0.6663067, 0.57566446,
0.12159332, 0.7112747, 0.8085284, 0.89395845, 0.50627136, 0.63244, 0.5375169, 0.15575734, 0.026172,
0.13990885, 0.16550994, 0.95312476, 0.8317196, 0.6336803, 0.84348726, 0.31107208, 0.65081906, 0.54749215,
0.82738906, 0.146291, 0.1683158, 0.6674614, 0.55747604, 0.37911257, 0.42509592, 0.6083007, 0.52029556,
0.6542922, 0.4218075, 0.1665606, 0.3125575, 0.48092726, 0.20193796, 0.38793778, 0.84551543, 0.95340884,
0.91425925, 0.12150807, 0.41898167, 0.04820577, 0.82978696, 0.72180736, 0.6911125, 0.11814678, 0.01312762,
0.39848906, 0.86195713, 0.00259904, 0.26014462, 0.49165586, 0.07201776, 0.24074587, 0.56283444, 0.02860511,
0.8491378, 0.4759101, 0.85784125, 0.7365546, 0.8434943, 0.8852057, 0.36914098, 0.8211008, 0.23839638,
0.759618, 0.42733106, 0.49844787, 0.15379295, 0.89953774, 0.7949956, 0.37237963, 0.8193815, 0.7005051,
0.98153377, 0.27582574, 0.69045866, 0.47930536, 0.20860094, 0.08432359, 0.58424246, 0.5635306, 0.49006575,
0.07225095, 0.26734546, 0.6466548, 0.50442284, 0.05602058, 0.8262543, 0.5982453, 0.36856824, 0.66195023,
0.4691895, 0.4828491, 0.5522322, 0.45805427, 0.5461386, 0.68157166, 0.6302215, 0.74814206, 0.01727323,
0.82130766, 0.5274686, 0.057183, 0.79636645, 0.16983485, 0.6055268, 0.34731758, 0.9611698, 0.14894867,
0.7943162, 0.647746, 0.9030135, 0.6342851, 0.5670319, 0.5948853, 0.6032726, 0.73637587, 0.77164334,
0.19135143, 0.6663067, 0.57566446, 0.12159332, 0.7112747, 0.8085284, 0.89395845, 0.50627136, 0.63244,
0.5375169, 0.15575734, 0.026172, 0.13990885, 0.16550994, 0.95312476, 0.8317196, 0.6336803, 0.84348726,
0.31107208, 0.65081906, 0.54749215, 0.82738906, 0.146291, 0.1683158, 0.6674614, 0.55747604, 0.37911257,
0.42509592, 0.6083007, 0.52029556, 0.6542922, 0.4218075, 0.1665606, 0.3125575, 0.48092726, 0.20193796,
0.38793778, 0.84551543, 0.95340884, 0.91425925, 0.12150807, 0.41898167};
auto expected = std::vector<float>{
0.75430506, 0.53138566, 0.2948743, 0.97336453, 0.11286184, 0.12258908, 0.3392738, 0.32581902, 0.06333353,
0.68518674, 0.4482768, 0.38677415, 0.48394918, 0.48869818, 0.07411337, 0.11902031, 0.7885579, 1.2303976,
0.24041325, 0.30689144, 1.3712022, 0.12992372, 0.5314289, 0.7118073, 0.68312806, 0.26974055, 0.4635263,
0.11580738, 0.6186385, 0.20279026, 0.32744414, 0.68125397, 0.9725778, 0.76699007, 0.26890525, 0.12222465,
0.4164775, 0.5512481, 0.11838353, 0.30697674, 0.6834887, 0.17897362, 0.23021169, 0.55222845, 0.49953827,
0.28498048, 0.15831235, 0.24912843, 0.6396735, 0.41665912, 0.57924235, 0.47022587, 0.58446217, 0.28719836,
0.38852298, 0.10953416, 1.0405617, 1.0929521, 0.23160091, 0.11087711, 0.75430506, 0.53138566, 0.2948743,
0.97336453, 0.11286184, 0.12258908, 0.3392738, 0.32581902, 0.06333353, 0.68518674, 0.4482768, 0.38677415,
0.48394918, 0.48869818, 0.07411337, 0.11902031, 0.7885579, 1.2303976, 0.24041325, 0.30689144, 1.3712022,
0.12992372, 0.5314289, 0.7118073, 0.68312806, 0.26974055, 0.4635263, 0.11580738, 0.6186385, 0.20279026,
0.32744414, 0.68125397, 0.9725778, 0.76699007, 0.26890525, 0.12222465, 0.4164775, 0.5512481, 0.11838353,
0.30697674, 0.6834887, 0.17897362, 0.23021169, 0.55222845, 0.49953827, 0.28498048, 0.15831235, 0.24912843,
0.6396735, 0.41665912, 0.57924235, 0.47022587, 0.58446217, 0.28719836, 0.38852298, 0.10953416, 1.0405617,
1.0929521, 0.23160091, 0.11087711};
void *bnhw_device = nullptr;
void *bnchw_device = nullptr;
void *out_device = nullptr;
CUDA_CHECK(hipMalloc(&bnhw_device, sizeof(float) * bnhw_data.size()));
CUDA_CHECK(hipMalloc(&bnchw_device, sizeof(float) * bnchw_data.size()));
CUDA_CHECK(hipMalloc(&out_device, sizeof(float) * expected.size()));
CUDA_CHECK(hipMemcpy(bnhw_device, bnhw_data.data(), sizeof(float) * bnhw_data.size(), hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(bnchw_device, bnchw_data.data(), sizeof(float) * bnchw_data.size(), hipMemcpyHostToDevice));
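// bnhw,bnchw->bchw: contract over n; the dims {2, 2, 3, 4, 5} match B, N, C, H, W for these inputs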
smartmore::cudaop::Einsum<smartmore::cudaop::EinsumType::kBNHW_BNCHW_To_BCHW>(
(float *)bnhw_device, (float *)bnchw_device, (float *)out_device, {2, 2, 3, 4, 5});
std::vector<float> got(expected.size());
CUDA_CHECK(hipMemcpy(got.data(), out_device, sizeof(float) * got.size(), hipMemcpyDeviceToHost));
float diff_sum = 0.0, max_diff = 0.0;
for (int i = 0; i < expected.size(); i++) {
float diff = std::abs(got[i] - expected[i]);
diff_sum += diff;
max_diff = diff > max_diff ? diff : max_diff;
}
REQUIRE(max_diff <= epsilon);
}
TEST_CASE("bchw,bchw->bhw", "einsum") {
auto bchw0_data = std::vector<float>{
0.7720386, 0.2621952, 0.9790373, 0.04775693, 0.3039677, 0.84724796, 0.05163439, 0.75772196,
0.93399405, 0.78663844, 0.4604165, 0.6283804, 0.7648064, 0.1297822, 0.95312023, 0.34413552,
0.60534424, 0.51087815, 0.35468173, 0.374331, 0.4429034, 0.09561125, 0.11056575, 0.1494092,
0.7720386, 0.2621952, 0.9790373, 0.04775693, 0.3039677, 0.84724796, 0.05163439, 0.75772196,
0.93399405, 0.78663844, 0.4604165, 0.6283804, 0.7648064, 0.1297822, 0.95312023, 0.34413552,
0.60534424, 0.51087815, 0.35468173, 0.374331, 0.4429034, 0.09561125, 0.11056575, 0.1494092};
auto bchw1_data = std::vector<float>{
0.52389467, 0.71089935, 0.975122, 0.9387723, 0.17344964, 0.12879254, 0.37104362, 0.78295726,
0.88940346, 0.5297998, 0.88897085, 0.62121063, 0.44728228, 0.2524461, 0.22395177, 0.00496765,
0.8894811, 0.4043403, 0.740176, 0.5375936, 0.13456029, 0.38475674, 0.14029208, 0.944007,
0.52389467, 0.71089935, 0.975122, 0.9387723, 0.17344964, 0.12879254, 0.37104362, 0.78295726,
0.88940346, 0.5297998, 0.88897085, 0.62121063, 0.44728228, 0.2524461, 0.22395177, 0.00496765,
0.8894811, 0.4043403, 0.740176, 0.5375936, 0.13456029, 0.38475674, 0.14029208, 0.944007};
auto expected = std::vector<float>{0.7465513, 0.2191574, 1.1681337, 0.04654243, 0.59116536, 0.31568784,
0.2816855, 0.7945019, 0.89029473, 0.45354795, 0.42480835, 0.53139997,
0.7465513, 0.2191574, 1.1681337, 0.04654243, 0.59116536, 0.31568784,
0.2816855, 0.7945019, 0.89029473, 0.45354795, 0.42480835, 0.53139997};
void *bchw_0 = nullptr;
void *bchw_1 = nullptr;
void *out = nullptr;
CUDA_CHECK(hipMalloc(&bchw_0, sizeof(float) * bchw0_data.size()));
CUDA_CHECK(hipMalloc(&bchw_1, sizeof(float) * bchw1_data.size()));
CUDA_CHECK(hipMalloc(&out, sizeof(float) * expected.size()));
CUDA_CHECK(hipMemcpy(bchw_0, bchw0_data.data(), bchw0_data.size() * sizeof(float), hipMemcpyHostToDevice));
CUDA_CHECK(hipMemcpy(bchw_1, bchw1_data.data(), bchw1_data.size() * sizeof(float), hipMemcpyHostToDevice));
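// bchw,bchw->bhw: element-wise product summed over the channel axis; dims {2, 2, 3, 4} are B, C, H, W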
smartmore::cudaop::Einsum<smartmore::cudaop::EinsumType::kBCHW_BCHW_To_BHW>((float *)bchw_0, (float *)bchw_1,
(float *)out, {2, 2, 3, 4});
std::vector<float> got(expected.size());
CUDA_CHECK(hipMemcpy(got.data(), out, sizeof(float) * got.size(), hipMemcpyDeviceToHost));
float diff_sum = 0.0, max_diff = 0.0;
for (int i = 0; i < expected.size(); i++) {
float diff = std::abs(got[i] - expected[i]);
diff_sum += diff;
max_diff = diff > max_diff ? diff : max_diff;
}
REQUIRE(max_diff <= epsilon);
}
| 3de3053d5932a4da1dbec878bdc72d08903d6a0c.cu | /******************************************************************************
* FILENAME: test_einsum.cu
*
* AUTHORS: Yutong Huang
*
* LAST MODIFIED: Tue 25 May 2021 05:20:41 PM CST
*
* CONTACT: [email protected]
******************************************************************************/
#define CATCH_CONFIG_MAIN
#include <macro.h>
#include <catch2/catch.hpp>
#include "cudaop/cudaop.h"
namespace {
constexpr float epsilon = 0.001f;
}
TEST_CASE("bnhw,bnchw->bchw", "einsum") {
auto bnhw_data = std::vector<float>{
0.98952687, 0.04176088, 0.3844953, 0.5966755, 0.6466146, 0.40169004, 0.67907494, 0.20229353, 0.61596525,
0.22915623, 0.86638665, 0.75710434, 0.7243245, 0.32166293, 0.774219, 0.08894608, 0.2995787, 0.94479555,
0.28149107, 0.15999952, 0.8603404, 0.94172996, 0.30328578, 0.7044428, 0.21471687, 0.1937418, 0.19771501,
0.15756913, 0.41445565, 0.7875617, 0.03444672, 0.36793378, 0.4880629, 0.5425712, 0.08735592, 0.07209481,
0.8772495, 0.54418015, 0.17287406, 0.2580385, 0.98952687, 0.04176088, 0.3844953, 0.5966755, 0.6466146,
0.40169004, 0.67907494, 0.20229353, 0.61596525, 0.22915623, 0.86638665, 0.75710434, 0.7243245, 0.32166293,
0.774219, 0.08894608, 0.2995787, 0.94479555, 0.28149107, 0.15999952, 0.8603404, 0.94172996, 0.30328578,
0.7044428, 0.21471687, 0.1937418, 0.19771501, 0.15756913, 0.41445565, 0.7875617, 0.03444672, 0.36793378,
0.4880629, 0.5425712, 0.08735592, 0.07209481, 0.8772495, 0.54418015, 0.17287406, 0.2580385};
auto bnchw_data = std::vector<float>{
0.04820577, 0.82978696, 0.72180736, 0.6911125, 0.11814678, 0.01312762, 0.39848906, 0.86195713, 0.00259904,
0.26014462, 0.49165586, 0.07201776, 0.24074587, 0.56283444, 0.02860511, 0.8491378, 0.4759101, 0.85784125,
0.7365546, 0.8434943, 0.8852057, 0.36914098, 0.8211008, 0.23839638, 0.759618, 0.42733106, 0.49844787,
0.15379295, 0.89953774, 0.7949956, 0.37237963, 0.8193815, 0.7005051, 0.98153377, 0.27582574, 0.69045866,
0.47930536, 0.20860094, 0.08432359, 0.58424246, 0.5635306, 0.49006575, 0.07225095, 0.26734546, 0.6466548,
0.50442284, 0.05602058, 0.8262543, 0.5982453, 0.36856824, 0.66195023, 0.4691895, 0.4828491, 0.5522322,
0.45805427, 0.5461386, 0.68157166, 0.6302215, 0.74814206, 0.01727323, 0.82130766, 0.5274686, 0.057183,
0.79636645, 0.16983485, 0.6055268, 0.34731758, 0.9611698, 0.14894867, 0.7943162, 0.647746, 0.9030135,
0.6342851, 0.5670319, 0.5948853, 0.6032726, 0.73637587, 0.77164334, 0.19135143, 0.6663067, 0.57566446,
0.12159332, 0.7112747, 0.8085284, 0.89395845, 0.50627136, 0.63244, 0.5375169, 0.15575734, 0.026172,
0.13990885, 0.16550994, 0.95312476, 0.8317196, 0.6336803, 0.84348726, 0.31107208, 0.65081906, 0.54749215,
0.82738906, 0.146291, 0.1683158, 0.6674614, 0.55747604, 0.37911257, 0.42509592, 0.6083007, 0.52029556,
0.6542922, 0.4218075, 0.1665606, 0.3125575, 0.48092726, 0.20193796, 0.38793778, 0.84551543, 0.95340884,
0.91425925, 0.12150807, 0.41898167, 0.04820577, 0.82978696, 0.72180736, 0.6911125, 0.11814678, 0.01312762,
0.39848906, 0.86195713, 0.00259904, 0.26014462, 0.49165586, 0.07201776, 0.24074587, 0.56283444, 0.02860511,
0.8491378, 0.4759101, 0.85784125, 0.7365546, 0.8434943, 0.8852057, 0.36914098, 0.8211008, 0.23839638,
0.759618, 0.42733106, 0.49844787, 0.15379295, 0.89953774, 0.7949956, 0.37237963, 0.8193815, 0.7005051,
0.98153377, 0.27582574, 0.69045866, 0.47930536, 0.20860094, 0.08432359, 0.58424246, 0.5635306, 0.49006575,
0.07225095, 0.26734546, 0.6466548, 0.50442284, 0.05602058, 0.8262543, 0.5982453, 0.36856824, 0.66195023,
0.4691895, 0.4828491, 0.5522322, 0.45805427, 0.5461386, 0.68157166, 0.6302215, 0.74814206, 0.01727323,
0.82130766, 0.5274686, 0.057183, 0.79636645, 0.16983485, 0.6055268, 0.34731758, 0.9611698, 0.14894867,
0.7943162, 0.647746, 0.9030135, 0.6342851, 0.5670319, 0.5948853, 0.6032726, 0.73637587, 0.77164334,
0.19135143, 0.6663067, 0.57566446, 0.12159332, 0.7112747, 0.8085284, 0.89395845, 0.50627136, 0.63244,
0.5375169, 0.15575734, 0.026172, 0.13990885, 0.16550994, 0.95312476, 0.8317196, 0.6336803, 0.84348726,
0.31107208, 0.65081906, 0.54749215, 0.82738906, 0.146291, 0.1683158, 0.6674614, 0.55747604, 0.37911257,
0.42509592, 0.6083007, 0.52029556, 0.6542922, 0.4218075, 0.1665606, 0.3125575, 0.48092726, 0.20193796,
0.38793778, 0.84551543, 0.95340884, 0.91425925, 0.12150807, 0.41898167};
auto expected = std::vector<float>{
0.75430506, 0.53138566, 0.2948743, 0.97336453, 0.11286184, 0.12258908, 0.3392738, 0.32581902, 0.06333353,
0.68518674, 0.4482768, 0.38677415, 0.48394918, 0.48869818, 0.07411337, 0.11902031, 0.7885579, 1.2303976,
0.24041325, 0.30689144, 1.3712022, 0.12992372, 0.5314289, 0.7118073, 0.68312806, 0.26974055, 0.4635263,
0.11580738, 0.6186385, 0.20279026, 0.32744414, 0.68125397, 0.9725778, 0.76699007, 0.26890525, 0.12222465,
0.4164775, 0.5512481, 0.11838353, 0.30697674, 0.6834887, 0.17897362, 0.23021169, 0.55222845, 0.49953827,
0.28498048, 0.15831235, 0.24912843, 0.6396735, 0.41665912, 0.57924235, 0.47022587, 0.58446217, 0.28719836,
0.38852298, 0.10953416, 1.0405617, 1.0929521, 0.23160091, 0.11087711, 0.75430506, 0.53138566, 0.2948743,
0.97336453, 0.11286184, 0.12258908, 0.3392738, 0.32581902, 0.06333353, 0.68518674, 0.4482768, 0.38677415,
0.48394918, 0.48869818, 0.07411337, 0.11902031, 0.7885579, 1.2303976, 0.24041325, 0.30689144, 1.3712022,
0.12992372, 0.5314289, 0.7118073, 0.68312806, 0.26974055, 0.4635263, 0.11580738, 0.6186385, 0.20279026,
0.32744414, 0.68125397, 0.9725778, 0.76699007, 0.26890525, 0.12222465, 0.4164775, 0.5512481, 0.11838353,
0.30697674, 0.6834887, 0.17897362, 0.23021169, 0.55222845, 0.49953827, 0.28498048, 0.15831235, 0.24912843,
0.6396735, 0.41665912, 0.57924235, 0.47022587, 0.58446217, 0.28719836, 0.38852298, 0.10953416, 1.0405617,
1.0929521, 0.23160091, 0.11087711};
void *bnhw_device = nullptr;
void *bnchw_device = nullptr;
void *out_device = nullptr;
CUDA_CHECK(cudaMalloc(&bnhw_device, sizeof(float) * bnhw_data.size()));
CUDA_CHECK(cudaMalloc(&bnchw_device, sizeof(float) * bnchw_data.size()));
CUDA_CHECK(cudaMalloc(&out_device, sizeof(float) * expected.size()));
CUDA_CHECK(cudaMemcpy(bnhw_device, bnhw_data.data(), sizeof(float) * bnhw_data.size(), cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(bnchw_device, bnchw_data.data(), sizeof(float) * bnchw_data.size(), cudaMemcpyHostToDevice));
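// bnhw,bnchw->bchw: contract over n; the dims {2, 2, 3, 4, 5} match B, N, C, H, W for these inputs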
smartmore::cudaop::Einsum<smartmore::cudaop::EinsumType::kBNHW_BNCHW_To_BCHW>(
(float *)bnhw_device, (float *)bnchw_device, (float *)out_device, {2, 2, 3, 4, 5});
std::vector<float> got(expected.size());
CUDA_CHECK(cudaMemcpy(got.data(), out_device, sizeof(float) * got.size(), cudaMemcpyDeviceToHost));
float diff_sum = 0.0, max_diff = 0.0;
for (int i = 0; i < expected.size(); i++) {
float diff = std::abs(got[i] - expected[i]);
diff_sum += diff;
max_diff = diff > max_diff ? diff : max_diff;
}
REQUIRE(max_diff <= epsilon);
}
TEST_CASE("bchw,bchw->bhw", "einsum") {
auto bchw0_data = std::vector<float>{
0.7720386, 0.2621952, 0.9790373, 0.04775693, 0.3039677, 0.84724796, 0.05163439, 0.75772196,
0.93399405, 0.78663844, 0.4604165, 0.6283804, 0.7648064, 0.1297822, 0.95312023, 0.34413552,
0.60534424, 0.51087815, 0.35468173, 0.374331, 0.4429034, 0.09561125, 0.11056575, 0.1494092,
0.7720386, 0.2621952, 0.9790373, 0.04775693, 0.3039677, 0.84724796, 0.05163439, 0.75772196,
0.93399405, 0.78663844, 0.4604165, 0.6283804, 0.7648064, 0.1297822, 0.95312023, 0.34413552,
0.60534424, 0.51087815, 0.35468173, 0.374331, 0.4429034, 0.09561125, 0.11056575, 0.1494092};
auto bchw1_data = std::vector<float>{
0.52389467, 0.71089935, 0.975122, 0.9387723, 0.17344964, 0.12879254, 0.37104362, 0.78295726,
0.88940346, 0.5297998, 0.88897085, 0.62121063, 0.44728228, 0.2524461, 0.22395177, 0.00496765,
0.8894811, 0.4043403, 0.740176, 0.5375936, 0.13456029, 0.38475674, 0.14029208, 0.944007,
0.52389467, 0.71089935, 0.975122, 0.9387723, 0.17344964, 0.12879254, 0.37104362, 0.78295726,
0.88940346, 0.5297998, 0.88897085, 0.62121063, 0.44728228, 0.2524461, 0.22395177, 0.00496765,
0.8894811, 0.4043403, 0.740176, 0.5375936, 0.13456029, 0.38475674, 0.14029208, 0.944007};
auto expected = std::vector<float>{0.7465513, 0.2191574, 1.1681337, 0.04654243, 0.59116536, 0.31568784,
0.2816855, 0.7945019, 0.89029473, 0.45354795, 0.42480835, 0.53139997,
0.7465513, 0.2191574, 1.1681337, 0.04654243, 0.59116536, 0.31568784,
0.2816855, 0.7945019, 0.89029473, 0.45354795, 0.42480835, 0.53139997};
void *bchw_0 = nullptr;
void *bchw_1 = nullptr;
void *out = nullptr;
CUDA_CHECK(cudaMalloc(&bchw_0, sizeof(float) * bchw0_data.size()));
CUDA_CHECK(cudaMalloc(&bchw_1, sizeof(float) * bchw1_data.size()));
CUDA_CHECK(cudaMalloc(&out, sizeof(float) * expected.size()));
CUDA_CHECK(cudaMemcpy(bchw_0, bchw0_data.data(), bchw0_data.size() * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(bchw_1, bchw1_data.data(), bchw1_data.size() * sizeof(float), cudaMemcpyHostToDevice));
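// bchw,bchw->bhw: element-wise product summed over the channel axis; dims {2, 2, 3, 4} are B, C, H, W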
smartmore::cudaop::Einsum<smartmore::cudaop::EinsumType::kBCHW_BCHW_To_BHW>((float *)bchw_0, (float *)bchw_1,
(float *)out, {2, 2, 3, 4});
std::vector<float> got(expected.size());
CUDA_CHECK(cudaMemcpy(got.data(), out, sizeof(float) * got.size(), cudaMemcpyDeviceToHost));
float diff_sum = 0.0, max_diff = 0.0;
for (int i = 0; i < expected.size(); i++) {
float diff = std::abs(got[i] - expected[i]);
diff_sum += diff;
max_diff = diff > max_diff ? diff : max_diff;
}
REQUIRE(max_diff <= epsilon);
}
|
5e935b3992fe5915ff935dc790479323e6ac3e66.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
# include "hip/hip_runtime.h"
#define SIZE 50
__global__ void VectorAdd(int a[], int b[], int c[], int n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
c[i] = a[i] + b[i];
}
}
int main()
{
int *a, *b, *c;
a = (int*)malloc(SIZE * sizeof(int));
b = (int*)malloc(SIZE * sizeof(int));
c = (int*)malloc(SIZE * sizeof(int));
for (int i = 0; i < SIZE; i++)
{
a[i] = i+1;
b[i] = i;
}
int *d_a, *d_b, *d_c;
hipMalloc(&d_a, SIZE * sizeof(int));
hipMalloc(&d_b, SIZE * sizeof(int));
hipMalloc(&d_c, SIZE * sizeof(int));
hipMemcpy(d_a, a, SIZE * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, b, SIZE * sizeof(int), hipMemcpyHostToDevice);
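// launch SIZE threads in total (2 blocks of SIZE/2 threads), one thread per vector element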
hipLaunchKernelGGL(( VectorAdd) , dim3(2), dim3(SIZE/2) , 0, 0, d_a, d_b, d_c, SIZE);
hipDeviceSynchronize(); // wait until all the threads have finished their work
hipMemcpy(c, d_c, SIZE * sizeof(int), hipMemcpyDeviceToHost);
for (int i = 0; i < SIZE; i++)
printf("%d + %d = %d\n", a[i], b[i], c[i]);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
free(a);
free(b);
free(c);
return 0;
}
| 5e935b3992fe5915ff935dc790479323e6ac3e66.cu | #include <stdio.h>
# include "cuda_runtime.h"
#define SIZE 50
__global__ void VectorAdd(int a[], int b[], int c[], int n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n)
{
c[i] = a[i] + b[i];
}
}
int main()
{
int *a, *b, *c;
a = (int*)malloc(SIZE * sizeof(int));
b = (int*)malloc(SIZE * sizeof(int));
c = (int*)malloc(SIZE * sizeof(int));
for (int i = 0; i < SIZE; i++)
{
a[i] = i+1;
b[i] = i;
}
int *d_a, *d_b, *d_c;
cudaMalloc(&d_a, SIZE * sizeof(int));
cudaMalloc(&d_b, SIZE * sizeof(int));
cudaMalloc(&d_c, SIZE * sizeof(int));
cudaMemcpy(d_a, a, SIZE * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, SIZE * sizeof(int), cudaMemcpyHostToDevice);
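// launch SIZE threads in total (2 blocks of SIZE/2 threads), one thread per vector element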
VectorAdd <<< 2, SIZE/2 >>> (d_a, d_b, d_c, SIZE);
cudaDeviceSynchronize(); // wait until all the threads have finished their work
cudaMemcpy(c, d_c, SIZE * sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i < SIZE; i++)
printf("%d + %d = %d\n", a[i], b[i], c[i]);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(a);
free(b);
free(c);
return 0;
}
|
2d30823221dfa6e0a342ee0e4e883c96a451db90.hip | // !!! This is a file automatically generated by hipify!!!
#include<iostream>
#include "room.cuh"
#include "math.h"
using namespace std;
__device__ __host__
void rot_around_point(float center[3], float * x, float * y, float s, float c) {
// translate point back to origin:
*x -= center[0];
*y -= center[1];
// rotate point
float xnew = *x * c - *y * s;
float ynew = *x * s + *y * c;
// translate point back:
*x = xnew + center[0];
*y = ynew + center[1];
}
//ax,ay,bx,by
void Room::init_a_wall(wall *newWall, vector<float> params) {
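// derive the wall's center, extent, z-rotation, and implicit line a*x + b*y + c = 0 from its two endpoints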
float ax = params[0], ay = params[1], bx = params[2], by = params[3];
newWall->translation[0] = (ax + bx) / 2;newWall->translation[1] = (ay + by) / 2;newWall->translation[2] = 0;
newWall->width = sqrtf(powf((by - ay), 2) + powf((bx - ax), 2));
copy(params.begin(), params.end(), newWall->vertices);
if (ax == bx) {
newWall->zrotation =PI/2;
newWall->b = 0; newWall->a = 1; newWall->c = -ax;
}
else if (ay == by) {
newWall->zrotation = 0;
newWall->a = 0; newWall->b = 1; newWall->c = -ay;
}
else {
newWall->a = (by - ay) / (bx - ax); newWall->b = -1; newWall->c = -(newWall->a*ax - ay);
newWall->zrotation = (atanf(newWall->a) < 0)? PI/2-atanf(newWall->a): atanf(newWall->a);
}
}
// 4*2 vertices, 2 center, 2 size, angle, label, zheight
void Room::init_an_object(vector<float>params, bool isFixed, bool isPrevious) {
singleObj obj;
obj.id = objects.size();
//vertices
copy(params.begin(), params.begin() + 8, obj.vertices);
obj.translation[0] = params[8];obj.translation[1] =params[9];obj.translation[2] =.0f;
obj.objWidth = params[10];
obj.objHeight = params[11];
set_obj_zrotation(&obj, params[12] * ANGLE_TO_RAD_F);
//obj.zrotation = params[12] * ANGLE_TO_RAD_F;
obj.catalogId = params[13];
obj.zheight = params[14];
obj.area = obj.objWidth * obj.objHeight;
obj.isFixed = isFixed;
obj.alignedTheWall = (obj.catalogId == TYPE_SHELF || obj.catalogId == TYPE_BED || obj.catalogId == TYPE_TABLE) ? true : false;
obj.adjoinWall = (obj.catalogId == TYPE_SHELF || obj.catalogId == TYPE_BED || obj.catalogId == TYPE_TABLE) ? true : false;
// TODO: is it necessary?
// if (!isPrevious)//existing objs' values should be
// update_obj_boundingBox_and_vertices(obj, 0);
//move this calculation to device
indepenFurArea += obj.objWidth * obj.objHeight; //get_single_obj_maskArea(obj.vertices);
obj.maskLen = int(sqrtf(obj.objWidth * obj.objWidth + obj.objHeight * obj.objHeight));
int gidx = 0;
for(; gidx<groupNum; gidx++){
if(groupMap[gidx].gid == params[15]){
groupMap[gidx].objIds[groupMap[gidx].memNum++] = obj.id;
break;
}
}
if(gidx == groupNum){
groupMap[groupNum].gid = params[15];
groupMap[groupNum].memNum = 1;
groupMap[groupNum].objIds[0] = obj.id;
groupNum++;
}
objects.push_back(obj);
objctNum++;
if (!isFixed)
freeObjIds[freeObjNum++] = obj.id;
}
void Room::set_pairwise_map() {
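// hard-coded pairwise constraints: each anchor type lists partner types with their allowed [minDist, maxDist] ranges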
pairMap[0].pid = TYPE_CHAIR;
int mtype[3] = {TYPE_CHAIR, TYPE_COFFETABLE, TYPE_ENDTABLE};
copy(begin(mtype), end(mtype), begin(pairMap[0].objTypes));
int mdist[3] = {0,40,0}; int mdistm[3] = {50,46,30};
copy(begin(mdist), end(mdist), begin(pairMap[0].minDist));
copy(begin(mdistm), end(mdistm), begin(pairMap[0].maxDist));
pairMap[1].pid = TYPE_BED;
pairMap[1].objTypes[0] = TYPE_NIGHTSTAND;
pairMap[1].minDist[0] = 0;
pairMap[1].maxDist[0] = 30;
}
void Room::set_objs_pairwise_relation(const singleObj& obj1, const singleObj& obj2){
const singleObj* indexObj = (obj1.catalogId <= obj2.catalogId)?&obj1:&obj2;
const singleObj* compObj = (obj1.id == indexObj->id)? &obj2:&obj1;
for(int i=0; i<CONSTRAIN_PAIRS; i++){
if(indexObj->catalogId == pairMap[i].pid){
for(int j=0; pairMap[i].objTypes[j]!=-1&&j<MAX_SUPPORT_TYPE; j++){
if(pairMap[i].objTypes[j] == compObj->catalogId){
vector<int> pair{indexObj->id, compObj->id, pairMap[i].minDist[j], pairMap[i].maxDist[j]};
actualPairs.push_back(pair);
break;
}
}
}
}
}
void Room::update_mask_by_wall(const wall* wal) {
//TODO: DON'T KNOW HOW TO HANDLE OBLIQUE WALLS
}
void Room::CopyToSharedRoom(sharedRoom *m_room){
m_room->objctNum = objctNum;
m_room->wallNum = wallNum;
m_room->obstacleNum = obstacles.size();
m_room->freeObjNum = freeObjNum;
m_room->half_width = half_width;
m_room->half_height = half_height;
m_room->indepenFurArea = indepenFurArea;
m_room->obstacleArea = obstacleArea;
m_room->wallArea = wallArea;
m_room->overlappingThreshold = overlappingThreshold;
m_room->colCount = colCount;
m_room->rowCount = rowCount;
m_room->mskCount = colCount * rowCount;
m_room->pairNum = actualPairs.size();
m_room->groupNum = groupNum;
m_room->RoomCenter[0] = center[0];m_room->RoomCenter[1] = center[1];m_room->RoomCenter[2] = center[2];
hipMemcpy(m_room->freeObjIds, freeObjIds, freeObjNum* sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(m_room->groupMap, groupMap, MAX_GROUP_ALLOW* sizeof(groupMapStruct), hipMemcpyHostToDevice);
hipMemcpy(m_room->pairMap, pairMap, CONSTRAIN_PAIRS* sizeof(pairMapStruct), hipMemcpyHostToDevice);
for(int i=0;i<wallNum;i++)
m_room->deviceWalls[i] = walls[i];
//
//
// int tMem = colCount*rowCount * sizeof(unsigned char);
// hipMallocManaged(&furnitureMask, tMem);
// hipMallocManaged(&furnitureMask_initial, tMem);
// hipMemcpy(furnitureMask, m_room->furnitureMask, tMem, hipMemcpyHostToDevice);
// hipMemcpy(furnitureMask_initial, m_room->furnitureMask_initial, tMem, hipMemcpyHostToDevice);
// //TODO:obstacle
}
void Room::initialize_room(float s_width, float s_height) {
initialized = true;
groupNum = 0;
half_width = s_width / 2;
half_height = s_height / 2;
overlappingThreshold = s_width * s_height * 0.005;
set_pairwise_map();
rowCount = int(s_height) + 1; colCount = int(s_width)+1;
int tMem = rowCount * colCount * sizeof(unsigned char);
furnitureMask_initial = (unsigned char *)malloc(tMem);
memset(furnitureMask_initial, (unsigned char)0, colCount*rowCount);
}
void Room::add_a_wall(vector<float> params){
wall newWall;
newWall.id = walls.size();
newWall.zheight = params[4];
init_a_wall(&newWall, params);
walls.push_back(newWall);
wallNum++;
if (fabs(fmod(newWall.zrotation, PI)) > 0.01)
update_mask_by_wall(&newWall);
}
void Room::add_an_object(vector<float> params, bool isPrevious, bool isFixed) {
if (params.size() < 15) {
float hw = params[2] / 2, hh = params[3] / 2;
float cx = params[0], cy = params[1];
float res[8] = { -hw + cx, hh + cy, hw + cx, hh + cy, hw + cx, -hh + cy, -hw + cx, -hh + cy };
vector<float>vertices(res, res + 8);// get_vertices_by_pos(params[0], params[1], params[2] / 2, params[3] / 2);
params.insert(params.begin(), vertices.begin(), vertices.end());
}
if (isPrevious) {
switch (int(params[13]))
{
case 1:
params[13] = TYPE_FLOOR;
break;
case 3://chair
params[13] = TYPE_CHAIR;
break;
case 8:
params[13] = TYPE_WALL;
break;
case 10:
params[13] = TYPE_OTHER;
break;
case 11:
params[13] = TYPE_CEILING;
break;
}
}
//default groupid is 0
if(params.size()<16)
params.push_back(0);
init_an_object(params, isFixed, isPrevious);
}
void Room::add_a_focal_point(vector<float> fp) {
int groupId = (fp.size() == 3)? 0:fp[3];
for(int i=0; i<groupNum; i++){
if(groupId == groupMap[i].gid)
copy(fp.begin(), fp.begin()+3, groupMap[i].focal);
}
}
void Room::set_obj_zrotation(singleObj * obj, float nrot) {
float oldRot = obj->zrotation;
nrot = remainderf(nrot, 2*PI);
obj->zrotation = nrot;
float gap = obj->zrotation - oldRot;
float s = sinf(gap); float c=cosf(gap);
float minx = INFINITY,maxx =-INFINITY, miny=INFINITY, maxy = -INFINITY;
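// rotate each vertex around the object's center by the angle delta and rebuild the axis-aligned bounding box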
for(int i=0; i<4; i++){
rot_around_point(obj->translation, &obj->vertices[2*i], &obj->vertices[2*i+1], s, c);
minx = (obj->vertices[2*i] < minx)? obj->vertices[2*i]:minx;
maxx = (obj->vertices[2*i] > maxx)? obj->vertices[2*i]:maxx;
miny = (obj->vertices[2*i + 1] < miny)? obj->vertices[2*i+1]:miny;
maxy = (obj->vertices[2*i + 1] > maxy)? obj->vertices[2*i+1]:maxy;
}
obj->boundingBox.x = minx; obj->boundingBox.y=maxy;
obj->boundingBox.width = maxx-minx; obj->boundingBox.height = maxy-miny;
}
void Room::add_an_obstacle(vector<float> params) {
obstacles.push_back(params);
}
void Room::get_obstacle_vertices(float * vertices){
for(int i=0; i<obstacles.size();i++)
copy(obstacles[i].begin(), obstacles[i].end(),&vertices[8*i]);
}
// void change_obj_freeState(singleObj* obj) {
// if (obj->isFixed)
// freeObjIds.erase(remove(freeObjIds.begin(), freeObjIds.end(), obj->id));
// else
// freeObjIds.push_back(obj->id);
// obj->isFixed = !obj->isFixed;
// }
// };
| 2d30823221dfa6e0a342ee0e4e883c96a451db90.cu | #include<iostream>
#include "room.cuh"
#include "math.h"
using namespace std;
__device__ __host__
void rot_around_point(float center[3], float * x, float * y, float s, float c) {
// translate point back to origin:
*x -= center[0];
*y -= center[1];
// rotate point
float xnew = *x * c - *y * s;
float ynew = *x * s + *y * c;
// translate point back:
*x = xnew + center[0];
*y = ynew + center[1];
}
//ax,ay,bx,by
void Room::init_a_wall(wall *newWall, vector<float> params) {
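// derive the wall's center, extent, z-rotation, and implicit line a*x + b*y + c = 0 from its two endpoints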
float ax = params[0], ay = params[1], bx = params[2], by = params[3];
newWall->translation[0] = (ax + bx) / 2;newWall->translation[1] = (ay + by) / 2;newWall->translation[2] = 0;
newWall->width = sqrtf(powf((by - ay), 2) + powf((bx - ax), 2));
copy(params.begin(), params.end(), newWall->vertices);
if (ax == bx) {
newWall->zrotation =PI/2;
newWall->b = 0; newWall->a = 1; newWall->c = -ax;
}
else if (ay == by) {
newWall->zrotation = 0;
newWall->a = 0; newWall->b = 1; newWall->c = -ay;
}
else {
newWall->a = (by - ay) / (bx - ax); newWall->b = -1; newWall->c = -(newWall->a*ax - ay);
newWall->zrotation = (atanf(newWall->a) < 0)? PI/2-atanf(newWall->a): atanf(newWall->a);
}
}
// 4*2 vertices, 2 center, 2 size, angle, label, zheight
void Room::init_an_object(vector<float>params, bool isFixed, bool isPrevious) {
singleObj obj;
obj.id = objects.size();
//vertices
copy(params.begin(), params.begin() + 8, obj.vertices);
obj.translation[0] = params[8];obj.translation[1] =params[9];obj.translation[2] =.0f;
obj.objWidth = params[10];
obj.objHeight = params[11];
set_obj_zrotation(&obj, params[12] * ANGLE_TO_RAD_F);
//obj.zrotation = params[12] * ANGLE_TO_RAD_F;
obj.catalogId = params[13];
obj.zheight = params[14];
obj.area = obj.objWidth * obj.objHeight;
obj.isFixed = isFixed;
obj.alignedTheWall = (obj.catalogId == TYPE_SHELF || obj.catalogId == TYPE_BED || obj.catalogId == TYPE_TABLE) ? true : false;
obj.adjoinWall = (obj.catalogId == TYPE_SHELF || obj.catalogId == TYPE_BED || obj.catalogId == TYPE_TABLE) ? true : false;
// TODO: is it necessary?
// if (!isPrevious)//existing objs' values should be
// update_obj_boundingBox_and_vertices(obj, 0);
//move this calculation to device
indepenFurArea += obj.objWidth * obj.objHeight; //get_single_obj_maskArea(obj.vertices);
obj.maskLen = int(sqrtf(obj.objWidth * obj.objWidth + obj.objHeight * obj.objHeight));
int gidx = 0;
for(; gidx<groupNum; gidx++){
if(groupMap[gidx].gid == params[15]){
groupMap[gidx].objIds[groupMap[gidx].memNum++] = obj.id;
break;
}
}
if(gidx == groupNum){
groupMap[groupNum].gid = params[15];
groupMap[groupNum].memNum = 1;
groupMap[groupNum].objIds[0] = obj.id;
groupNum++;
}
objects.push_back(obj);
objctNum++;
if (!isFixed)
freeObjIds[freeObjNum++] = obj.id;
}
void Room::set_pairwise_map() {
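// hard-coded pairwise constraints: each anchor type lists partner types with their allowed [minDist, maxDist] ranges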
pairMap[0].pid = TYPE_CHAIR;
int mtype[3] = {TYPE_CHAIR, TYPE_COFFETABLE, TYPE_ENDTABLE};
copy(begin(mtype), end(mtype), begin(pairMap[0].objTypes));
int mdist[3] = {0,40,0}; int mdistm[3] = {50,46,30};
copy(begin(mdist), end(mdist), begin(pairMap[0].minDist));
copy(begin(mdistm), end(mdistm), begin(pairMap[0].maxDist));
pairMap[1].pid = TYPE_BED;
pairMap[1].objTypes[0] = TYPE_NIGHTSTAND;
pairMap[1].minDist[0] = 0;
pairMap[1].maxDist[0] = 30;
}
void Room::set_objs_pairwise_relation(const singleObj& obj1, const singleObj& obj2){
const singleObj* indexObj = (obj1.catalogId <= obj2.catalogId)?&obj1:&obj2;
const singleObj* compObj = (obj1.id == indexObj->id)? &obj2:&obj1;
for(int i=0; i<CONSTRAIN_PAIRS; i++){
if(indexObj->catalogId == pairMap[i].pid){
for(int j=0; pairMap[i].objTypes[j]!=-1&&j<MAX_SUPPORT_TYPE; j++){
if(pairMap[i].objTypes[j] == compObj->catalogId){
vector<int> pair{indexObj->id, compObj->id, pairMap[i].minDist[j], pairMap[i].maxDist[j]};
actualPairs.push_back(pair);
break;
}
}
}
}
}
void Room::update_mask_by_wall(const wall* wal) {
//TODO: DON'T KNOW HOW TO HANDLE OBLIQUE WALLS
}
void Room::CopyToSharedRoom(sharedRoom *m_room){
m_room->objctNum = objctNum;
m_room->wallNum = wallNum;
m_room->obstacleNum = obstacles.size();
m_room->freeObjNum = freeObjNum;
m_room->half_width = half_width;
m_room->half_height = half_height;
m_room->indepenFurArea = indepenFurArea;
m_room->obstacleArea = obstacleArea;
m_room->wallArea = wallArea;
m_room->overlappingThreshold = overlappingThreshold;
m_room->colCount = colCount;
m_room->rowCount = rowCount;
m_room->mskCount = colCount * rowCount;
m_room->pairNum = actualPairs.size();
m_room->groupNum = groupNum;
m_room->RoomCenter[0] = center[0];m_room->RoomCenter[1] = center[1];m_room->RoomCenter[2] = center[2];
cudaMemcpy(m_room->freeObjIds, freeObjIds, freeObjNum* sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(m_room->groupMap, groupMap, MAX_GROUP_ALLOW* sizeof(groupMapStruct), cudaMemcpyHostToDevice);
cudaMemcpy(m_room->pairMap, pairMap, CONSTRAIN_PAIRS* sizeof(pairMapStruct), cudaMemcpyHostToDevice);
for(int i=0;i<wallNum;i++)
m_room->deviceWalls[i] = walls[i];
//
//
// int tMem = colCount*rowCount * sizeof(unsigned char);
// cudaMallocManaged(&furnitureMask, tMem);
// cudaMallocManaged(&furnitureMask_initial, tMem);
// cudaMemcpy(furnitureMask, m_room->furnitureMask, tMem, cudaMemcpyHostToDevice);
// cudaMemcpy(furnitureMask_initial, m_room->furnitureMask_initial, tMem, cudaMemcpyHostToDevice);
// //TODO:obstacle
}
void Room::initialize_room(float s_width, float s_height) {
initialized = true;
groupNum = 0;
half_width = s_width / 2;
half_height = s_height / 2;
overlappingThreshold = s_width * s_height * 0.005;
set_pairwise_map();
rowCount = int(s_height) + 1; colCount = int(s_width)+1;
int tMem = rowCount * colCount * sizeof(unsigned char);
furnitureMask_initial = (unsigned char *)malloc(tMem);
memset(furnitureMask_initial, (unsigned char)0, colCount*rowCount);
}
void Room::add_a_wall(vector<float> params){
wall newWall;
newWall.id = walls.size();
newWall.zheight = params[4];
init_a_wall(&newWall, params);
walls.push_back(newWall);
wallNum++;
if (fabs(fmod(newWall.zrotation, PI)) > 0.01)
update_mask_by_wall(&newWall);
}
void Room::add_an_object(vector<float> params, bool isPrevious, bool isFixed) {
if (params.size() < 15) {
float hw = params[2] / 2, hh = params[3] / 2;
float cx = params[0], cy = params[1];
float res[8] = { -hw + cx, hh + cy, hw + cx, hh + cy, hw + cx, -hh + cy, -hw + cx, -hh + cy };
vector<float>vertices(res, res + 8);// get_vertices_by_pos(params[0], params[1], params[2] / 2, params[3] / 2);
params.insert(params.begin(), vertices.begin(), vertices.end());
}
if (isPrevious) {
switch (int(params[13]))
{
case 1:
params[13] = TYPE_FLOOR;
break;
case 3://chair
params[13] = TYPE_CHAIR;
break;
case 8:
params[13] = TYPE_WALL;
break;
case 10:
params[13] = TYPE_OTHER;
break;
case 11:
params[13] = TYPE_CEILING;
break;
}
}
//default groupid is 0
if(params.size()<16)
params.push_back(0);
init_an_object(params, isFixed, isPrevious);
}
void Room::add_a_focal_point(vector<float> fp) {
int groupId = (fp.size() == 3)? 0:fp[3];
for(int i=0; i<groupNum; i++){
if(groupId == groupMap[i].gid)
copy(fp.begin(), fp.begin()+3, groupMap[i].focal);
}
}
void Room::set_obj_zrotation(singleObj * obj, float nrot) {
float oldRot = obj->zrotation;
nrot = remainderf(nrot, 2*PI);
obj->zrotation = nrot;
float gap = obj->zrotation - oldRot;
float s = sinf(gap); float c=cosf(gap);
float minx = INFINITY,maxx =-INFINITY, miny=INFINITY, maxy = -INFINITY;
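// rotate each vertex around the object's center by the angle delta and rebuild the axis-aligned bounding box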
for(int i=0; i<4; i++){
rot_around_point(obj->translation, &obj->vertices[2*i], &obj->vertices[2*i+1], s, c);
minx = (obj->vertices[2*i] < minx)? obj->vertices[2*i]:minx;
maxx = (obj->vertices[2*i] > maxx)? obj->vertices[2*i]:maxx;
miny = (obj->vertices[2*i + 1] < miny)? obj->vertices[2*i+1]:miny;
maxy = (obj->vertices[2*i + 1] > maxy)? obj->vertices[2*i+1]:maxy;
}
obj->boundingBox.x = minx; obj->boundingBox.y=maxy;
obj->boundingBox.width = maxx-minx; obj->boundingBox.height = maxy-miny;
}
void Room::add_an_obstacle(vector<float> params) {
obstacles.push_back(params);
}
void Room::get_obstacle_vertices(float * vertices){
for(int i=0; i<obstacles.size();i++)
copy(obstacles[i].begin(), obstacles[i].end(),&vertices[8*i]);
}
// void change_obj_freeState(singleObj* obj) {
// if (obj->isFixed)
// freeObjIds.erase(remove(freeObjIds.begin(), freeObjIds.end(), obj->id));
// else
// freeObjIds.push_back(obj->id);
// obj->isFixed = !obj->isFixed;
// }
// };
|
a78015d4e3849092f6f5451e6dedbbe0aad6d484.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <cassert>
#include <hipcub/hipcub.hpp> // NOLINT
#include <vector>
#include "glog/logging.h"
#include "paddle/fluid/inference/tensorrt/plugin/slice_op_plugin.h"
#include "paddle/fluid/inference/tensorrt/plugin/trt_plugin_factory.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
// Dynamic Plugin below.
#if IS_TRT_VERSION_GE(6000)
template <typename T>
__global__ void SliceKernel(int num, int dims, const T *input,
const int *offsets_info, T *output) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ int shared_data[];
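// thread 0 stages the per-dimension (offset, extent, stride) triples in shared memory for the whole block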
if (threadIdx.x == 0) {
for (int i = 0; i < dims * 3; i++) {
shared_data[i] = offsets_info[i];
}
}
__syncthreads();
if (idx < num) {
int t_idx = idx;
int in_idx = 0;
for (int i = dims - 1; i >= 0; i--) {
// output_shape
auto t = t_idx % shared_data[i * 3 + 1];
// out offset
auto s = t + shared_data[i * 3];
// input_seg_offset
in_idx = in_idx + shared_data[i * 3 + 2] * s;
t_idx = t_idx / shared_data[i * 3 + 1];
}
output[idx] = input[in_idx];
}
}
int SlicePluginDynamic::initialize() { return 0; }
size_t SlicePluginDynamic::getSerializationSize() const { return 0; }
void SlicePluginDynamic::serialize(void *buffer) const {}
nvinfer1::DimsExprs SlicePluginDynamic::getOutputDimensions(
int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs,
nvinfer1::IExprBuilder &expr_builder) {
auto in_dims = inputs[0];
nvinfer1::DimsExprs ret = in_dims;
// starts and ends should be greater than 0
for (size_t i = 0; i < axes_.size(); i++) {
int start = starts_[i];
int end = ends_[i];
ret.d[axes_[i]] = expr_builder.constant(end - start);
}
return ret;
}
bool SlicePluginDynamic::supportsFormatCombination(
int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs,
int nb_outputs) {
PADDLE_ENFORCE_NOT_NULL(
in_out, platform::errors::InvalidArgument(
"The input of swish plugin shoule not be nullptr."));
PADDLE_ENFORCE_LT(
pos, nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos, nb_inputs + nb_outputs));
const nvinfer1::PluginTensorDesc &in = in_out[pos];
if (pos == 0) {
#ifdef SUPPORTS_CUDA_FP16
if (ban_fp16_) {
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
} else {
return (in.type == nvinfer1::DataType::kFLOAT ||
in.type == nvinfer1::DataType::kHALF) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
}
#else
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
#endif
}
const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1];
// output
return in.type == prev.type && in.format == prev.format;
}
nvinfer1::DataType SlicePluginDynamic::getOutputDataType(
int index, const nvinfer1::DataType *input_types, int nb_inputs) const {
PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument(
"The Slice Plugin only has one input, so the "
"index value should be 0, but get %d.",
index));
PADDLE_ENFORCE_EQ((input_types[0] == nvinfer1::DataType::kFLOAT ||
input_types[0] == nvinfer1::DataType::kHALF),
true, platform::errors::InvalidArgument(
"The input type should be half or float"));
return input_types[0];
}
int SlicePluginDynamic::enqueue(const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc,
const void *const *inputs, void *const *outputs,
void *workspace, hipStream_t stream) {
auto input_dims = input_desc[0].dims;
auto out_dims = output_desc[0].dims;
auto num_dims = input_dims.nbDims;
size_t out_num = ProductDim(out_dims);
std::vector<int> seg_offsets;
std::vector<int> offsets;
std::vector<int> extends;
// use resize (not reserve) so the per-dimension entries below can be assigned by index
offsets.resize(num_dims);
extends.resize(num_dims);
seg_offsets.resize(num_dims);
seg_offsets[num_dims - 1] = 1;
for (int i = num_dims - 2; i >= 0; i--) {
seg_offsets[i] = input_dims.d[i + 1] * seg_offsets[i + 1];
}
for (size_t i = 0; i < num_dims; ++i) {
offsets[i] = 0;
extends[i] = out_dims.d[i];
}
for (size_t i = 0; i < axes_.size(); ++i) {
offsets[axes_[i]] = starts_[i];
}
std::vector<int> offset_info;
for (size_t i = 0; i < num_dims; ++i) {
offset_info.push_back(offsets[i]);
offset_info.push_back(extends[i]);
offset_info.push_back(seg_offsets[i]);
}
framework::Tensor offset_temp_tensor;
int device_id;
hipGetDevice(&device_id);
offset_temp_tensor.Resize({3 * num_dims});
auto *offset_temp_data =
offset_temp_tensor.mutable_data<int>(platform::CUDAPlace(device_id));
hipMemcpyAsync(offset_temp_data, offset_info.data(),
sizeof(int) * 3 * num_dims, hipMemcpyHostToDevice, stream);
int threads = 256;
int blocks = (out_num + threads - 1) / threads;
auto input_type = input_desc[0].type;
if (input_type == nvinfer1::DataType::kFLOAT) {
const float *input1 = static_cast<const float *>(inputs[0]);
float *output = static_cast<float *>(outputs[0]);
hipLaunchKernelGGL(( SliceKernel<float>), dim3(blocks), dim3(threads), 3 * num_dims * sizeof(int), stream,
out_num, num_dims, input1, offset_temp_data, output);
} else if (input_type == nvinfer1::DataType::kHALF) {
#ifdef SUPPORTS_CUDA_FP16
const half *input1 = static_cast<const half *>(inputs[0]);
half *output = static_cast<half *>(outputs[0]);
hipLaunchKernelGGL(( SliceKernel<half>), dim3(blocks), dim3(threads), 3 * num_dims * sizeof(int), stream,
out_num, num_dims, input1, offset_temp_data, output);
#else
PADDLE_THROW(platform::errors::Fatal(
"The cuda archs you specific should greater than 600."));
#endif
} else {
PADDLE_THROW(platform::errors::Fatal(
"The Slice TRT Plugin's input type should be float or half."));
}
return hipGetLastError() != hipSuccess;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
| a78015d4e3849092f6f5451e6dedbbe0aad6d484.cu | // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <cuda_runtime.h>
#include <stdio.h>
#include <cassert>
#include <cub/cub.cuh> // NOLINT
#include <vector>
#include "glog/logging.h"
#include "paddle/fluid/inference/tensorrt/plugin/slice_op_plugin.h"
#include "paddle/fluid/inference/tensorrt/plugin/trt_plugin_factory.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
// Dynamic Plugin below.
#if IS_TRT_VERSION_GE(6000)
template <typename T>
__global__ void SliceKernel(int num, int dims, const T *input,
const int *offsets_info, T *output) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ int shared_data[];
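// thread 0 stages the per-dimension (offset, extent, stride) triples in shared memory for the whole block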
if (threadIdx.x == 0) {
for (int i = 0; i < dims * 3; i++) {
shared_data[i] = offsets_info[i];
}
}
__syncthreads();
if (idx < num) {
int t_idx = idx;
int in_idx = 0;
for (int i = dims - 1; i >= 0; i--) {
// output_shape
auto t = t_idx % shared_data[i * 3 + 1];
// out offset
auto s = t + shared_data[i * 3];
// input_seg_offset
in_idx = in_idx + shared_data[i * 3 + 2] * s;
t_idx = t_idx / shared_data[i * 3 + 1];
}
output[idx] = input[in_idx];
}
}
int SlicePluginDynamic::initialize() { return 0; }
size_t SlicePluginDynamic::getSerializationSize() const { return 0; }
void SlicePluginDynamic::serialize(void *buffer) const {}
nvinfer1::DimsExprs SlicePluginDynamic::getOutputDimensions(
int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs,
nvinfer1::IExprBuilder &expr_builder) {
auto in_dims = inputs[0];
nvinfer1::DimsExprs ret = in_dims;
// starts and ends should be greater than 0
for (size_t i = 0; i < axes_.size(); i++) {
int start = starts_[i];
int end = ends_[i];
ret.d[axes_[i]] = expr_builder.constant(end - start);
}
return ret;
}
bool SlicePluginDynamic::supportsFormatCombination(
int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs,
int nb_outputs) {
PADDLE_ENFORCE_NOT_NULL(
in_out, platform::errors::InvalidArgument(
"The input of swish plugin shoule not be nullptr."));
PADDLE_ENFORCE_LT(
pos, nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos, nb_inputs + nb_outputs));
const nvinfer1::PluginTensorDesc &in = in_out[pos];
if (pos == 0) {
#ifdef SUPPORTS_CUDA_FP16
if (ban_fp16_) {
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
} else {
return (in.type == nvinfer1::DataType::kFLOAT ||
in.type == nvinfer1::DataType::kHALF) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
}
#else
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
#endif
}
const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1];
// output
return in.type == prev.type && in.format == prev.format;
}
nvinfer1::DataType SlicePluginDynamic::getOutputDataType(
int index, const nvinfer1::DataType *input_types, int nb_inputs) const {
PADDLE_ENFORCE_EQ(index, 0, platform::errors::InvalidArgument(
"The Slice Plugin only has one input, so the "
"index value should be 0, but get %d.",
index));
PADDLE_ENFORCE_EQ((input_types[0] == nvinfer1::DataType::kFLOAT ||
input_types[0] == nvinfer1::DataType::kHALF),
true, platform::errors::InvalidArgument(
"The input type should be half or float"));
return input_types[0];
}
int SlicePluginDynamic::enqueue(const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc,
const void *const *inputs, void *const *outputs,
void *workspace, cudaStream_t stream) {
auto input_dims = input_desc[0].dims;
auto out_dims = output_desc[0].dims;
auto num_dims = input_dims.nbDims;
size_t out_num = ProductDim(out_dims);
std::vector<int> seg_offsets;
std::vector<int> offsets;
std::vector<int> extends;
// use resize (not reserve) so the per-dimension entries below can be assigned by index
offsets.resize(num_dims);
extends.resize(num_dims);
seg_offsets.resize(num_dims);
seg_offsets[num_dims - 1] = 1;
for (int i = num_dims - 2; i >= 0; i--) {
seg_offsets[i] = input_dims.d[i + 1] * seg_offsets[i + 1];
}
for (size_t i = 0; i < num_dims; ++i) {
offsets[i] = 0;
extends[i] = out_dims.d[i];
}
for (size_t i = 0; i < axes_.size(); ++i) {
offsets[axes_[i]] = starts_[i];
}
std::vector<int> offset_info;
for (size_t i = 0; i < num_dims; ++i) {
offset_info.push_back(offsets[i]);
offset_info.push_back(extends[i]);
offset_info.push_back(seg_offsets[i]);
}
framework::Tensor offset_temp_tensor;
int device_id;
cudaGetDevice(&device_id);
offset_temp_tensor.Resize({3 * num_dims});
auto *offset_temp_data =
offset_temp_tensor.mutable_data<int>(platform::CUDAPlace(device_id));
cudaMemcpyAsync(offset_temp_data, offset_info.data(),
sizeof(int) * 3 * num_dims, cudaMemcpyHostToDevice, stream);
int threads = 256;
int blocks = (out_num + threads - 1) / threads;
auto input_type = input_desc[0].type;
if (input_type == nvinfer1::DataType::kFLOAT) {
const float *input1 = static_cast<const float *>(inputs[0]);
float *output = static_cast<float *>(outputs[0]);
SliceKernel<float><<<blocks, threads, 3 * num_dims * sizeof(int), stream>>>(
out_num, num_dims, input1, offset_temp_data, output);
} else if (input_type == nvinfer1::DataType::kHALF) {
#ifdef SUPPORTS_CUDA_FP16
const half *input1 = static_cast<const half *>(inputs[0]);
half *output = static_cast<half *>(outputs[0]);
SliceKernel<half><<<blocks, threads, 3 * num_dims * sizeof(int), stream>>>(
out_num, num_dims, input1, offset_temp_data, output);
#else
PADDLE_THROW(platform::errors::Fatal(
"The cuda archs you specific should greater than 600."));
#endif
} else {
PADDLE_THROW(platform::errors::Fatal(
"The Slice TRT Plugin's input type should be float or half."));
}
return cudaGetLastError() != cudaSuccess;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
90f8c5f2b9f66702f79b3672a6f9695e44370a9f.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <hip/hip_runtime.h>
#include "core/providers/cuda/math/binary_elementwise_ops_impl.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cu_inc/binary_elementwise_impl.cuh"
#include "core/providers/cuda/math/binary_elementwise_ops_impl_functors.cuh"
namespace onnxruntime {
namespace cuda {
#define BINARY_ELEMENTWISE_IMPL(name) \
BINARY_ELEMENTWISE_IMPL_DECLARATION(name) { \
BinaryElementWiseImpl(output_rank_or_simple_broadcast, \
lhs_padded_strides, \
lhs_data, \
rhs_padded_strides, \
rhs_data, \
fdm_output_strides, \
fdm_H, \
fdm_C, \
output_data, \
OP_##name<T, T, T>(), \
count); \
}
#define BINARY_ELEMENTWISE_IMPL_T1(name) \
BINARY_ELEMENTWISE_IMPL_DECLARATION_T1(name) { \
BinaryElementWiseImpl(output_rank_or_simple_broadcast, \
lhs_padded_strides, \
lhs_data, \
rhs_padded_strides, \
rhs_data, \
fdm_output_strides, \
fdm_H, \
fdm_C, \
output_data, \
OP_##name<T, T, T1>(), \
count); \
}
#define BINARY_ELEMENTWISE_IMPL_T2(name) \
BINARY_ELEMENTWISE_IMPL_DECLARATION_T2(name) { \
BinaryElementWiseImpl(output_rank_or_simple_broadcast, \
lhs_padded_strides, \
lhs_data, \
rhs_padded_strides, \
rhs_data, \
fdm_output_strides, \
fdm_H, \
fdm_C, \
output_data, \
OP_##name<T, T1, T2>(), \
count); \
}
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, T) \
template void Impl_##x<T>(int32_t output_rank, \
const TArray<int64_t>* lhs_padded_strides, const T* lhs_data, \
const TArray<int64_t>* rhs_padded_strides, const T* rhs_data, \
const TArray<fast_divmod>* fdm_output_strides, const fast_divmod& fdm_H, const fast_divmod& fdm_C, T* output_data, size_t count);
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(x, T, T1) \
template void ImplT1_##x<T, T1>(int32_t output_rank, \
const TArray<int64_t>* lhs_padded_strides, const T* lhs_data, \
const TArray<int64_t>* rhs_padded_strides, const T1* rhs_data, \
const TArray<fast_divmod>* fdm_output_strides, const fast_divmod& fdm_H, const fast_divmod& fdm_C, T* output_data, size_t count);
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(x, T, T1, T2) \
template void ImplT2_##x<T, T1, T2>(int32_t output_rank, \
const TArray<int64_t>* lhs_padded_strides, const T1* lhs_data, \
const TArray<int64_t>* rhs_padded_strides, const T2* rhs_data, \
const TArray<fast_divmod>* fdm_output_strides, const fast_divmod& fdm_H, const fast_divmod& fdm_C, T* output_data, size_t count);
#if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__))
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_BF16(x) SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, nv_bfloat16)
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2_BF16(name) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(name, bool, nv_bfloat16, nv_bfloat16)
#else
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_BF16(x)
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2_BF16(name)
#endif
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(x) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, uint32_t) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, uint64_t) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, int32_t) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, int64_t) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, half) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_BF16(x) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, float) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, double)
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1_ILHFD(x, T) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(x, T, int32_t) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(x, T, int64_t) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(x, T, half) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(x, T, float) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(x, T, double)
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_OIL(x) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, bool) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, int32_t) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, int64_t)
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(x) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, half) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_BF16(x) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, float) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, double)
// create declarations for impl
#define BINARY_OP_NAME_EXPR(name, expr) \
BINARY_ELEMENTWISE_IMPL(name)
BINARY_OPS()
#undef BINARY_OP_NAME_EXPR
// create specialized impl
// the postfix of the name indicates the types supported by the op:
// B: uint8_t
// W: uint16_t
// U: uint32_t
// Z: uint64_t
// C: int8_t
// S: int16_t
// I: int32_t
// L: int64_t
// H: float16
// F: float
// D: double
// O: bool
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Add)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(Add, bool)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Sub)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Mul)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Div)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(Pow_7)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(And, bool)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(Or, bool)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(Xor, bool)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(PRelu)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Max)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Min)
// create declarations for impl for Pow
BINARY_ELEMENTWISE_IMPL_T1(Pow)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1_ILHFD(Pow, int32_t)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1_ILHFD(Pow, int64_t)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1_ILHFD(Pow, float)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1_ILHFD(Pow, double)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1_ILHFD(Pow, half)
// create declarations for impl2
#define BINARY_OP_NAME_EXPR2(name, expr) \
BINARY_ELEMENTWISE_IMPL_T2(name)
BINARY_OPS2()
#undef BINARY_OP_NAME_EXPR2
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD2(name) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(name, bool, uint32_t, uint32_t) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(name, bool, uint64_t, uint64_t) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(name, bool, int32_t, int32_t) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(name, bool, int64_t, int64_t) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(name, bool, half, half) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2_BF16(name) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(name, bool, float, float) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(name, bool, double, double)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD2(Greater)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD2(Equal)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(Equal, bool, bool, bool)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD2(Less)
} // namespace cuda
} // namespace onnxruntime
| 90f8c5f2b9f66702f79b3672a6f9695e44370a9f.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <cuda_runtime.h>
#include "core/providers/cuda/math/binary_elementwise_ops_impl.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cu_inc/binary_elementwise_impl.cuh"
#include "core/providers/cuda/math/binary_elementwise_ops_impl_functors.cuh"
namespace onnxruntime {
namespace cuda {
#define BINARY_ELEMENTWISE_IMPL(name) \
BINARY_ELEMENTWISE_IMPL_DECLARATION(name) { \
BinaryElementWiseImpl(output_rank_or_simple_broadcast, \
lhs_padded_strides, \
lhs_data, \
rhs_padded_strides, \
rhs_data, \
fdm_output_strides, \
fdm_H, \
fdm_C, \
output_data, \
OP_##name<T, T, T>(), \
count); \
}
#define BINARY_ELEMENTWISE_IMPL_T1(name) \
BINARY_ELEMENTWISE_IMPL_DECLARATION_T1(name) { \
BinaryElementWiseImpl(output_rank_or_simple_broadcast, \
lhs_padded_strides, \
lhs_data, \
rhs_padded_strides, \
rhs_data, \
fdm_output_strides, \
fdm_H, \
fdm_C, \
output_data, \
OP_##name<T, T, T1>(), \
count); \
}
#define BINARY_ELEMENTWISE_IMPL_T2(name) \
BINARY_ELEMENTWISE_IMPL_DECLARATION_T2(name) { \
BinaryElementWiseImpl(output_rank_or_simple_broadcast, \
lhs_padded_strides, \
lhs_data, \
rhs_padded_strides, \
rhs_data, \
fdm_output_strides, \
fdm_H, \
fdm_C, \
output_data, \
OP_##name<T, T1, T2>(), \
count); \
}
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, T) \
template void Impl_##x<T>(int32_t output_rank, \
const TArray<int64_t>* lhs_padded_strides, const T* lhs_data, \
const TArray<int64_t>* rhs_padded_strides, const T* rhs_data, \
const TArray<fast_divmod>* fdm_output_strides, const fast_divmod& fdm_H, const fast_divmod& fdm_C, T* output_data, size_t count);
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(x, T, T1) \
template void ImplT1_##x<T, T1>(int32_t output_rank, \
const TArray<int64_t>* lhs_padded_strides, const T* lhs_data, \
const TArray<int64_t>* rhs_padded_strides, const T1* rhs_data, \
const TArray<fast_divmod>* fdm_output_strides, const fast_divmod& fdm_H, const fast_divmod& fdm_C, T* output_data, size_t count);
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(x, T, T1, T2) \
template void ImplT2_##x<T, T1, T2>(int32_t output_rank, \
const TArray<int64_t>* lhs_padded_strides, const T1* lhs_data, \
const TArray<int64_t>* rhs_padded_strides, const T2* rhs_data, \
const TArray<fast_divmod>* fdm_output_strides, const fast_divmod& fdm_H, const fast_divmod& fdm_C, T* output_data, size_t count);
#if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__))
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_BF16(x) SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, nv_bfloat16)
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2_BF16(name) SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(name, bool, nv_bfloat16, nv_bfloat16)
#else
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_BF16(x)
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2_BF16(name)
#endif
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(x) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, uint32_t) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, uint64_t) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, int32_t) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, int64_t) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, half) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_BF16(x) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, float) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, double)
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1_ILHFD(x, T) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(x, T, int32_t) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(x, T, int64_t) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(x, T, half) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(x, T, float) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1(x, T, double)
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_OIL(x) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, bool) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, int32_t) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, int64_t)
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(x) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, half) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_BF16(x) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, float) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(x, double)
// create declarations for impl
#define BINARY_OP_NAME_EXPR(name, expr) \
BINARY_ELEMENTWISE_IMPL(name)
BINARY_OPS()
#undef BINARY_OP_NAME_EXPR
// create specialized impl
// the postfix of means the types supported by the op:
// B: uint8_t
// W: uint16_t
// U: uint32_t
// Z: uint64_t
// C: int8_t
// S: int16_t
// I: int32_t
// L: int64_t
// H: float16
// F: float
// D: double
// O: bool
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Add)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(Add, bool)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Sub)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Mul)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Div)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(Pow_7)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(And, bool)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(Or, bool)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL(Xor, bool)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_HFD(PRelu)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Max)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD(Min)
// create declarations for impl for Pow
BINARY_ELEMENTWISE_IMPL_T1(Pow)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1_ILHFD(Pow, int32_t)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1_ILHFD(Pow, int64_t)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1_ILHFD(Pow, float)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1_ILHFD(Pow, double)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T1_ILHFD(Pow, half)
// create declarations for impl2
#define BINARY_OP_NAME_EXPR2(name, expr) \
BINARY_ELEMENTWISE_IMPL_T2(name)
BINARY_OPS2()
#undef BINARY_OP_NAME_EXPR2
#define SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD2(name) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(name, bool, uint32_t, uint32_t) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(name, bool, uint64_t, uint64_t) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(name, bool, int32_t, int32_t) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(name, bool, int64_t, int64_t) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(name, bool, half, half) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2_BF16(name) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(name, bool, float, float) \
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(name, bool, double, double)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD2(Greater)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD2(Equal)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_T2(Equal, bool, bool, bool)
SPECIALIZED_BINARY_ELEMENTWISE_IMPL_UZILHFD2(Less)
} // namespace cuda
} // namespace onnxruntime
|
e5943ca068ea666de84cc49c85912a0867b7d72f.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <hip/hip_runtime_api.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
#include <cstdint>
double maxBW;
void KroneckerGPUSmall(int M, int N, float* A, float* B, float* C);
void KroneckerGPU(int M, int N, float* A, float* B, float* C);
//Error handling macro, wrap it around runtime API calls whenever possible
static void HandleError(hipError_t err, const char *file, int line) {
if (err != hipSuccess) {
printf("\n%s in %s at line %d\n", hipGetErrorString(err), file, line);
//system("pause");
exit(EXIT_FAILURE);
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
void KroneckerCPU(int M, int N, float* A, float* B, float*C) {
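// Reference implementation: C is an (M*N) x (M*N) row-major matrix with
// C[rowA*N + rowB][colA*N + colB] = A[rowA][colA] * B[rowB][colB].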
for (int rowA = 0; rowA < M; rowA++) {
for (int colA = 0; colA < M; colA++) {
float elemA = A[rowA * M + colA];
for (int rowB = 0; rowB < N; rowB++) {
int rowC = rowA * N + rowB;
for (int colB = 0; colB < N; colB++) {
int colC = colA * N + colB;
float elemB = B[rowB * N + colB];
C[rowC * (M * N) + colC] = elemA * elemB;
}
}
}
}
}
void PrintMatrix(float* matrix, int M, int N) {
for (int row = 0; row<M; row++)
{
for (int columns = 0; columns<N; columns++)
{
printf("%7.3f ", matrix[row * N + columns]);
}
printf("\n");
}
}
void checkResults(int N, int M, float* gpu_result, float* cpu_result){
for (int i = 0; i < M * N * M * N; i++) {
if (fabs(gpu_result[i] - cpu_result[i]) > 0.01) {
printf("\n Mismatch at index %d: GPU: %f CPU %f\n", i, gpu_result[i], cpu_result[i]);
exit(EXIT_FAILURE);
}
}
printf("\n Ok!!!\n");
}
/***************************************************/
/***************************************************/
/***************************************************/
__global__ void KroneckerKernel(const int M, const int N, float* A, float* B, float* C){
extern __shared__ float shrd_ptr[];
float*sh_B = shrd_ptr;
__shared__ float shrd_C[1024];
__shared__ uint32_t shrd_C_id[1024];
uint32_t tid = threadIdx.x;
uint32_t sh_b_size = (N*N <= 64*64) ? N*N: 64*64;
int b_jump = -sh_b_size;
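// B is staged through dynamic shared memory in tiles of at most 64*64 floats;
// b_jump holds the global offset of the tile currently resident in sh_B.
// Products are buffered in shrd_C / shrd_C_id (32 per thread) and flushed to C in coalesced batches.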
uint32_t numJumps = max(1,((M*M) / (blockDim.x * gridDim.x))); // at least do one run
for (uint32_t jump_id = 0; jump_id < numJumps; jump_id++){ //how many different groups of elements a block will touch
//groups of elements are elements next to one another in A
uint32_t A_id = blockIdx.x * blockDim.x + tid + jump_id*gridDim.x*blockDim.x; //the element A id
if(A_id < M*M){
uint32_t A_i = A_id / M;
uint32_t A_j = A_id % M;
float A_ele = A[A_id];
int id =0;
#pragma unroll
for (uint32_t B_id = 0; B_id < N*N; B_id++){
if(B_id%sh_b_size == 0 ){
__syncthreads();
b_jump += sh_b_size;
for (uint32_t i = tid; i < sh_b_size; i += blockDim.x){
sh_B[i] = B[i+b_jump];
}
__syncthreads();
}
float B_ele;
B_ele = sh_B[B_id-b_jump];
uint32_t B_i = B_id / N;
uint32_t B_j = B_id % N;
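// Kronecker index mapping: output row = A_i*N + B_i, output column = A_j*N + B_j,
// stored row-major in C with row pitch N*M.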
uint32_t C_i = A_i*N + B_i;
uint32_t C_j = A_j*N + B_j;
uint32_t C_id = C_i*N*M + C_j;
//C[C_id] = A_ele*B_ele;
shrd_C[tid*blockDim.x + id] = A_ele*B_ele;
shrd_C_id[tid*blockDim.x + id] = C_id;
id++;
if(id == 32){
__syncthreads();
#pragma unroll
for (int i=0; i<1024; i+=32){
C[shrd_C_id[tid+i]] = shrd_C[tid+i];
}
id = 0;
__syncthreads();
}
}
}
}
}
/***************************************************/
/***************************************************/
/***************************************************/
__global__ void KroneckerKernelSmall(const int M, const int N, float* A, float* B, float* C){
//Each thread reads its entries of A, does N multiplies, and then jumps
//N*M (in the C array) to do the same N multiplies with different values of B, jumping
//N times in total.
//The above is packed inside a loop that iterates a number of times equal to N*M/numWarps
//**** Move B to shared memory ***//
extern __shared__ float shrd_ptr[];
float*sh_B = shrd_ptr;
__shared__ float shrd_C[1024];
__shared__ uint32_t shrd_C_id[1024];
uint32_t tid = threadIdx.x;
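// Cooperatively load all of B into shared memory; this small-kernel path assumes
// the whole of B (N*N floats) fits in the dynamic shared allocation.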
for (uint32_t i = tid; i < N*N; i += blockDim.x){
sh_B[i] = B[i];
}
__syncthreads();
//the indices
uint32_t numJumps = max(1,((M*M) / (blockDim.x * gridDim.x))); // at least do one run
for (uint32_t jump_id = 0; jump_id < numJumps; jump_id++){ //how many different groups of elements a block will touch
//groups of elements are elements next to one another in A
uint32_t A_id = blockIdx.x * blockDim.x + tid + jump_id*gridDim.x*blockDim.x; //the element A id
if(A_id < M*M){
uint32_t A_i = A_id / M;
uint32_t A_j = A_id % M;
float A_ele = A[A_id];
int id =0;
#pragma unroll
for (uint32_t B_id = 0; B_id < N*N; B_id++){
float B_ele = sh_B[B_id];
uint32_t B_i = B_id / N;
uint32_t B_j = B_id % N;
uint32_t C_i = A_i*N + B_i;
uint32_t C_j = A_j*N + B_j;
uint32_t C_id = C_i*N*M + C_j;
//C[C_id] = A_ele*B_ele;
shrd_C[tid*blockDim.x + id] = A_ele*B_ele;
shrd_C_id[tid*blockDim.x + id] = C_id;
id++;
if(id == 32){
__syncthreads();
#pragma unroll
for (int i=0; i<1024; i+=32){
C[shrd_C_id[tid+i]] = shrd_C[tid+i];
//C[tid + i] = shrd_C[ tid + i ];
}
id = 0;
__syncthreads();
}
}
}
}
}
void KroneckerGPUSmall(int M, int N, float* A, float* B, float*C){
//The assumption here is that B is at most 16kB
//Prefer shared memory since the kernel needs at most 16kB of it
//hipFuncSetCacheConfig(KroneckerKernelSmall, hipFuncCachePreferL1);
hipFuncSetCacheConfig(KroneckerKernelSmall, hipFuncCachePreferShared);
float *d_A, *d_B, *d_C;
const int M2 = M*M;
const int N2 = N*N;
//****allocate memory on device***//
HANDLE_ERROR(hipMallocManaged(&d_A, M2*sizeof(float)));
HANDLE_ERROR(hipMallocManaged(&d_B, N2*sizeof(float)));
HANDLE_ERROR(hipMallocManaged(&d_C, M2*N2*sizeof(float)));
//#define my_debug
//****move data to device***//
for (int i = 0; i<N2; i++){ d_B[i] = B[i]; }
for (int i = 0; i<M2; i++){ d_A[i] = A[i]; }
#ifdef my_debug
const int shrdMem = N2;//amount of shared memory is the size of B (we move it all in shared memory)
float* cpu_result = (float*)malloc(sizeof(float) * M * N * M * N);
KroneckerCPU(M, N, A, B, cpu_result);
HANDLE_ERROR(hipProfilerStart());
hipLaunchKernelGGL(( KroneckerKernelSmall) , dim3(512), dim3(32), shrdMem*sizeof(float) , 0, M, N, d_A, d_B, d_C);
HANDLE_ERROR(hipProfilerStop());
HANDLE_ERROR(hipDeviceSynchronize());
exit(0);
const int numIter = 100;
const float mem = float((sizeof(float)*(N2 + M2 + M2*N2)) / (1024.0f * 1024.0f)); //in mega bytes
int numBlocks=0;
for (int bb = 2; bb <=2048; bb += 2){
int mod = M2 % (bb*32);
if(mod != 0 ){continue;}
numBlocks = bb;
}
numBlocks = 1024;
int numThreads = 32;
hipEvent_t start, stop;
HANDLE_ERROR(hipEventCreate(&start));//timing
HANDLE_ERROR(hipEventCreate(&stop));
HANDLE_ERROR(hipEventRecord(start, 0));
for (int i = 0; i<numIter; i++){
hipLaunchKernelGGL(( KroneckerKernelSmall) , dim3(numBlocks), dim3(numThreads), shrdMem*sizeof(float) , 0, M, N, d_A, d_B, d_C);
}
HANDLE_ERROR(hipDeviceSynchronize());
HANDLE_ERROR(hipEventRecord(stop, 0));
HANDLE_ERROR(hipEventSynchronize(stop));
float time = 0.0f;
HANDLE_ERROR(hipEventElapsedTime(&time, start, stop));
time /= numIter; //time in mSec
time /= 1000;//time in seconds
float myBW = mem / time;
float gflops = (float(M2)*float(N2))/(1024.0f*1024.0f*1024.0f);
std::cout << "\n M= "<<M << " N= "<< N <<" numThreads= " << numThreads << " numBlocks= " << numBlocks << " time(sec)= " << time << " BW(MB/s)= " << myBW
<< " BW_prec= " << 100*(myBW / maxBW)<<" GFLOPs/Sec= "<< gflops/time << std::endl;
#else
const int shrdMem = N2;//amount of shared memory is the size of B (we move it all in shared memory)
int numBlocks = 1024;
int numThreads = 32;
hipLaunchKernelGGL(( KroneckerKernelSmall) , dim3(numBlocks), dim3(numThreads), shrdMem*sizeof(float) , 0, M, N, d_A, d_B, d_C);
HANDLE_ERROR(hipDeviceSynchronize());
#endif
//****move data to host***//
for (int i = 0; i<N2*M2; i++){ C[i] = d_C[i]; }
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
HANDLE_ERROR(hipDeviceReset());
}
/*
int main(int argc, char* argv[]) {
if (argc != 3){
std::cout << "\n Usage ./kron [M][N]" << std::endl;
std::cout << " M and N is the size of matrix A and B respectively." << std::endl;
std::cout << " M and N should be power of 2" << std::endl;
exit(EXIT_FAILURE);
}
const int M = atoi(argv[1]);
const int N = atoi(argv[2]);
if (ceil(log2(float(M))) != floor(log2(float(M))) || ceil(log2(float(N))) != floor(log2(float(N)))){
std::cout << "\n M and N should be power of 2" << std::endl;
exit(EXIT_FAILURE);
}
//const int M = 32; //A size
//const int N = 32; //B size
///////////////////////////////////////////////////////////////////////////////////////////////
int deviceCount;
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0){
printf("\n deviceCount is zero. I quit!!!");
exit(EXIT_FAILURE);
}
const int dev = (deviceCount == 1) ? 0 : 3;
hipSetDevice(dev);
hipDeviceProp_t devProp;
HANDLE_ERROR(hipGetDeviceProperties(&devProp, dev));
printf("\n Total number of device: %d", deviceCount);
printf("\n Using device Number: %d", dev);
printf("\n Device name: %s", devProp.name);
printf("\n devProp.major: %d", devProp.major);
printf("\n devProp.minor: %d", devProp.minor);
printf("\n Total global memory (GB): %zu", devProp.totalGlobalMem>>30 );
if (devProp.major == 1){//Fermi
if (devProp.minor == 1){
printf("\n SM Count: %d", devProp.multiProcessorCount * 48);
}
else{
printf("\n SM Count: %d", devProp.multiProcessorCount * 32);
}
}
else if (devProp.major == 3){//Kepler
printf("\n SM Count: %d", devProp.multiProcessorCount * 192);
}
else if (devProp.major == 5){//Maxwell
printf("\n SM Count: %d", devProp.multiProcessorCount * 128);
}
else if (devProp.major == 6){//Pascal
if (devProp.minor == 1){
printf("\n SM Count: %d", devProp.multiProcessorCount * 128);
}
else if (devProp.minor == 0){
printf("\n SM Count: %d", devProp.multiProcessorCount * 64);
}
}
printf("\n Compute Capability: v%d.%d", (int)devProp.major, (int)devProp.minor);
printf("\n Memory Clock Rate: %d(kHz)", devProp.memoryClockRate);
printf("\n Memory Bus Width: %d(bits)", devProp.memoryBusWidth);
maxBW = 2.0 * devProp.memoryClockRate*(devProp.memoryBusWidth / 8.0) / 1.0E3;
//printf("\n Peak Memory Bandwidth: %f(MB/s)\n", maxBW);
///////////////////////////////////////////////////////////////////////////////////////////////
float* A = (float*)malloc(sizeof(float) * M * M);
float* B = (float*)malloc(sizeof(float) * N * N);
float* cpu_result = (float*)malloc(sizeof(float) * M * N * M * N);
float* gpu_result = (float*)malloc(sizeof(float) * M * N * M * N);
for (int i = 0; i < M * M; i++){
A[i] = float(i + 1);
}
for (int i = 0; i < N * N; i++){
B[i] = float(i + 1);
}
KroneckerGPU(M, N, A, B, gpu_result);
KroneckerCPU(M, N, A, B, cpu_result);
checkResults(N, M, gpu_result, cpu_result);
printf("\nDone %f %f\n", gpu_result[M * N * M * N - 1], cpu_result[M * N * M * N - 1]);
free(A);
free(B);
free(cpu_result);
free(gpu_result);
return 0;
}*/
//****************************************************************************//
void KroneckerGPU(int M, int N, float* A, float* B, float* C) {
//KroneckerGPU launches the tiled KroneckerKernel, which stages B through shared memory
//in tiles of at most 64*64 floats (16kB), so B does not need to fit entirely.
//Prefer shared memory since the kernel needs at most 16kB of it.
//hipFuncSetCacheConfig(KroneckerKernel, hipFuncCachePreferL1);
hipFuncSetCacheConfig(KroneckerKernel, hipFuncCachePreferShared);
float *d_A, *d_B, *d_C;
const int M2 = M*M;
const int N2 = N*N;
//****allocate memory on device***//
HANDLE_ERROR(hipMallocManaged(&d_A, M2*sizeof(float)));
HANDLE_ERROR(hipMallocManaged(&d_B, N2*sizeof(float)));
HANDLE_ERROR(hipMallocManaged(&d_C, M2*N2*sizeof(float)));
//#define my_debug
//****move data to device***//
for (int i = 0; i<N2; i++){ d_B[i] = B[i]; }
for (int i = 0; i<M2; i++){ d_A[i] = A[i]; }
#ifdef my_debug
const int shrdMem = (N*N <= 64*64) ? N*N: 64*64;//amount of shared memory is the size of B (we move it all in shared memory)
const int numIter = 1;
const float mem = float((sizeof(float)*(N2 + M2 + M2*N2)) / (1024.0f * 1024.0f)); //in mega bytes
int numBlocks = 1024;
int numThreads = 32;
hipEvent_t start, stop;
HANDLE_ERROR(hipEventCreate(&start));//timing
HANDLE_ERROR(hipEventCreate(&stop));
HANDLE_ERROR(hipEventRecord(start, 0));
for (int i = 0; i<numIter; i++){
hipLaunchKernelGGL(( KroneckerKernel) , dim3(numBlocks), dim3(numThreads), shrdMem*sizeof(float) , 0, M, N, d_A, d_B, d_C);
}
HANDLE_ERROR(hipDeviceSynchronize());
HANDLE_ERROR(hipEventRecord(stop, 0));
HANDLE_ERROR(hipEventSynchronize(stop));
float time = 0.0f;
HANDLE_ERROR(hipEventElapsedTime(&time, start, stop));
time /= numIter; //time in mSec
time /= 1000;//time in seconds
float myBW = mem / time;
float gflops = (float(M2)*float(N2))/(1024.0f*1024.0f*1024.0f);
std::cout << "\n M= "<<M << " N= "<< N <<" numThreads= " << numThreads << " numBlocks= " << numBlocks << " time(sec)= " << time << " BW(MB/s)= " << myBW
<< " BW_prec= " << 100*(myBW / maxBW)<<" GFLOPs/Sec= "<< gflops/time << std::endl;
#else
const int shrdMem = (N*N <= 64*64) ? N*N: 64*64;//amount of shared memory is the size of B (we move it all in shared memory)
int numBlocks = 2048;
int numThreads = 32;
hipLaunchKernelGGL(( KroneckerKernel) , dim3(numBlocks), dim3(numThreads), shrdMem*sizeof(float) , 0, M, N, d_A, d_B, d_C);
HANDLE_ERROR(hipDeviceSynchronize());
#endif
//****move data to host***//
for (int i = 0; i<N2*M2; i++){ C[i] = d_C[i]; }
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
HANDLE_ERROR(hipDeviceReset());
}
| e5943ca068ea666de84cc49c85912a0867b7d72f.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cuda_profiler_api.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
#include <cstdint>
double maxBW;
void KroneckerGPUSmall(int M, int N, float* A, float* B, float* C);
void KroneckerGPU(int M, int N, float* A, float* B, float* C);
//Error handling macro, wrap it around runtime API calls whenever possible
static void HandleError(cudaError_t err, const char *file, int line) {
if (err != cudaSuccess) {
printf("\n%s in %s at line %d\n", cudaGetErrorString(err), file, line);
//system("pause");
exit(EXIT_FAILURE);
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
void KroneckerCPU(int M, int N, float* A, float* B, float*C) {
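// Reference implementation: C is an (M*N) x (M*N) row-major matrix with
// C[rowA*N + rowB][colA*N + colB] = A[rowA][colA] * B[rowB][colB].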
for (int rowA = 0; rowA < M; rowA++) {
for (int colA = 0; colA < M; colA++) {
float elemA = A[rowA * M + colA];
for (int rowB = 0; rowB < N; rowB++) {
int rowC = rowA * N + rowB;
for (int colB = 0; colB < N; colB++) {
int colC = colA * N + colB;
float elemB = B[rowB * N + colB];
C[rowC * (M * N) + colC] = elemA * elemB;
}
}
}
}
}
void PrintMatrix(float* matrix, int M, int N) {
for (int row = 0; row<M; row++)
{
for (int columns = 0; columns<N; columns++)
{
printf("%7.3f ", matrix[row * N + columns]);
}
printf("\n");
}
}
void checkResults(int N, int M, float* gpu_result, float* cpu_result){
for (int i = 0; i < M * N * M * N; i++) {
if (fabs(gpu_result[i] - cpu_result[i]) > 0.01) {
printf("\n Mismatch at index %d: GPU: %f CPU %f\n", i, gpu_result[i], cpu_result[i]);
exit(EXIT_FAILURE);
}
}
printf("\n Ok!!!\n");
}
/***************************************************/
/***************************************************/
/***************************************************/
__global__ void KroneckerKernel(const int M, const int N, float* A, float* B, float* C){
extern __shared__ float shrd_ptr[];
float*sh_B = shrd_ptr;
__shared__ float shrd_C[1024];
__shared__ uint32_t shrd_C_id[1024];
uint32_t tid = threadIdx.x;
uint32_t sh_b_size = (N*N <= 64*64) ? N*N: 64*64;
int b_jump = -sh_b_size;
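// B is staged through dynamic shared memory in tiles of at most 64*64 floats;
// b_jump holds the global offset of the tile currently resident in sh_B.
// Products are buffered in shrd_C / shrd_C_id (32 per thread) and flushed to C in coalesced batches.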
uint32_t numJumps = max(1,((M*M) / (blockDim.x * gridDim.x))); // at least do one run
for (uint32_t jump_id = 0; jump_id < numJumps; jump_id++){ //how many different groups of elements a block will touch
//groups of elements are elements next to one another in A
uint32_t A_id = blockIdx.x * blockDim.x + tid + jump_id*gridDim.x*blockDim.x; //the element A id
if(A_id < M*M){
uint32_t A_i = A_id / M;
uint32_t A_j = A_id % M;
float A_ele = A[A_id];
int id =0;
#pragma unroll
for (uint32_t B_id = 0; B_id < N*N; B_id++){
if(B_id%sh_b_size == 0 ){
__syncthreads();
b_jump += sh_b_size;
for (uint32_t i = tid; i < sh_b_size; i += blockDim.x){
sh_B[i] = B[i+b_jump];
}
__syncthreads();
}
float B_ele;
B_ele = sh_B[B_id-b_jump];
uint32_t B_i = B_id / N;
uint32_t B_j = B_id % N;
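// Kronecker index mapping: output row = A_i*N + B_i, output column = A_j*N + B_j,
// stored row-major in C with row pitch N*M.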
uint32_t C_i = A_i*N + B_i;
uint32_t C_j = A_j*N + B_j;
uint32_t C_id = C_i*N*M + C_j;
//C[C_id] = A_ele*B_ele;
shrd_C[tid*blockDim.x + id] = A_ele*B_ele;
shrd_C_id[tid*blockDim.x + id] = C_id;
id++;
if(id == 32){
__syncthreads();
#pragma unroll
for (int i=0; i<1024; i+=32){
C[shrd_C_id[tid+i]] = shrd_C[tid+i];
}
id = 0;
__syncthreads();
}
}
}
}
}
/***************************************************/
/***************************************************/
/***************************************************/
__global__ void KroneckerKernelSmall(const int M, const int N, float* A, float* B, float* C){
//Each thread reads its entries of A, does N multiplies, and then jumps
//N*M (in the C array) to do the same N multiplies with different values of B, jumping
//N times in total.
//The above is packed inside a loop that iterates a number of times equal to N*M/numWarps
//**** Move B to shared memory ***//
extern __shared__ float shrd_ptr[];
float*sh_B = shrd_ptr;
__shared__ float shrd_C[1024];
__shared__ uint32_t shrd_C_id[1024];
uint32_t tid = threadIdx.x;
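// Cooperatively load all of B into shared memory; this small-kernel path assumes
// the whole of B (N*N floats) fits in the dynamic shared allocation.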
for (uint32_t i = tid; i < N*N; i += blockDim.x){
sh_B[i] = B[i];
}
__syncthreads();
//the indices
uint32_t numJumps = max(1,((M*M) / (blockDim.x * gridDim.x))); // at least do one run
for (uint32_t jump_id = 0; jump_id < numJumps; jump_id++){ //how many different groups of elements a block will touch
//groups of elements are elements next to one another in A
uint32_t A_id = blockIdx.x * blockDim.x + tid + jump_id*gridDim.x*blockDim.x; //the element A id
if(A_id < M*M){
uint32_t A_i = A_id / M;
uint32_t A_j = A_id % M;
float A_ele = A[A_id];
int id =0;
#pragma unroll
for (uint32_t B_id = 0; B_id < N*N; B_id++){
float B_ele = sh_B[B_id];
uint32_t B_i = B_id / N;
uint32_t B_j = B_id % N;
uint32_t C_i = A_i*N + B_i;
uint32_t C_j = A_j*N + B_j;
uint32_t C_id = C_i*N*M + C_j;
//C[C_id] = A_ele*B_ele;
shrd_C[tid*blockDim.x + id] = A_ele*B_ele;
shrd_C_id[tid*blockDim.x + id] = C_id;
id++;
if(id == 32){
__syncthreads();
#pragma unroll
for (int i=0; i<1024; i+=32){
C[shrd_C_id[tid+i]] = shrd_C[tid+i];
//C[tid + i] = shrd_C[ tid + i ];
}
id = 0;
__syncthreads();
}
}
}
}
}
void KroneckerGPUSmall(int M, int N, float* A, float* B, float*C){
//The assumption here is that B is at most 16kB
//Prefer shared memory since the kernel needs at most 16kB of it
//cudaFuncSetCacheConfig(KroneckerKernelSmall, cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(KroneckerKernelSmall, cudaFuncCachePreferShared);
float *d_A, *d_B, *d_C;
const int M2 = M*M;
const int N2 = N*N;
//****allocate memory on device***//
HANDLE_ERROR(cudaMallocManaged(&d_A, M2*sizeof(float)));
HANDLE_ERROR(cudaMallocManaged(&d_B, N2*sizeof(float)));
HANDLE_ERROR(cudaMallocManaged(&d_C, M2*N2*sizeof(float)));
//#define my_debug
//****move data to device***//
for (int i = 0; i<N2; i++){ d_B[i] = B[i]; }
for (int i = 0; i<M2; i++){ d_A[i] = A[i]; }
#ifdef my_debug
const int shrdMem = N2;//amount of shared memory is the size of B (we move it all in shared memory)
float* cpu_result = (float*)malloc(sizeof(float) * M * N * M * N);
KroneckerCPU(M, N, A, B, cpu_result);
HANDLE_ERROR(cudaProfilerStart());
KroneckerKernelSmall <<< 512, 32, shrdMem*sizeof(float) >>>(M, N, d_A, d_B, d_C);
HANDLE_ERROR(cudaProfilerStop());
HANDLE_ERROR(cudaDeviceSynchronize());
exit(0);
const int numIter = 100;
const float mem = float((sizeof(float)*(N2 + M2 + M2*N2)) / (1024.0f * 1024.0f)); //in mega bytes
int numBlocks=0;
for (int bb = 2; bb <=2048; bb += 2){
int mod = M2 % (bb*32);
if(mod != 0 ){continue;}
numBlocks = bb;
}
numBlocks = 1024;
int numThreads = 32;
cudaEvent_t start, stop;
HANDLE_ERROR(cudaEventCreate(&start));//timing
HANDLE_ERROR(cudaEventCreate(&stop));
HANDLE_ERROR(cudaEventRecord(start, 0));
for (int i = 0; i<numIter; i++){
KroneckerKernelSmall <<< numBlocks, numThreads, shrdMem*sizeof(float) >>>(M, N, d_A, d_B, d_C);
}
HANDLE_ERROR(cudaDeviceSynchronize());
HANDLE_ERROR(cudaEventRecord(stop, 0));
HANDLE_ERROR(cudaEventSynchronize(stop));
float time = 0.0f;
HANDLE_ERROR(cudaEventElapsedTime(&time, start, stop));
time /= numIter; //time in mSec
time /= 1000;//time in seconds
float myBW = mem / time;
float gflops = (float(M2)*float(N2))/(1024.0f*1024.0f*1024.0f);
std::cout << "\n M= "<<M << " N= "<< N <<" numThreads= " << numThreads << " numBlocks= " << numBlocks << " time(sec)= " << time << " BW(MB/s)= " << myBW
<< " BW_prec= " << 100*(myBW / maxBW)<<" GFLOPs/Sec= "<< gflops/time << std::endl;
#else
const int shrdMem = N2;//amount of shared memory is the size of B (we move it all in shared memory)
int numBlocks = 1024;
int numThreads = 32;
KroneckerKernelSmall <<< numBlocks, numThreads, shrdMem*sizeof(float) >>>(M, N, d_A, d_B, d_C);
HANDLE_ERROR(cudaDeviceSynchronize());
#endif
//****move data to host***//
for (int i = 0; i<N2*M2; i++){ C[i] = d_C[i]; }
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
HANDLE_ERROR(cudaDeviceReset());
}
/*
int main(int argc, char* argv[]) {
if (argc != 3){
std::cout << "\n Usage ./kron [M][N]" << std::endl;
std::cout << " M and N is the size of matrix A and B respectively." << std::endl;
std::cout << " M and N should be power of 2" << std::endl;
exit(EXIT_FAILURE);
}
const int M = atoi(argv[1]);
const int N = atoi(argv[2]);
if (ceil(log2(float(M))) != floor(log2(float(M))) || ceil(log2(float(N))) != floor(log2(float(N)))){
std::cout << "\n M and N should be power of 2" << std::endl;
exit(EXIT_FAILURE);
}
//const int M = 32; //A size
//const int N = 32; //B size
///////////////////////////////////////////////////////////////////////////////////////////////
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0){
printf("\n deviceCount is zero. I quit!!!");
exit(EXIT_FAILURE);
}
const int dev = (deviceCount == 1) ? 0 : 3;
cudaSetDevice(dev);
cudaDeviceProp devProp;
HANDLE_ERROR(cudaGetDeviceProperties(&devProp, dev));
printf("\n Total number of device: %d", deviceCount);
printf("\n Using device Number: %d", dev);
printf("\n Device name: %s", devProp.name);
printf("\n devProp.major: %d", devProp.major);
printf("\n devProp.minor: %d", devProp.minor);
printf("\n Total global memory (GB): %zu", devProp.totalGlobalMem>>30 );
if (devProp.major == 1){//Fermi
if (devProp.minor == 1){
printf("\n SM Count: %d", devProp.multiProcessorCount * 48);
}
else{
printf("\n SM Count: %d", devProp.multiProcessorCount * 32);
}
}
else if (devProp.major == 3){//Kepler
printf("\n SM Count: %d", devProp.multiProcessorCount * 192);
}
else if (devProp.major == 5){//Maxwell
printf("\n SM Count: %d", devProp.multiProcessorCount * 128);
}
else if (devProp.major == 6){//Pascal
if (devProp.minor == 1){
printf("\n SM Count: %d", devProp.multiProcessorCount * 128);
}
else if (devProp.minor == 0){
printf("\n SM Count: %d", devProp.multiProcessorCount * 64);
}
}
printf("\n Compute Capability: v%d.%d", (int)devProp.major, (int)devProp.minor);
printf("\n Memory Clock Rate: %d(kHz)", devProp.memoryClockRate);
printf("\n Memory Bus Width: %d(bits)", devProp.memoryBusWidth);
maxBW = 2.0 * devProp.memoryClockRate*(devProp.memoryBusWidth / 8.0) / 1.0E3;
//printf("\n Peak Memory Bandwidth: %f(MB/s)\n", maxBW);
///////////////////////////////////////////////////////////////////////////////////////////////
float* A = (float*)malloc(sizeof(float) * M * M);
float* B = (float*)malloc(sizeof(float) * N * N);
float* cpu_result = (float*)malloc(sizeof(float) * M * N * M * N);
float* gpu_result = (float*)malloc(sizeof(float) * M * N * M * N);
for (int i = 0; i < M * M; i++){
A[i] = float(i + 1);
}
for (int i = 0; i < N * N; i++){
B[i] = float(i + 1);
}
KroneckerGPU(M, N, A, B, gpu_result);
KroneckerCPU(M, N, A, B, cpu_result);
checkResults(N, M, gpu_result, cpu_result);
printf("\nDone %f %f\n", gpu_result[M * N * M * N - 1], cpu_result[M * N * M * N - 1]);
free(A);
free(B);
free(cpu_result);
free(gpu_result);
return 0;
}*/
//****************************************************************************//
void KroneckerGPU(int M, int N, float* A, float* B, float* C) {
//KroneckerGPU launches the tiled KroneckerKernel, which stages B through shared memory
//in tiles of at most 64*64 floats (16kB), so B does not need to fit entirely.
//Prefer shared memory since the kernel needs at most 16kB of it.
//cudaFuncSetCacheConfig(KroneckerKernel, cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(KroneckerKernel, cudaFuncCachePreferShared);
float *d_A, *d_B, *d_C;
const int M2 = M*M;
const int N2 = N*N;
//****allocate memory on device***//
HANDLE_ERROR(cudaMallocManaged(&d_A, M2*sizeof(float)));
HANDLE_ERROR(cudaMallocManaged(&d_B, N2*sizeof(float)));
HANDLE_ERROR(cudaMallocManaged(&d_C, M2*N2*sizeof(float)));
//#define my_debug
//****move data to device***//
for (int i = 0; i<N2; i++){ d_B[i] = B[i]; }
for (int i = 0; i<M2; i++){ d_A[i] = A[i]; }
#ifdef my_debug
const int shrdMem = (N*N <= 64*64) ? N*N: 64*64;//amount of shared memory is the size of B (we move it all in shared memory)
const int numIter = 1;
const float mem = float((sizeof(float)*(N2 + M2 + M2*N2)) / (1024.0f * 1024.0f)); //in mega bytes
int numBlocks = 1024;
int numThreads = 32;
cudaEvent_t start, stop;
HANDLE_ERROR(cudaEventCreate(&start));//timing
HANDLE_ERROR(cudaEventCreate(&stop));
HANDLE_ERROR(cudaEventRecord(start, 0));
for (int i = 0; i<numIter; i++){
KroneckerKernel <<< numBlocks, numThreads, shrdMem*sizeof(float) >>>(M, N, d_A, d_B, d_C);
}
HANDLE_ERROR(cudaDeviceSynchronize());
HANDLE_ERROR(cudaEventRecord(stop, 0));
HANDLE_ERROR(cudaEventSynchronize(stop));
float time = 0.0f;
HANDLE_ERROR(cudaEventElapsedTime(&time, start, stop));
time /= numIter; //time in mSec
time /= 1000;//time in seconds
float myBW = mem / time;
float gflops = (float(M2)*float(N2))/(1024.0f*1024.0f*1024.0f);
std::cout << "\n M= "<<M << " N= "<< N <<" numThreads= " << numThreads << " numBlocks= " << numBlocks << " time(sec)= " << time << " BW(MB/s)= " << myBW
<< " BW_prec= " << 100*(myBW / maxBW)<<" GFLOPs/Sec= "<< gflops/time << std::endl;
#else
const int shrdMem = (N*N <= 64*64) ? N*N: 64*64;//amount of shared memory is the size of B (we move it all in shared memory)
int numBlocks = 2048;
int numThreads = 32;
KroneckerKernel <<< numBlocks, numThreads, shrdMem*sizeof(float) >>>(M, N, d_A, d_B, d_C);
HANDLE_ERROR(cudaDeviceSynchronize());
#endif
//****move data to host***//
for (int i = 0; i<N2*M2; i++){ C[i] = d_C[i]; }
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
HANDLE_ERROR(cudaDeviceReset());
}
|
b6479a9d3b1b52ffde394973d0e651d197cf742d.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------------------------------
// Copyrighted by Marko Rakita.
// Author: Marko Rakita
// File contains: Mocked neural network convolutional layer, used in tests.
// Created: 01/27/2016.
// ----------------------------------------------------------------------------------------------------
#include "include/mockconvolutionallayer.cuh"
MockConvolutionalLayer::MockConvolutionalLayer(uint inputNumChannels, uint inputDataWidth, uint inputDataHeight, uint inputDataCount, uint numFilters, uint filterWidth,
uint filterHeight, uint numFilterChannels, float weightsDeviation, float biasesInitialValue, float filtersUpdateMomentum, float filtersUpdateDecay,
float filtersUpdateLearningRateProgressStep, float filtersUpdateStartingLearningRate, float filtersUpdateLearningRateUpdateFactor, float biasesUpdateMomentum,
float biasesUpdateDecay, float biasesUpdateLearningRateProgressStep, float biasesUpdateStartingLearningRate, float biasesUpdateLearningRateUpdateFactor,
int paddingX, int paddingY, uint stride, ActivationType activationType)
{
m_layerType = LayerType::Convolutional;
m_indexInTier = 0;
m_tierSize = 1;
m_inputNumChannels = inputNumChannels;
m_inputDataWidth = inputDataWidth;
m_inputDataHeight = inputDataHeight;
m_inputDataSize = m_inputDataWidth * m_inputDataHeight;
m_inputDataCount = inputDataCount;
m_holdsInputData = true;
m_activationType = activationType;
m_numFilters = numFilters;
m_filterWidth = filterWidth;
m_filterHeight = filterHeight;
m_filterSize = m_filterWidth * m_filterHeight;
m_numFilterChannels = numFilterChannels;
m_filtersUpdateMomentum = filtersUpdateMomentum;
m_filtersUpdateDecay = filtersUpdateDecay;
m_filtersUpdateLearningRateProgressStep = filtersUpdateLearningRateProgressStep;
m_filtersUpdateStartingLearningRate = filtersUpdateStartingLearningRate;
m_filtersUpdateLearningRateUpdateFactor = filtersUpdateLearningRateUpdateFactor;
m_biasesUpdateMomentum = biasesUpdateMomentum;
m_biasesUpdateDecay = biasesUpdateDecay;
m_biasesUpdateLearningRateProgressStep = biasesUpdateLearningRateProgressStep;
m_biasesUpdateStartingLearningRate = biasesUpdateStartingLearningRate;
m_biasesUpdateLearningRateUpdateFactor = biasesUpdateLearningRateUpdateFactor;
m_paddingX = paddingX;
m_paddingY = paddingY;
m_stride = stride;
m_numPatchesX = 1 + (uint)ceil((double)(2 * paddingX + m_inputDataWidth - m_filterWidth) / m_stride);
m_numPatchesY = 1 + (uint)ceil((double)(2 * paddingY + m_inputDataHeight - m_filterHeight) / m_stride);
m_activationNumChannels = m_numFilters;
m_activationDataWidth = m_numPatchesX;
m_activationDataHeight = m_numPatchesY;
m_activationDataSize = m_activationDataWidth * m_activationDataHeight;
// Allocating input data buffer.
m_inputBufferSize = m_inputNumChannels * m_inputDataSize * m_inputDataCount * sizeof(float);
if (m_holdsInputData)
{
CudaAssert(hipHostMalloc<float>(&m_inputDataBuffer, m_inputBufferSize));
}
// Allocating input gradients buffer.
CudaAssert(hipHostMalloc<float>(&m_inputGradientsBuffer, m_inputBufferSize));
// Allocating filters buffers.
m_filtersBufferSize = m_numFilters * m_filterSize * m_numFilterChannels * sizeof(float);
CudaAssert(hipHostMalloc<float>(&m_filtersBuffer, m_filtersBufferSize));
CudaAssert(hipHostMalloc<float>(&m_filtersGradientsBuffer, m_filtersBufferSize));
CudaAssert(hipHostMalloc<float>(&m_filtersUpdateBuffer, m_filtersBufferSize));
// Initializing filter weights.
InitializeFilterWeights(weightsDeviation);
InitializeBuffer(m_filtersUpdateBuffer, m_filtersBufferSize, 0.f);
// Allocating biases buffers.
m_biasesBufferSize = m_numFilters * sizeof(float);
CudaAssert(hipHostMalloc<float>(&m_biasesBuffer, m_biasesBufferSize));
CudaAssert(hipHostMalloc<float>(&m_biasesGradientsBuffer, m_biasesBufferSize));
CudaAssert(hipHostMalloc<float>(&m_biasesUpdateBuffer, m_biasesBufferSize));
// Initializing biases.
InitializeBuffer(m_biasesBuffer, m_biasesBufferSize, biasesInitialValue);
InitializeBuffer(m_biasesUpdateBuffer, m_biasesBufferSize, 0.f);
// Allocating preactivation and activation data buffers.
m_activationBufferSize = m_numFilters * m_activationDataSize * m_inputDataCount * sizeof(float);
CudaAssert(hipHostMalloc<float>(&m_preactivationDataBuffer, m_activationBufferSize));
CudaAssert(hipHostMalloc<float>(&m_activationDataBuffer, m_activationBufferSize));
// Allocating preactivation gradients buffer.
CudaAssert(hipHostMalloc<float>(&m_preactivationGradientsBuffer, m_activationBufferSize));
// Allocating activation gradients buffer.
m_holdsActivationGradients = true;
if (m_holdsActivationGradients)
{
CudaAssert(hipHostMalloc<float>(&m_activationGradientsBuffer, m_activationBufferSize));
}
}
void MockConvolutionalLayer::InitializeFilterWeights(float weightsDeviation)
{
default_random_engine generator((uint)chrono::system_clock::now().time_since_epoch().count());
normal_distribution<float> distribution(0.f, weightsDeviation);
size_t filtersBufferLength = m_filtersBufferSize / sizeof(float);
for (size_t i = 0; i < filtersBufferLength; ++i)
{
m_filtersBuffer[i] = distribution(generator);
}
}
void MockConvolutionalLayer::InitializeBuffer(float* buffer, size_t bufferSize, float initialValue)
{
size_t bufferLength = bufferSize / sizeof(float);
for (size_t i = 0; i < bufferLength; ++i)
{
buffer[i] = initialValue;
}
}
MockConvolutionalLayer::~MockConvolutionalLayer()
{
if (m_holdsInputData)
{
CudaAssert(hipHostFree(m_inputDataBuffer));
}
m_inputDataBuffer = NULL;
CudaAssert(hipHostFree(m_inputGradientsBuffer));
m_inputGradientsBuffer = NULL;
CudaAssert(hipHostFree(m_filtersBuffer));
CudaAssert(hipHostFree(m_filtersGradientsBuffer));
CudaAssert(hipHostFree(m_filtersUpdateBuffer));
CudaAssert(hipHostFree(m_biasesBuffer));
CudaAssert(hipHostFree(m_biasesGradientsBuffer));
CudaAssert(hipHostFree(m_biasesUpdateBuffer));
CudaAssert(hipHostFree(m_preactivationDataBuffer));
CudaAssert(hipHostFree(m_activationDataBuffer));
m_activationDataBuffer = NULL;
CudaAssert(hipHostFree(m_preactivationGradientsBuffer));
if (m_holdsActivationGradients)
{
CudaAssert(hipHostFree(m_activationGradientsBuffer));
}
m_activationGradientsBuffer = NULL;
}
void MockConvolutionalLayer::LoadInputs()
{
TestingAssert(m_prevLayers.size() == 1, "We do not support more than one previous layer in tests, for now.");
CudaAssert(hipMemcpy(m_inputDataBuffer, m_prevLayers[0]->GetActivationDataBuffer(), m_inputBufferSize, hipMemcpyDeviceToHost));
}
void MockConvolutionalLayer::LoadActivationGradients()
{
TestingAssert(m_nextLayers.size() == 1, "We do not support more than one next layer in tests, for now.");
CudaAssert(hipMemcpy(m_activationGradientsBuffer, m_nextLayers[0]->GetInputGradientsBuffer(), m_activationBufferSize, hipMemcpyDeviceToHost));
}
void MockConvolutionalLayer::CalculatePreactivations()
{
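// Layout note: input and activation buffers are indexed as
// (channel * imageSize + pixel) * imageCount + image (the filter index plays the
// role of the channel for activations), and filters as
// (channel * filterSize + filterPixel) * numFilters + filter.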
for (uint dataIndex = 0; dataIndex < m_inputDataCount; ++dataIndex)
{
for (uint filterIndex = 0; filterIndex < m_numFilters; ++filterIndex)
{
const uint c_activationChannelOffset = m_activationDataSize * filterIndex * m_inputDataCount;
for (uint channel = 0; channel < m_inputNumChannels; ++channel)
{
const uint c_filtersChannelOffset = channel * m_numFilters * m_filterSize;
const uint c_dataChannelOffset = channel * m_inputDataCount * m_inputDataSize;
int startY = -m_paddingY;
for (uint patchY = 0; patchY < m_numPatchesY; ++patchY)
{
int startX = -m_paddingX;
for (uint patchX = 0; patchX < m_numPatchesX; ++patchX)
{
const uint c_activationDataIndex = c_activationChannelOffset + (patchY * m_numPatchesX + patchX) * m_inputDataCount + dataIndex;
if (channel == 0)
{
m_preactivationDataBuffer[c_activationDataIndex] = 0.0f;
}
for (int currY = startY; currY < startY + (int)m_filterHeight; ++currY)
{
for (int currX = startX; currX < startX + (int)m_filterWidth; ++currX)
{
if (currY >= 0 && currY < (int)m_inputDataHeight && currX >= 0 && currX < (int)m_inputDataWidth)
{
m_preactivationDataBuffer[c_activationDataIndex] +=
m_filtersBuffer[c_filtersChannelOffset + ((currY - startY) * m_filterWidth + currX - startX) * m_numFilters + filterIndex] *
m_inputDataBuffer[c_dataChannelOffset + (currY * m_inputDataWidth + currX) * m_inputDataCount + dataIndex];
}
}
}
startX += m_stride;
}
startY += m_stride;
}
}
}
}
}
void MockConvolutionalLayer::AddBiases()
{
const uint c_width = m_inputDataCount * m_numPatchesY * m_numPatchesX;
for (uint filterIndex = 0; filterIndex < m_numFilters; ++filterIndex)
{
for (uint i = 0; i < c_width; ++i)
{
m_preactivationDataBuffer[filterIndex * c_width + i] += m_biasesBuffer[filterIndex];
}
}
}
void MockConvolutionalLayer::CalculateActivations()
{
for (uint i = 0; i < m_activationBufferSize / sizeof(float); ++i)
{
if (m_activationType == ActivationType::ReLu)
{
m_activationDataBuffer[i] = m_preactivationDataBuffer[i] < 0.0f ? 0.0f : m_preactivationDataBuffer[i];
}
else if (m_activationType == ActivationType::Sigmoid)
{
m_activationDataBuffer[i] = 1 / (1 + exp(-m_preactivationDataBuffer[i]));
}
else if (m_activationType == ActivationType::Tanh)
{
m_activationDataBuffer[i] = 1.0f - 2.0f / (exp(2.0f * m_preactivationDataBuffer[i]) + 1.0f);
}
else
{
TestingAssert(false, "Unknown activation type!");
}
}
}
void MockConvolutionalLayer::DoForwardProp(PropagationMode propagationMode)
{
CalculatePreactivations();
AddBiases();
CalculateActivations();
}
void MockConvolutionalLayer::CalculateBiasesGradients()
{
uint batchSize = m_parallelismMode == ParallelismMode::Model ? m_inputDataCount : m_tierSize * m_inputDataCount;
const uint c_width = m_inputDataCount * m_numPatchesY * m_numPatchesX;
for (uint filterIndex = 0; filterIndex < m_numFilters; ++filterIndex)
{
float biasGradient = 0.f;
for (uint i = 0; i < c_width; ++i)
{
biasGradient += m_preactivationGradientsBuffer[filterIndex * c_width + i];
}
m_biasesGradientsBuffer[filterIndex] = biasGradient / (float)batchSize;
}
}
void MockConvolutionalLayer::CalculateWeightsGradients()
{
// Initializing gradients to zero.
size_t filtersBufferLength = m_filtersBufferSize / sizeof(float);
for (size_t i = 0; i < filtersBufferLength; ++i)
{
m_filtersGradientsBuffer[i] = 0.f;
}
// Calculating gradients.
for (uint dataIndex = 0; dataIndex < m_inputDataCount; ++dataIndex)
{
for (uint filterIndex = 0; filterIndex < m_numFilters; ++filterIndex)
{
const uint c_activationChannelOffset = m_activationDataSize * filterIndex * m_inputDataCount;
for (uint channel = 0; channel < m_inputNumChannels; ++channel)
{
const uint c_filtersChannelOffset = channel * m_numFilters * m_filterSize;
const uint c_dataChannelOffset = channel * m_inputDataCount * m_inputDataSize;
int startY = -m_paddingY;
for (uint patchY = 0; patchY < m_numPatchesY; ++patchY)
{
int startX = -m_paddingX;
for (uint patchX = 0; patchX < m_numPatchesX; ++patchX)
{
const uint c_activationDataIndex = c_activationChannelOffset + (patchY * m_numPatchesX + patchX) * m_inputDataCount + dataIndex;
if (channel == 0)
{
m_preactivationDataBuffer[c_activationDataIndex] = 0.0f;
}
for (int currY = startY; currY < startY + (int)m_filterHeight; ++currY)
{
for (int currX = startX; currX < startX + (int)m_filterWidth; ++currX)
{
if (currY >= 0 && currY < (int)m_inputDataHeight && currX >= 0 && currX < (int)m_inputDataWidth)
{
m_filtersGradientsBuffer[c_filtersChannelOffset + ((currY - startY) * m_filterWidth + currX - startX) * m_numFilters + filterIndex] +=
m_preactivationGradientsBuffer[c_activationDataIndex] *
m_inputDataBuffer[c_dataChannelOffset + (currY * m_inputDataWidth + currX) * m_inputDataCount + dataIndex];
}
}
}
startX += (int)m_stride;
}
startY += (int)m_stride;
}
}
}
}
// Scaling gradients with batch size.
float batchSize = m_parallelismMode == ParallelismMode::Model ? (float)m_inputDataCount : (float)(m_tierSize * m_inputDataCount);
for (size_t i = 0; i < filtersBufferLength; ++i)
{
m_filtersGradientsBuffer[i] /= batchSize;
}
}
void MockConvolutionalLayer::CalculateInputGradients()
{
for (uint dataIndex = 0; dataIndex < m_inputDataCount; ++dataIndex)
{
for (uint channel = 0; channel < m_inputNumChannels; ++channel)
{
for (uint pixelY = 0; pixelY < m_inputDataHeight; ++pixelY)
{
for (uint pixelX = 0; pixelX < m_inputDataWidth; ++pixelX)
{
const uint c_currPixel = pixelY * m_inputDataWidth + pixelX;
const uint c_firstPatchX = pixelX + m_paddingX < m_filterWidth ? 0 : (pixelX + m_paddingX - m_filterWidth) / m_stride + 1;
const uint c_firstPatchY = pixelY + m_paddingY < m_filterHeight ? 0 : (pixelY + m_paddingY - m_filterHeight) / m_stride + 1;
const uint c_lastPatchX = min(m_numPatchesX, (pixelX + m_paddingX) / m_stride + 1);
const uint c_lastPatchY = min(m_numPatchesY, (pixelY + m_paddingY) / m_stride + 1);
float gradient = 0.0f;
for (uint currPatchY = c_firstPatchY; currPatchY < c_lastPatchY; ++currPatchY)
{
const uint c_filterPixelY = pixelY + m_paddingY - currPatchY * m_stride;
for (uint currPatchX = c_firstPatchX; currPatchX < c_lastPatchX; ++currPatchX)
{
const uint c_filterPixelX = pixelX + m_paddingX - currPatchX * m_stride;
const uint c_filterPixel = c_filterPixelY * m_filterWidth + c_filterPixelX;
const uint c_currPatch = currPatchY * m_numPatchesX + currPatchX;
for (uint currFilter = 0; currFilter < m_numFilters; ++currFilter)
{
gradient += m_filtersBuffer[(channel * m_filterSize + c_filterPixel) * m_numFilters + currFilter] *
m_preactivationGradientsBuffer[(currFilter * m_numPatchesX * m_numPatchesY + c_currPatch) * m_inputDataCount + dataIndex];
}
}
}
m_inputGradientsBuffer[(channel * m_inputDataSize + c_currPixel) * m_inputDataCount + dataIndex] = gradient;
}
}
}
}
}
void MockConvolutionalLayer::CalculatePreactivationsGradients()
{
for (uint i = 0; i < m_activationBufferSize / sizeof(float); ++i)
{
if (m_activationType == ActivationType::ReLu)
{
m_preactivationGradientsBuffer[i] = m_activationGradientsBuffer[i] * (m_activationDataBuffer[i] > 0.0f ? 1.0f : 0.0f);
}
else if (m_activationType == ActivationType::Sigmoid)
{
m_preactivationGradientsBuffer[i] = m_activationGradientsBuffer[i] * m_activationDataBuffer[i] * (1.0f - m_activationDataBuffer[i]);
}
else if (m_activationType == ActivationType::Tanh)
{
m_preactivationGradientsBuffer[i] = m_activationGradientsBuffer[i] * (1.0f - m_activationDataBuffer[i] * m_activationDataBuffer[i]);
}
else
{
TestingAssert(false, "Unknown activation type!");
}
}
}
void MockConvolutionalLayer::DoBackwardProp()
{
CalculatePreactivationsGradients();
CalculateInputGradients();
CalculateWeightsGradients();
CalculateBiasesGradients();
}
void MockConvolutionalLayer::UpdateLayerParameters(float learningProgress)
{
// Updating filters.
float filtersUpdateProgressSteps = floorf(learningProgress / m_filtersUpdateLearningRateProgressStep);
const float filtersLearningRate = m_filtersUpdateStartingLearningRate * powf(m_filtersUpdateLearningRateUpdateFactor, filtersUpdateProgressSteps);
for (uint i = 0; i < m_filtersBufferSize / sizeof(float); ++i)
{
m_filtersUpdateBuffer[i] = m_filtersUpdateMomentum * m_filtersUpdateBuffer[i] + filtersLearningRate * (m_filtersGradientsBuffer[i] -
m_filtersUpdateDecay * m_filtersBuffer[i]);
m_filtersBuffer[i] += m_filtersUpdateBuffer[i];
}
// Updating biases.
float biasesUpdateProgressSteps = floorf(learningProgress / m_biasesUpdateLearningRateProgressStep);
const float biasesLearningRate = m_biasesUpdateStartingLearningRate * powf(m_biasesUpdateLearningRateUpdateFactor, biasesUpdateProgressSteps);
for (uint i = 0; i < m_biasesBufferSize / sizeof(float); ++i)
{
m_biasesUpdateBuffer[i] = m_biasesUpdateMomentum * m_biasesUpdateBuffer[i] + biasesLearningRate * (m_biasesGradientsBuffer[i] -
m_biasesUpdateDecay * m_biasesBuffer[i]);
m_biasesBuffer[i] += m_biasesUpdateBuffer[i];
}
} | b6479a9d3b1b52ffde394973d0e651d197cf742d.cu | // ----------------------------------------------------------------------------------------------------
// Copyrighted by Marko Rakita.
// Author: Marko Rakita
// File contains: Mocked neural network convolutional layer, used in tests.
// Created: 01/27/2016.
// ----------------------------------------------------------------------------------------------------
#include "include/mockconvolutionallayer.cuh"
MockConvolutionalLayer::MockConvolutionalLayer(uint inputNumChannels, uint inputDataWidth, uint inputDataHeight, uint inputDataCount, uint numFilters, uint filterWidth,
uint filterHeight, uint numFilterChannels, float weightsDeviation, float biasesInitialValue, float filtersUpdateMomentum, float filtersUpdateDecay,
float filtersUpdateLearningRateProgressStep, float filtersUpdateStartingLearningRate, float filtersUpdateLearningRateUpdateFactor, float biasesUpdateMomentum,
float biasesUpdateDecay, float biasesUpdateLearningRateProgressStep, float biasesUpdateStartingLearningRate, float biasesUpdateLearningRateUpdateFactor,
int paddingX, int paddingY, uint stride, ActivationType activationType)
{
m_layerType = LayerType::Convolutional;
m_indexInTier = 0;
m_tierSize = 1;
m_inputNumChannels = inputNumChannels;
m_inputDataWidth = inputDataWidth;
m_inputDataHeight = inputDataHeight;
m_inputDataSize = m_inputDataWidth * m_inputDataHeight;
m_inputDataCount = inputDataCount;
m_holdsInputData = true;
m_activationType = activationType;
m_numFilters = numFilters;
m_filterWidth = filterWidth;
m_filterHeight = filterHeight;
m_filterSize = m_filterWidth * m_filterHeight;
m_numFilterChannels = numFilterChannels;
m_filtersUpdateMomentum = filtersUpdateMomentum;
m_filtersUpdateDecay = filtersUpdateDecay;
m_filtersUpdateLearningRateProgressStep = filtersUpdateLearningRateProgressStep;
m_filtersUpdateStartingLearningRate = filtersUpdateStartingLearningRate;
m_filtersUpdateLearningRateUpdateFactor = filtersUpdateLearningRateUpdateFactor;
m_biasesUpdateMomentum = biasesUpdateMomentum;
m_biasesUpdateDecay = biasesUpdateDecay;
m_biasesUpdateLearningRateProgressStep = biasesUpdateLearningRateProgressStep;
m_biasesUpdateStartingLearningRate = biasesUpdateStartingLearningRate;
m_biasesUpdateLearningRateUpdateFactor = biasesUpdateLearningRateUpdateFactor;
m_paddingX = paddingX;
m_paddingY = paddingY;
m_stride = stride;
m_numPatchesX = 1 + (uint)ceil((double)(2 * paddingX + m_inputDataWidth - m_filterWidth) / m_stride);
m_numPatchesY = 1 + (uint)ceil((double)(2 * paddingY + m_inputDataHeight - m_filterHeight) / m_stride);
m_activationNumChannels = m_numFilters;
m_activationDataWidth = m_numPatchesX;
m_activationDataHeight = m_numPatchesY;
m_activationDataSize = m_activationDataWidth * m_activationDataHeight;
// Allocating input data buffer.
m_inputBufferSize = m_inputNumChannels * m_inputDataSize * m_inputDataCount * sizeof(float);
if (m_holdsInputData)
{
CudaAssert(cudaMallocHost<float>(&m_inputDataBuffer, m_inputBufferSize));
}
// Allocating input gradients buffer.
CudaAssert(cudaMallocHost<float>(&m_inputGradientsBuffer, m_inputBufferSize));
// Allocating filters buffers.
m_filtersBufferSize = m_numFilters * m_filterSize * m_numFilterChannels * sizeof(float);
CudaAssert(cudaMallocHost<float>(&m_filtersBuffer, m_filtersBufferSize));
CudaAssert(cudaMallocHost<float>(&m_filtersGradientsBuffer, m_filtersBufferSize));
CudaAssert(cudaMallocHost<float>(&m_filtersUpdateBuffer, m_filtersBufferSize));
// Initializing filter weights.
InitializeFilterWeights(weightsDeviation);
InitializeBuffer(m_filtersUpdateBuffer, m_filtersBufferSize, 0.f);
// Allocating biases buffers.
m_biasesBufferSize = m_numFilters * sizeof(float);
CudaAssert(cudaMallocHost<float>(&m_biasesBuffer, m_biasesBufferSize));
CudaAssert(cudaMallocHost<float>(&m_biasesGradientsBuffer, m_biasesBufferSize));
CudaAssert(cudaMallocHost<float>(&m_biasesUpdateBuffer, m_biasesBufferSize));
// Initializing biases.
InitializeBuffer(m_biasesBuffer, m_biasesBufferSize, biasesInitialValue);
InitializeBuffer(m_biasesUpdateBuffer, m_biasesBufferSize, 0.f);
// Allocating preactivation and activation data buffers.
m_activationBufferSize = m_numFilters * m_activationDataSize * m_inputDataCount * sizeof(float);
CudaAssert(cudaMallocHost<float>(&m_preactivationDataBuffer, m_activationBufferSize));
CudaAssert(cudaMallocHost<float>(&m_activationDataBuffer, m_activationBufferSize));
// Allocating preactivation gradients buffer.
CudaAssert(cudaMallocHost<float>(&m_preactivationGradientsBuffer, m_activationBufferSize));
// Allocating activation gradients buffer.
m_holdsActivationGradients = true;
if (m_holdsActivationGradients)
{
CudaAssert(cudaMallocHost<float>(&m_activationGradientsBuffer, m_activationBufferSize));
}
}
void MockConvolutionalLayer::InitializeFilterWeights(float weightsDeviation)
{
default_random_engine generator((uint)chrono::system_clock::now().time_since_epoch().count());
normal_distribution<float> distribution(0.f, weightsDeviation);
size_t filtersBufferLength = m_filtersBufferSize / sizeof(float);
for (size_t i = 0; i < filtersBufferLength; ++i)
{
m_filtersBuffer[i] = distribution(generator);
}
}
void MockConvolutionalLayer::InitializeBuffer(float* buffer, size_t bufferSize, float initialValue)
{
size_t bufferLength = bufferSize / sizeof(float);
for (size_t i = 0; i < bufferLength; ++i)
{
buffer[i] = initialValue;
}
}
MockConvolutionalLayer::~MockConvolutionalLayer()
{
if (m_holdsInputData)
{
CudaAssert(cudaFreeHost(m_inputDataBuffer));
}
m_inputDataBuffer = NULL;
CudaAssert(cudaFreeHost(m_inputGradientsBuffer));
m_inputGradientsBuffer = NULL;
CudaAssert(cudaFreeHost(m_filtersBuffer));
CudaAssert(cudaFreeHost(m_filtersGradientsBuffer));
CudaAssert(cudaFreeHost(m_filtersUpdateBuffer));
CudaAssert(cudaFreeHost(m_biasesBuffer));
CudaAssert(cudaFreeHost(m_biasesGradientsBuffer));
CudaAssert(cudaFreeHost(m_biasesUpdateBuffer));
CudaAssert(cudaFreeHost(m_preactivationDataBuffer));
CudaAssert(cudaFreeHost(m_activationDataBuffer));
m_activationDataBuffer = NULL;
CudaAssert(cudaFreeHost(m_preactivationGradientsBuffer));
if (m_holdsActivationGradients)
{
CudaAssert(cudaFreeHost(m_activationGradientsBuffer));
}
m_activationGradientsBuffer = NULL;
}
void MockConvolutionalLayer::LoadInputs()
{
TestingAssert(m_prevLayers.size() == 1, "We do not support more than one previous layer in tests, for now.");
CudaAssert(cudaMemcpy(m_inputDataBuffer, m_prevLayers[0]->GetActivationDataBuffer(), m_inputBufferSize, cudaMemcpyDeviceToHost));
}
void MockConvolutionalLayer::LoadActivationGradients()
{
TestingAssert(m_nextLayers.size() == 1, "We do not support more than one next layer in tests, for now.");
CudaAssert(cudaMemcpy(m_activationGradientsBuffer, m_nextLayers[0]->GetInputGradientsBuffer(), m_activationBufferSize, cudaMemcpyDeviceToHost));
}
void MockConvolutionalLayer::CalculatePreactivations()
{
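// Layout note: input and activation buffers are indexed as
// (channel * imageSize + pixel) * imageCount + image (the filter index plays the
// role of the channel for activations), and filters as
// (channel * filterSize + filterPixel) * numFilters + filter.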
for (uint dataIndex = 0; dataIndex < m_inputDataCount; ++dataIndex)
{
for (uint filterIndex = 0; filterIndex < m_numFilters; ++filterIndex)
{
const uint c_activationChannelOffset = m_activationDataSize * filterIndex * m_inputDataCount;
for (uint channel = 0; channel < m_inputNumChannels; ++channel)
{
const uint c_filtersChannelOffset = channel * m_numFilters * m_filterSize;
const uint c_dataChannelOffset = channel * m_inputDataCount * m_inputDataSize;
int startY = -m_paddingY;
for (uint patchY = 0; patchY < m_numPatchesY; ++patchY)
{
int startX = -m_paddingX;
for (uint patchX = 0; patchX < m_numPatchesX; ++patchX)
{
const uint c_activationDataIndex = c_activationChannelOffset + (patchY * m_numPatchesX + patchX) * m_inputDataCount + dataIndex;
if (channel == 0)
{
m_preactivationDataBuffer[c_activationDataIndex] = 0.0f;
}
for (int currY = startY; currY < startY + (int)m_filterHeight; ++currY)
{
for (int currX = startX; currX < startX + (int)m_filterWidth; ++currX)
{
if (currY >= 0 && currY < (int)m_inputDataHeight && currX >= 0 && currX < (int)m_inputDataWidth)
{
m_preactivationDataBuffer[c_activationDataIndex] +=
m_filtersBuffer[c_filtersChannelOffset + ((currY - startY) * m_filterWidth + currX - startX) * m_numFilters + filterIndex] *
m_inputDataBuffer[c_dataChannelOffset + (currY * m_inputDataWidth + currX) * m_inputDataCount + dataIndex];
}
}
}
startX += m_stride;
}
startY += m_stride;
}
}
}
}
}
void MockConvolutionalLayer::AddBiases()
{
const uint c_width = m_inputDataCount * m_numPatchesY * m_numPatchesX;
for (uint filterIndex = 0; filterIndex < m_numFilters; ++filterIndex)
{
for (uint i = 0; i < c_width; ++i)
{
m_preactivationDataBuffer[filterIndex * c_width + i] += m_biasesBuffer[filterIndex];
}
}
}
void MockConvolutionalLayer::CalculateActivations()
{
for (uint i = 0; i < m_activationBufferSize / sizeof(float); ++i)
{
if (m_activationType == ActivationType::ReLu)
{
m_activationDataBuffer[i] = m_preactivationDataBuffer[i] < 0.0f ? 0.0f : m_preactivationDataBuffer[i];
}
else if (m_activationType == ActivationType::Sigmoid)
{
m_activationDataBuffer[i] = 1 / (1 + exp(-m_preactivationDataBuffer[i]));
}
else if (m_activationType == ActivationType::Tanh)
{
m_activationDataBuffer[i] = 1.0f - 2.0f / (exp(2.0f * m_preactivationDataBuffer[i]) + 1.0f);
}
else
{
TestingAssert(false, "Unknown activation type!");
}
}
}
void MockConvolutionalLayer::DoForwardProp(PropagationMode propagationMode)
{
CalculatePreactivations();
AddBiases();
CalculateActivations();
}
void MockConvolutionalLayer::CalculateBiasesGradients()
{
uint batchSize = m_parallelismMode == ParallelismMode::Model ? m_inputDataCount : m_tierSize * m_inputDataCount;
const uint c_width = m_inputDataCount * m_numPatchesY * m_numPatchesX;
for (uint filterIndex = 0; filterIndex < m_numFilters; ++filterIndex)
{
float biasGradient = 0.f;
for (uint i = 0; i < c_width; ++i)
{
biasGradient += m_preactivationGradientsBuffer[filterIndex * c_width + i];
}
m_biasesGradientsBuffer[filterIndex] = biasGradient / (float)batchSize;
}
}
void MockConvolutionalLayer::CalculateWeightsGradients()
{
// Initializing gradients to zero.
size_t filtersBufferLength = m_filtersBufferSize / sizeof(float);
for (size_t i = 0; i < filtersBufferLength; ++i)
{
m_filtersGradientsBuffer[i] = 0.f;
}
// Calculating gradients.
for (uint dataIndex = 0; dataIndex < m_inputDataCount; ++dataIndex)
{
for (uint filterIndex = 0; filterIndex < m_numFilters; ++filterIndex)
{
const uint c_activationChannelOffset = m_activationDataSize * filterIndex * m_inputDataCount;
for (uint channel = 0; channel < m_inputNumChannels; ++channel)
{
const uint c_filtersChannelOffset = channel * m_numFilters * m_filterSize;
const uint c_dataChannelOffset = channel * m_inputDataCount * m_inputDataSize;
int startY = -m_paddingY;
for (uint patchY = 0; patchY < m_numPatchesY; ++patchY)
{
int startX = -m_paddingX;
for (uint patchX = 0; patchX < m_numPatchesX; ++patchX)
{
const uint c_activationDataIndex = c_activationChannelOffset + (patchY * m_numPatchesX + patchX) * m_inputDataCount + dataIndex;
if (channel == 0)
{
m_preactivationDataBuffer[c_activationDataIndex] = 0.0f;
}
for (int currY = startY; currY < startY + (int)m_filterHeight; ++currY)
{
for (int currX = startX; currX < startX + (int)m_filterWidth; ++currX)
{
if (currY >= 0 && currY < (int)m_inputDataHeight && currX >= 0 && currX < (int)m_inputDataWidth)
{
m_filtersGradientsBuffer[c_filtersChannelOffset + ((currY - startY) * m_filterWidth + currX - startX) * m_numFilters + filterIndex] +=
m_preactivationGradientsBuffer[c_activationDataIndex] *
m_inputDataBuffer[c_dataChannelOffset + (currY * m_inputDataWidth + currX) * m_inputDataCount + dataIndex];
}
}
}
startX += (int)m_stride;
}
startY += (int)m_stride;
}
}
}
}
// Scaling gradients with batch size.
float batchSize = m_parallelismMode == ParallelismMode::Model ? (float)m_inputDataCount : (float)(m_tierSize * m_inputDataCount);
for (size_t i = 0; i < filtersBufferLength; ++i)
{
m_filtersGradientsBuffer[i] /= batchSize;
}
}
void MockConvolutionalLayer::CalculateInputGradients()
{
for (uint dataIndex = 0; dataIndex < m_inputDataCount; ++dataIndex)
{
for (uint channel = 0; channel < m_inputNumChannels; ++channel)
{
for (uint pixelY = 0; pixelY < m_inputDataHeight; ++pixelY)
{
for (uint pixelX = 0; pixelX < m_inputDataWidth; ++pixelX)
{
const uint c_currPixel = pixelY * m_inputDataWidth + pixelX;
const uint c_firstPatchX = pixelX + m_paddingX < m_filterWidth ? 0 : (pixelX + m_paddingX - m_filterWidth) / m_stride + 1;
const uint c_firstPatchY = pixelY + m_paddingY < m_filterHeight ? 0 : (pixelY + m_paddingY - m_filterHeight) / m_stride + 1;
const uint c_lastPatchX = min(m_numPatchesX, (pixelX + m_paddingX) / m_stride + 1);
const uint c_lastPatchY = min(m_numPatchesY, (pixelY + m_paddingY) / m_stride + 1);
float gradient = 0.0f;
for (uint currPatchY = c_firstPatchY; currPatchY < c_lastPatchY; ++currPatchY)
{
const uint c_filterPixelY = pixelY + m_paddingY - currPatchY * m_stride;
for (uint currPatchX = c_firstPatchX; currPatchX < c_lastPatchX; ++currPatchX)
{
const uint c_filterPixelX = pixelX + m_paddingX - currPatchX * m_stride;
const uint c_filterPixel = c_filterPixelY * m_filterWidth + c_filterPixelX;
const uint c_currPatch = currPatchY * m_numPatchesX + currPatchX;
for (uint currFilter = 0; currFilter < m_numFilters; ++currFilter)
{
gradient += m_filtersBuffer[(channel * m_filterSize + c_filterPixel) * m_numFilters + currFilter] *
m_preactivationGradientsBuffer[(currFilter * m_numPatchesX * m_numPatchesY + c_currPatch) * m_inputDataCount + dataIndex];
}
}
}
m_inputGradientsBuffer[(channel * m_inputDataSize + c_currPixel) * m_inputDataCount + dataIndex] = gradient;
}
}
}
}
}
void MockConvolutionalLayer::CalculatePreactivationsGradients()
{
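// Chain rule through the activation, expressed via the stored activation a:
// ReLU': a > 0 ? 1 : 0, Sigmoid': a * (1 - a), Tanh': 1 - a * a.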
for (uint i = 0; i < m_activationBufferSize / sizeof(float); ++i)
{
if (m_activationType == ActivationType::ReLu)
{
m_preactivationGradientsBuffer[i] = m_activationGradientsBuffer[i] * (m_activationDataBuffer[i] > 0.0f ? 1.0f : 0.0f);
}
else if (m_activationType == ActivationType::Sigmoid)
{
m_preactivationGradientsBuffer[i] = m_activationGradientsBuffer[i] * m_activationDataBuffer[i] * (1.0f - m_activationDataBuffer[i]);
}
else if (m_activationType == ActivationType::Tanh)
{
m_preactivationGradientsBuffer[i] = m_activationGradientsBuffer[i] * (1.0f - m_activationDataBuffer[i] * m_activationDataBuffer[i]);
}
else
{
TestingAssert(false, "Unknown activation type!");
}
}
}
void MockConvolutionalLayer::DoBackwardProp()
{
CalculatePreactivationsGradients();
CalculateInputGradients();
CalculateWeightsGradients();
CalculateBiasesGradients();
}
void MockConvolutionalLayer::UpdateLayerParameters(float learningProgress)
{
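// Momentum-style updates with weight decay; each learning rate is scaled by its update factor once per completed learning-rate progress step.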
// Updating filters.
float filtersUpdateProgressSteps = floorf(learningProgress / m_filtersUpdateLearningRateProgressStep);
const float filtersLearningRate = m_filtersUpdateStartingLearningRate * powf(m_filtersUpdateLearningRateUpdateFactor, filtersUpdateProgressSteps);
for (uint i = 0; i < m_filtersBufferSize / sizeof(float); ++i)
{
m_filtersUpdateBuffer[i] = m_filtersUpdateMomentum * m_filtersUpdateBuffer[i] + filtersLearningRate * (m_filtersGradientsBuffer[i] -
m_filtersUpdateDecay * m_filtersBuffer[i]);
m_filtersBuffer[i] += m_filtersUpdateBuffer[i];
}
// Updating biases.
float biasesUpdateProgressSteps = floorf(learningProgress / m_biasesUpdateLearningRateProgressStep);
const float biasesLearningRate = m_biasesUpdateStartingLearningRate * powf(m_biasesUpdateLearningRateUpdateFactor, biasesUpdateProgressSteps);
for (uint i = 0; i < m_biasesBufferSize / sizeof(float); ++i)
{
m_biasesUpdateBuffer[i] = m_biasesUpdateMomentum * m_biasesUpdateBuffer[i] + biasesLearningRate * (m_biasesGradientsBuffer[i] -
m_biasesUpdateDecay * m_biasesBuffer[i]);
m_biasesBuffer[i] += m_biasesUpdateBuffer[i];
}
} |
2e7d14d7942bd49e4f15252854d3b9f159ff9550.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void Transpose_ker(float * dst, float * src, int size)
{
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size && j < size)
dst[i * size + j] = src[j * size + i];
}
void cuTranspose(float * dst, float * src, int size)
{
size = (size + 16 - 1) / 16 * 16;
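// size is rounded up to the next multiple of 16 before both the grid computation and the kernel's bounds check, so dst and src are assumed to be allocated for the padded dimensions.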
dim3 dimBlock(16, 16);
dim3 dimGrid(size / 16, size / 16);
hipLaunchKernelGGL(( Transpose_ker), dim3(dimGrid), dim3(dimBlock), 0, 0, dst, src, size);
} | 2e7d14d7942bd49e4f15252854d3b9f159ff9550.cu | __global__ void Transpose_ker(float * dst, float * src, int size)
{
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size && j < size)
dst[i * size + j] = src[j * size + i];
}
void cuTranspose(float * dst, float * src, int size)
{
size = (size + 16 - 1) / 16 * 16;
dim3 dimBlock(16, 16);
dim3 dimGrid(size / 16, size / 16);
Transpose_ker<<<dimGrid, dimBlock>>>(dst, src, size);
} |
4e95aeb4dd4afeae7ea8dd647235dd99cb61a24a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "column_stats.h"
#include <io/utilities/block_utils.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <hipcub/hipcub.hpp>
#include <math_constants.h>
constexpr int block_size = 1024;
namespace cudf {
namespace io {
/**
* @brief shared state for statistics gather kernel
*/
struct stats_state_s {
stats_column_desc col; ///< Column information
statistics_group group; ///< Group description
statistics_chunk ck; ///< Output statistics chunk
volatile statistics_val warp_min[32]; ///< Min reduction scratch
volatile statistics_val warp_max[32]; ///< Max reduction scratch
};
/**
* @brief shared state for statistics merge kernel
*/
struct merge_state_s {
stats_column_desc col; ///< Column information
statistics_merge_group group; ///< Group description
statistics_chunk ck; ///< Resulting statistics chunk
volatile statistics_val warp_min[32]; ///< Min reduction scratch
volatile statistics_val warp_max[32]; ///< Max reduction scratch
};
/**
* Custom addition functor to ignore NaN inputs
*/
struct IgnoreNaNSum {
__device__ __forceinline__ double operator()(const double &a, const double &b)
{
double aval = isnan(a) ? 0 : a;
double bval = isnan(b) ? 0 : b;
return aval + bval;
}
};
/**
* Warp-wide Min reduction for string types
*/
inline __device__ string_stats WarpReduceMinString(const char *smin, uint32_t lmin)
{
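// Butterfly reduction across the warp: each step exchanges the candidate (pointer, length) pair with the lane at XOR distance 1, 2, 4, 8, 16 and keeps the lexicographically smaller string.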
uint32_t len = shuffle_xor(lmin, 1);
const char *ptr =
reinterpret_cast<const char *>(shuffle_xor(reinterpret_cast<uintptr_t>(smin), 1));
if (!smin || (ptr && nvstr_is_lesser(ptr, len, smin, lmin))) {
smin = ptr;
lmin = len;
}
len = shuffle_xor(lmin, 2);
ptr = reinterpret_cast<const char *>(shuffle_xor(reinterpret_cast<uintptr_t>(smin), 2));
if (!smin || (ptr && nvstr_is_lesser(ptr, len, smin, lmin))) {
smin = ptr;
lmin = len;
}
len = shuffle_xor(lmin, 4);
ptr = reinterpret_cast<const char *>(shuffle_xor(reinterpret_cast<uintptr_t>(smin), 4));
if (!smin || (ptr && nvstr_is_lesser(ptr, len, smin, lmin))) {
smin = ptr;
lmin = len;
}
len = shuffle_xor(lmin, 8);
ptr = reinterpret_cast<const char *>(shuffle_xor(reinterpret_cast<uintptr_t>(smin), 8));
if (!smin || (ptr && nvstr_is_lesser(ptr, len, smin, lmin))) {
smin = ptr;
lmin = len;
}
len = shuffle_xor(lmin, 16);
ptr = reinterpret_cast<const char *>(shuffle_xor(reinterpret_cast<uintptr_t>(smin), 16));
if (!smin || (ptr && nvstr_is_lesser(ptr, len, smin, lmin))) {
smin = ptr;
lmin = len;
}
return {smin, lmin};
}
/**
* Warp-wide Max reduction for string types
*/
inline __device__ string_stats WarpReduceMaxString(const char *smax, uint32_t lmax)
{
uint32_t len = shuffle_xor(lmax, 1);
const char *ptr =
reinterpret_cast<const char *>(shuffle_xor(reinterpret_cast<uintptr_t>(smax), 1));
if (!smax || (ptr && nvstr_is_greater(ptr, len, smax, lmax))) {
smax = ptr;
lmax = len;
}
len = shuffle_xor(lmax, 2);
ptr = reinterpret_cast<const char *>(shuffle_xor(reinterpret_cast<uintptr_t>(smax), 2));
if (!smax || (ptr && nvstr_is_greater(ptr, len, smax, lmax))) {
smax = ptr;
lmax = len;
}
len = shuffle_xor(lmax, 4);
ptr = reinterpret_cast<const char *>(shuffle_xor(reinterpret_cast<uintptr_t>(smax), 4));
if (!smax || (ptr && nvstr_is_greater(ptr, len, smax, lmax))) {
smax = ptr;
lmax = len;
}
len = shuffle_xor(lmax, 8);
ptr = reinterpret_cast<const char *>(shuffle_xor(reinterpret_cast<uintptr_t>(smax), 8));
if (!smax || (ptr && nvstr_is_greater(ptr, len, smax, lmax))) {
smax = ptr;
lmax = len;
}
len = shuffle_xor(lmax, 16);
ptr = reinterpret_cast<const char *>(shuffle_xor(reinterpret_cast<uintptr_t>(smax), 16));
if (!smax || (ptr && nvstr_is_greater(ptr, len, smax, lmax))) {
smax = ptr;
lmax = len;
}
return {smax, lmax};
}
/**
* @brief Gather statistics for integer-like columns
*
* @param s shared block state
* @param dtype data type
* @param t thread id
* @param storage temporary storage for reduction
*/
template <typename Storage>
void __device__
gatherIntColumnStats(stats_state_s *s, statistics_dtype dtype, uint32_t t, Storage &storage)
{
using block_reduce = hipcub::BlockReduce<int64_t, block_size>;
int64_t vmin = INT64_MAX;
int64_t vmax = INT64_MIN;
int64_t vsum = 0;
int64_t v;
uint32_t nn_cnt = 0;
__shared__ volatile bool has_minmax;
for (uint32_t i = 0; i < s->group.num_rows; i += block_size) {
uint32_t r = i + t;
uint32_t row = r + s->group.start_row;
const uint32_t *valid_map = s->col.valid_map_base;
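// A row counts as valid when it lies inside both the group and the column and, if a validity bitmap is present, its bit (32 rows per word) is set.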
uint32_t is_valid = (r < s->group.num_rows && row < s->col.num_values)
? (valid_map) ? (valid_map[(row + s->col.column_offset) / 32] >>
((row + s->col.column_offset) % 32)) &
1
: 1
: 0;
if (is_valid) {
switch (dtype) {
case dtype_int32:
case dtype_date32: v = static_cast<const int32_t *>(s->col.column_data_base)[row]; break;
case dtype_int64:
case dtype_decimal64: v = static_cast<const int64_t *>(s->col.column_data_base)[row]; break;
case dtype_int16: v = static_cast<const int16_t *>(s->col.column_data_base)[row]; break;
case dtype_timestamp64:
v = static_cast<const int64_t *>(s->col.column_data_base)[row];
if (s->col.ts_scale < -1) {
v /= -s->col.ts_scale;
} else if (s->col.ts_scale > 1) {
v *= s->col.ts_scale;
}
break;
default: v = static_cast<const int8_t *>(s->col.column_data_base)[row]; break;
}
vmin = min(vmin, v);
vmax = max(vmax, v);
vsum += v;
}
nn_cnt += __syncthreads_count(is_valid);
}
if (!t) {
s->ck.non_nulls = nn_cnt;
s->ck.null_count = s->group.num_rows - nn_cnt;
}
vmin = block_reduce(storage.integer_stats).Reduce(vmin, hipcub::Min());
__syncthreads();
vmax = block_reduce(storage.integer_stats).Reduce(vmax, hipcub::Max());
if (!t) { has_minmax = (vmin <= vmax); }
__syncthreads();
if (has_minmax) { vsum = block_reduce(storage.integer_stats).Sum(vsum); }
if (!t) {
if (has_minmax) {
s->ck.min_value.i_val = vmin;
s->ck.max_value.i_val = vmax;
s->ck.sum.i_val = vsum;
}
s->ck.has_minmax = has_minmax;
// TODO: For now, don't set the sum flag with 64-bit values so we don't have to check for
// 64-bit sum overflow
s->ck.has_sum = (dtype <= dtype_int32 && has_minmax);
}
}
/**
* @brief Gather statistics for floating-point columns
*
* @param s shared block state
* @param dtype data type
* @param t thread id
* @param storage temporary storage for reduction
*/
template <typename Storage>
void __device__
gatherFloatColumnStats(stats_state_s *s, statistics_dtype dtype, uint32_t t, Storage &storage)
{
using block_reduce = hipcub::BlockReduce<double, block_size>;
double vmin = CUDART_INF;
double vmax = -CUDART_INF;
double vsum = 0;
double v;
uint32_t nn_cnt = 0;
__shared__ volatile bool has_minmax;
for (uint32_t i = 0; i < s->group.num_rows; i += block_size) {
uint32_t r = i + t;
uint32_t row = r + s->group.start_row;
const uint32_t *valid_map = s->col.valid_map_base;
uint32_t is_valid = (r < s->group.num_rows && row < s->col.num_values)
? (valid_map) ? (valid_map[(row + s->col.column_offset) >> 5] >>
((row + s->col.column_offset) & 0x1f)) &
1
: 1
: 0;
if (is_valid) {
if (dtype == dtype_float64) {
v = static_cast<const double *>(s->col.column_data_base)[row];
} else {
v = static_cast<const float *>(s->col.column_data_base)[row];
}
if (v < vmin) { vmin = v; }
if (v > vmax) { vmax = v; }
if (!isnan(v)) { vsum += v; }
}
nn_cnt += __syncthreads_count(is_valid);
}
if (!t) {
s->ck.non_nulls = nn_cnt;
s->ck.null_count = s->group.num_rows - nn_cnt;
}
vmin = block_reduce(storage.float_stats).Reduce(vmin, hipcub::Min());
__syncthreads();
vmax = block_reduce(storage.float_stats).Reduce(vmax, hipcub::Max());
if (!t) { has_minmax = (vmin <= vmax); }
__syncthreads();
if (has_minmax) { vsum = block_reduce(storage.float_stats).Reduce(vsum, IgnoreNaNSum()); }
if (!t) {
if (has_minmax) {
s->ck.min_value.fp_val = (vmin != 0.0) ? vmin : CUDART_NEG_ZERO;
s->ck.max_value.fp_val = (vmax != 0.0) ? vmax : CUDART_ZERO;
s->ck.sum.fp_val = vsum;
}
s->ck.has_minmax = has_minmax;
s->ck.has_sum = has_minmax; // Implies sum is valid as well
}
}
// FIXME: Use native libcudf string type
struct nvstrdesc_s {
const char *ptr;
size_t count;
};
/**
* @brief Gather statistics for string columns
*
* @param s shared block state
* @param t thread id
* @param storage temporary storage for reduction
*/
template <typename Storage>
void __device__ gatherStringColumnStats(stats_state_s *s, uint32_t t, Storage &storage)
{
using block_reduce = hipcub::BlockReduce<uint32_t, block_size>;
uint32_t len_sum = 0;
const char *smin = nullptr;
const char *smax = nullptr;
uint32_t lmin = 0;
uint32_t lmax = 0;
uint32_t nn_cnt = 0;
bool has_minmax;
string_stats minval, maxval;
for (uint32_t i = 0; i < s->group.num_rows; i += block_size) {
uint32_t r = i + t;
uint32_t row = r + s->group.start_row;
const uint32_t *valid_map = s->col.valid_map_base;
uint32_t is_valid = (r < s->group.num_rows && row < s->col.num_values)
? (valid_map) ? (valid_map[(row + s->col.column_offset) >> 5] >>
((row + s->col.column_offset) & 0x1f)) &
1
: 1
: 0;
if (is_valid) {
const nvstrdesc_s *str_col = static_cast<const nvstrdesc_s *>(s->col.column_data_base);
uint32_t len = (uint32_t)str_col[row].count;
const char *ptr = str_col[row].ptr;
len_sum += len;
if (!smin || nvstr_is_lesser(ptr, len, smin, lmin)) {
lmin = len;
smin = ptr;
}
if (!smax || nvstr_is_greater(ptr, len, smax, lmax)) {
lmax = len;
smax = ptr;
}
}
nn_cnt += __syncthreads_count(is_valid);
}
if (!t) {
s->ck.non_nulls = nn_cnt;
s->ck.null_count = s->group.num_rows - nn_cnt;
}
minval = WarpReduceMinString(smin, lmin);
maxval = WarpReduceMaxString(smax, lmax);
__syncwarp();
if (!(t & 0x1f)) {
s->warp_min[t >> 5].str_val.ptr = minval.ptr;
s->warp_min[t >> 5].str_val.length = minval.length;
s->warp_max[t >> 5].str_val.ptr = maxval.ptr;
s->warp_max[t >> 5].str_val.length = maxval.length;
}
has_minmax = __syncthreads_or(smin != nullptr);
if (has_minmax) { len_sum = block_reduce(storage.string_stats).Sum(len_sum); }
if (t < 32 * 1) {
minval = WarpReduceMinString(s->warp_min[t].str_val.ptr, s->warp_min[t].str_val.length);
if (!(t & 0x1f)) {
if (has_minmax) {
s->ck.min_value.str_val.ptr = minval.ptr;
s->ck.min_value.str_val.length = minval.length;
s->ck.sum.i_val = len_sum;
}
s->ck.has_minmax = has_minmax;
s->ck.has_sum = has_minmax;
}
} else if (t < 32 * 2 and has_minmax) {
maxval =
WarpReduceMaxString(s->warp_max[t & 0x1f].str_val.ptr, s->warp_max[t & 0x1f].str_val.length);
if (!(t & 0x1f)) {
s->ck.max_value.str_val.ptr = maxval.ptr;
s->ck.max_value.str_val.length = maxval.length;
}
}
}
/**
* @brief Gather column chunk statistics (min/max values, sum and null count)
* for a group of rows.
*
* blockDim {1024,1,1}
*
* @param chunks Destination statistics results
* @param groups Statistics source information
*/
template <int block_size>
__global__ void __launch_bounds__(block_size, 1)
gpuGatherColumnStatistics(statistics_chunk *chunks, const statistics_group *groups)
{
__shared__ __align__(8) stats_state_s state_g;
__shared__ union {
typename hipcub::BlockReduce<int64_t, block_size>::TempStorage integer_stats;
typename hipcub::BlockReduce<double, block_size>::TempStorage float_stats;
typename hipcub::BlockReduce<uint32_t, block_size>::TempStorage string_stats;
} temp_storage;
stats_state_s *const s = &state_g;
uint32_t t = threadIdx.x;
statistics_dtype dtype;
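// The first few threads cooperatively copy the group descriptor into shared memory, zero the output chunk, then copy the column descriptor, one 32-bit word per thread.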
if (t < sizeof(statistics_group) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&s->group)[t] =
reinterpret_cast<const uint32_t *>(&groups[blockIdx.x])[t];
}
if (t < sizeof(statistics_chunk) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&s->ck)[t] = 0;
}
__syncthreads();
if (t < sizeof(stats_column_desc) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&s->col)[t] = reinterpret_cast<const uint32_t *>(s->group.col)[t];
}
__syncthreads();
dtype = s->col.stats_dtype;
if (dtype >= dtype_bool && dtype <= dtype_decimal64) {
gatherIntColumnStats(s, dtype, t, temp_storage);
} else if (dtype >= dtype_float32 && dtype <= dtype_float64) {
gatherFloatColumnStats(s, dtype, t, temp_storage);
} else if (dtype == dtype_string) {
gatherStringColumnStats(s, t, temp_storage);
}
__syncthreads();
if (t < sizeof(statistics_chunk) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&chunks[blockIdx.x])[t] = reinterpret_cast<uint32_t *>(&s->ck)[t];
}
}
/**
* @brief Merge statistics for integer-like columns
*
* @param s shared block state
* @param dtype data type
* @param ck_in pointer to first statistic chunk
* @param num_chunks number of statistic chunks to merge
* @param t thread id
* @param storage temporary storage for reduction
*/
template <typename Storage>
void __device__ mergeIntColumnStats(merge_state_s *s,
statistics_dtype dtype,
const statistics_chunk *ck_in,
uint32_t num_chunks,
uint32_t t,
Storage &storage)
{
int64_t vmin = INT64_MAX;
int64_t vmax = INT64_MIN;
int64_t vsum = 0;
uint32_t non_nulls = 0;
uint32_t null_count = 0;
__shared__ volatile bool has_minmax;
for (uint32_t i = t; i < num_chunks; i += block_size) {
const statistics_chunk *ck = &ck_in[i];
if (ck->has_minmax) {
vmin = min(vmin, ck->min_value.i_val);
vmax = max(vmax, ck->max_value.i_val);
}
if (ck->has_sum) { vsum += ck->sum.i_val; }
non_nulls += ck->non_nulls;
null_count += ck->null_count;
}
vmin = hipcub::BlockReduce<int64_t, block_size>(storage.i64).Reduce(vmin, hipcub::Min());
__syncthreads();
vmax = hipcub::BlockReduce<int64_t, block_size>(storage.i64).Reduce(vmax, hipcub::Max());
if (!t) { has_minmax = (vmin <= vmax); }
__syncthreads();
non_nulls = hipcub::BlockReduce<uint32_t, block_size>(storage.u32).Sum(non_nulls);
__syncthreads();
null_count = hipcub::BlockReduce<uint32_t, block_size>(storage.u32).Sum(null_count);
__syncthreads();
if (has_minmax) { vsum = hipcub::BlockReduce<int64_t, block_size>(storage.i64).Sum(vsum); }
if (!t) {
if (has_minmax) {
s->ck.min_value.i_val = vmin;
s->ck.max_value.i_val = vmax;
s->ck.sum.i_val = vsum;
}
s->ck.has_minmax = has_minmax;
// TODO: For now, don't set the sum flag with 64-bit values so we don't have to check for
// 64-bit sum overflow
s->ck.has_sum = (dtype <= dtype_int32 && has_minmax);
s->ck.non_nulls = non_nulls;
s->ck.null_count = null_count;
}
}
/**
* @brief Merge statistics for floating-point columns
*
* @param s shared block state
* @param dtype data type
* @param ck_in pointer to first statistic chunk
* @param num_chunks number of statistic chunks to merge
* @param t thread id
* @param storage temporary storage for reduction
*/
template <typename Storage>
void __device__ mergeFloatColumnStats(merge_state_s *s,
const statistics_chunk *ck_in,
uint32_t num_chunks,
uint32_t t,
Storage &storage)
{
double vmin = CUDART_INF;
double vmax = -CUDART_INF;
double vsum = 0;
uint32_t non_nulls = 0;
uint32_t null_count = 0;
__shared__ volatile bool has_minmax;
for (uint32_t i = t; i < num_chunks; i += block_size) {
const statistics_chunk *ck = &ck_in[i];
if (ck->has_minmax) {
double v0 = ck->min_value.fp_val;
double v1 = ck->max_value.fp_val;
if (v0 < vmin) { vmin = v0; }
if (v1 > vmax) { vmax = v1; }
}
if (ck->has_sum) { vsum += ck->sum.fp_val; }
non_nulls += ck->non_nulls;
null_count += ck->null_count;
}
vmin = hipcub::BlockReduce<double, block_size>(storage.f64).Reduce(vmin, hipcub::Min());
__syncthreads();
vmax = hipcub::BlockReduce<double, block_size>(storage.f64).Reduce(vmax, hipcub::Max());
if (!t) { has_minmax = (vmin <= vmax); }
__syncthreads();
non_nulls = hipcub::BlockReduce<uint32_t, block_size>(storage.u32).Sum(non_nulls);
__syncthreads();
null_count = hipcub::BlockReduce<uint32_t, block_size>(storage.u32).Sum(null_count);
__syncthreads();
if (has_minmax) {
vsum = hipcub::BlockReduce<double, block_size>(storage.f64).Reduce(vsum, IgnoreNaNSum());
}
if (!t) {
if (has_minmax) {
s->ck.min_value.fp_val = (vmin != 0.0) ? vmin : CUDART_NEG_ZERO;
s->ck.max_value.fp_val = (vmax != 0.0) ? vmax : CUDART_ZERO;
s->ck.sum.fp_val = vsum;
}
s->ck.has_minmax = has_minmax;
s->ck.has_sum = has_minmax; // Implies sum is valid as well
s->ck.non_nulls = non_nulls;
s->ck.null_count = null_count;
}
}
/**
* @brief Merge statistics for string columns
*
* @param s shared block state
* @param ck_in pointer to first statistic chunk
* @param num_chunks number of statistic chunks to merge
* @param t thread id
* @param storage temporary storage for reduction
*/
template <typename Storage>
void __device__ mergeStringColumnStats(merge_state_s *s,
const statistics_chunk *ck_in,
uint32_t num_chunks,
uint32_t t,
Storage &storage)
{
uint32_t len_sum = 0;
const char *smin = nullptr;
const char *smax = nullptr;
uint32_t lmin = 0;
uint32_t lmax = 0;
uint32_t non_nulls = 0;
uint32_t null_count = 0;
bool has_minmax;
string_stats minval, maxval;
for (uint32_t i = t; i < num_chunks; i += block_size) {
const statistics_chunk *ck = &ck_in[i];
if (ck->has_minmax) {
uint32_t len0 = ck->min_value.str_val.length;
const char *ptr0 = ck->min_value.str_val.ptr;
uint32_t len1 = ck->max_value.str_val.length;
const char *ptr1 = ck->max_value.str_val.ptr;
if (!smin || (ptr0 && nvstr_is_lesser(ptr0, len0, smin, lmin))) {
lmin = len0;
smin = ptr0;
}
if (!smax || (ptr1 && nvstr_is_greater(ptr1, len1, smax, lmax))) {
lmax = len1;
smax = ptr1;
}
}
if (ck->has_sum) { len_sum += (uint32_t)ck->sum.i_val; }
non_nulls += ck->non_nulls;
null_count += ck->null_count;
}
minval = WarpReduceMinString(smin, lmin);
maxval = WarpReduceMaxString(smax, lmax);
if (!(t & 0x1f)) {
s->warp_min[t >> 5].str_val.ptr = minval.ptr;
s->warp_min[t >> 5].str_val.length = minval.length;
s->warp_max[t >> 5].str_val.ptr = maxval.ptr;
s->warp_max[t >> 5].str_val.length = maxval.length;
}
has_minmax = __syncthreads_or(smin != nullptr);
non_nulls = hipcub::BlockReduce<uint32_t, block_size>(storage.u32).Sum(non_nulls);
__syncthreads();
null_count = hipcub::BlockReduce<uint32_t, block_size>(storage.u32).Sum(null_count);
__syncthreads();
if (has_minmax) { len_sum = hipcub::BlockReduce<uint32_t, block_size>(storage.u32).Sum(len_sum); }
if (t < 32 * 1) {
minval = WarpReduceMinString(s->warp_min[t].str_val.ptr, s->warp_min[t].str_val.length);
if (!(t & 0x1f)) {
if (has_minmax) {
s->ck.min_value.str_val.ptr = minval.ptr;
s->ck.min_value.str_val.length = minval.length;
s->ck.sum.i_val = len_sum;
}
s->ck.has_minmax = has_minmax;
s->ck.has_sum = has_minmax;
s->ck.non_nulls = non_nulls;
s->ck.null_count = null_count;
}
} else if (t < 32 * 2) {
maxval =
WarpReduceMaxString(s->warp_max[t & 0x1f].str_val.ptr, s->warp_max[t & 0x1f].str_val.length);
if (!((t & 0x1f) and has_minmax)) {
s->ck.max_value.str_val.ptr = maxval.ptr;
s->ck.max_value.str_val.length = maxval.length;
}
}
}
/**
* @brief Combine multiple statistics chunks together to form new statistics chunks
*
* blockDim {1024,1,1}
*
* @param chunks_out Destination statistic chunks
* @param chunks_in Source statistic chunks
* @param groups Statistic chunk grouping information
*/
template <int block_size>
__global__ void __launch_bounds__(block_size, 1)
gpuMergeColumnStatistics(statistics_chunk *chunks_out,
const statistics_chunk *chunks_in,
const statistics_merge_group *groups)
{
__shared__ __align__(8) merge_state_s state_g;
__shared__ struct {
typename hipcub::BlockReduce<uint32_t, block_size>::TempStorage u32;
typename hipcub::BlockReduce<int64_t, block_size>::TempStorage i64;
typename hipcub::BlockReduce<double, block_size>::TempStorage f64;
} storage;
merge_state_s *const s = &state_g;
uint32_t t = threadIdx.x;
statistics_dtype dtype;
if (t < sizeof(statistics_merge_group) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&s->group)[t] =
reinterpret_cast<const uint32_t *>(&groups[blockIdx.x])[t];
}
__syncthreads();
if (t < sizeof(stats_column_desc) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&s->col)[t] = reinterpret_cast<const uint32_t *>(s->group.col)[t];
}
__syncthreads();
dtype = s->col.stats_dtype;
if (dtype >= dtype_bool && dtype <= dtype_decimal64) {
mergeIntColumnStats(
s, dtype, chunks_in + s->group.start_chunk, s->group.num_chunks, t, storage);
} else if (dtype >= dtype_float32 && dtype <= dtype_float64) {
mergeFloatColumnStats(s, chunks_in + s->group.start_chunk, s->group.num_chunks, t, storage);
} else if (dtype == dtype_string) {
mergeStringColumnStats(s, chunks_in + s->group.start_chunk, s->group.num_chunks, t, storage);
}
__syncthreads();
if (t < sizeof(statistics_chunk) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&chunks_out[blockIdx.x])[t] =
reinterpret_cast<uint32_t *>(&s->ck)[t];
}
}
/**
* @brief Launches kernel to gather column statistics
*
* @param[out] chunks Statistics results [num_chunks]
* @param[in] groups Statistics row groups [num_chunks]
* @param[in] num_chunks Number of chunks & rowgroups
* @param[in] stream CUDA stream to use, default 0
*/
void GatherColumnStatistics(statistics_chunk *chunks,
const statistics_group *groups,
uint32_t num_chunks,
rmm::cuda_stream_view stream)
{
hipLaunchKernelGGL(( gpuGatherColumnStatistics<block_size>)
, dim3(num_chunks), dim3(block_size), 0, stream.value(), chunks, groups);
}
/**
* @brief Launches kernel to merge column statistics
*
* @param[out] chunks_out Statistics results [num_chunks]
* @param[in] chunks_in Input statistics
* @param[in] groups Statistics groups [num_chunks]
* @param[in] num_chunks Number of chunks & groups
* @param[in] stream CUDA stream to use, default 0
*/
void MergeColumnStatistics(statistics_chunk *chunks_out,
const statistics_chunk *chunks_in,
const statistics_merge_group *groups,
uint32_t num_chunks,
rmm::cuda_stream_view stream)
{
hipLaunchKernelGGL(( gpuMergeColumnStatistics<block_size>)
, dim3(num_chunks), dim3(block_size), 0, stream.value(), chunks_out, chunks_in, groups);
}
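// A minimal host-side sketch (not part of the original file) of driving the two
// launchers above; the buffer names and the final synchronization are
// illustrative assumptions rather than cudf API guarantees.
//
// statistics_chunk *d_chunks = nullptr, *d_merged = nullptr;
// hipMalloc(reinterpret_cast<void **>(&d_chunks), num_chunks * sizeof(statistics_chunk));
// hipMalloc(reinterpret_cast<void **>(&d_merged), num_merge_groups * sizeof(statistics_chunk));
// GatherColumnStatistics(d_chunks, d_groups, num_chunks, stream);
// MergeColumnStatistics(d_merged, d_chunks, d_merge_groups, num_merge_groups, stream);
// hipStreamSynchronize(stream.value());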
} // namespace io
} // namespace cudf
| 4e95aeb4dd4afeae7ea8dd647235dd99cb61a24a.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "column_stats.h"
#include <io/utilities/block_utils.cuh>
#include <rmm/cuda_stream_view.hpp>
#include <cub/cub.cuh>
#include <math_constants.h>
constexpr int block_size = 1024;
namespace cudf {
namespace io {
/**
* @brief shared state for statistics gather kernel
*/
struct stats_state_s {
stats_column_desc col; ///< Column information
statistics_group group; ///< Group description
statistics_chunk ck; ///< Output statistics chunk
volatile statistics_val warp_min[32]; ///< Min reduction scratch
volatile statistics_val warp_max[32]; ///< Max reduction scratch
};
/**
* @brief shared state for statistics merge kernel
*/
struct merge_state_s {
stats_column_desc col; ///< Column information
statistics_merge_group group; ///< Group description
statistics_chunk ck; ///< Resulting statistics chunk
volatile statistics_val warp_min[32]; ///< Min reduction scratch
volatile statistics_val warp_max[32]; ///< Max reduction scratch
};
/**
* Custom addition functor to ignore NaN inputs
*/
struct IgnoreNaNSum {
__device__ __forceinline__ double operator()(const double &a, const double &b)
{
double aval = isnan(a) ? 0 : a;
double bval = isnan(b) ? 0 : b;
return aval + bval;
}
};
/**
* Warp-wide Min reduction for string types
*/
inline __device__ string_stats WarpReduceMinString(const char *smin, uint32_t lmin)
{
uint32_t len = shuffle_xor(lmin, 1);
const char *ptr =
reinterpret_cast<const char *>(shuffle_xor(reinterpret_cast<uintptr_t>(smin), 1));
if (!smin || (ptr && nvstr_is_lesser(ptr, len, smin, lmin))) {
smin = ptr;
lmin = len;
}
len = shuffle_xor(lmin, 2);
ptr = reinterpret_cast<const char *>(shuffle_xor(reinterpret_cast<uintptr_t>(smin), 2));
if (!smin || (ptr && nvstr_is_lesser(ptr, len, smin, lmin))) {
smin = ptr;
lmin = len;
}
len = shuffle_xor(lmin, 4);
ptr = reinterpret_cast<const char *>(shuffle_xor(reinterpret_cast<uintptr_t>(smin), 4));
if (!smin || (ptr && nvstr_is_lesser(ptr, len, smin, lmin))) {
smin = ptr;
lmin = len;
}
len = shuffle_xor(lmin, 8);
ptr = reinterpret_cast<const char *>(shuffle_xor(reinterpret_cast<uintptr_t>(smin), 8));
if (!smin || (ptr && nvstr_is_lesser(ptr, len, smin, lmin))) {
smin = ptr;
lmin = len;
}
len = shuffle_xor(lmin, 16);
ptr = reinterpret_cast<const char *>(shuffle_xor(reinterpret_cast<uintptr_t>(smin), 16));
if (!smin || (ptr && nvstr_is_lesser(ptr, len, smin, lmin))) {
smin = ptr;
lmin = len;
}
return {smin, lmin};
}
/**
* Warp-wide Max reduction for string types
*/
inline __device__ string_stats WarpReduceMaxString(const char *smax, uint32_t lmax)
{
uint32_t len = shuffle_xor(lmax, 1);
const char *ptr =
reinterpret_cast<const char *>(shuffle_xor(reinterpret_cast<uintptr_t>(smax), 1));
if (!smax || (ptr && nvstr_is_greater(ptr, len, smax, lmax))) {
smax = ptr;
lmax = len;
}
len = shuffle_xor(lmax, 2);
ptr = reinterpret_cast<const char *>(shuffle_xor(reinterpret_cast<uintptr_t>(smax), 2));
if (!smax || (ptr && nvstr_is_greater(ptr, len, smax, lmax))) {
smax = ptr;
lmax = len;
}
len = shuffle_xor(lmax, 4);
ptr = reinterpret_cast<const char *>(shuffle_xor(reinterpret_cast<uintptr_t>(smax), 4));
if (!smax || (ptr && nvstr_is_greater(ptr, len, smax, lmax))) {
smax = ptr;
lmax = len;
}
len = shuffle_xor(lmax, 8);
ptr = reinterpret_cast<const char *>(shuffle_xor(reinterpret_cast<uintptr_t>(smax), 8));
if (!smax || (ptr && nvstr_is_greater(ptr, len, smax, lmax))) {
smax = ptr;
lmax = len;
}
len = shuffle_xor(lmax, 16);
ptr = reinterpret_cast<const char *>(shuffle_xor(reinterpret_cast<uintptr_t>(smax), 16));
if (!smax || (ptr && nvstr_is_greater(ptr, len, smax, lmax))) {
smax = ptr;
lmax = len;
}
return {smax, lmax};
}
/**
* @brief Gather statistics for integer-like columns
*
* @param s shared block state
* @param dtype data type
* @param t thread id
* @param storage temporary storage for reduction
*/
template <typename Storage>
void __device__
gatherIntColumnStats(stats_state_s *s, statistics_dtype dtype, uint32_t t, Storage &storage)
{
using block_reduce = cub::BlockReduce<int64_t, block_size>;
int64_t vmin = INT64_MAX;
int64_t vmax = INT64_MIN;
int64_t vsum = 0;
int64_t v;
uint32_t nn_cnt = 0;
__shared__ volatile bool has_minmax;
for (uint32_t i = 0; i < s->group.num_rows; i += block_size) {
uint32_t r = i + t;
uint32_t row = r + s->group.start_row;
const uint32_t *valid_map = s->col.valid_map_base;
uint32_t is_valid = (r < s->group.num_rows && row < s->col.num_values)
? (valid_map) ? (valid_map[(row + s->col.column_offset) / 32] >>
((row + s->col.column_offset) % 32)) &
1
: 1
: 0;
if (is_valid) {
switch (dtype) {
case dtype_int32:
case dtype_date32: v = static_cast<const int32_t *>(s->col.column_data_base)[row]; break;
case dtype_int64:
case dtype_decimal64: v = static_cast<const int64_t *>(s->col.column_data_base)[row]; break;
case dtype_int16: v = static_cast<const int16_t *>(s->col.column_data_base)[row]; break;
case dtype_timestamp64:
v = static_cast<const int64_t *>(s->col.column_data_base)[row];
if (s->col.ts_scale < -1) {
v /= -s->col.ts_scale;
} else if (s->col.ts_scale > 1) {
v *= s->col.ts_scale;
}
break;
default: v = static_cast<const int8_t *>(s->col.column_data_base)[row]; break;
}
vmin = min(vmin, v);
vmax = max(vmax, v);
vsum += v;
}
nn_cnt += __syncthreads_count(is_valid);
}
if (!t) {
s->ck.non_nulls = nn_cnt;
s->ck.null_count = s->group.num_rows - nn_cnt;
}
vmin = block_reduce(storage.integer_stats).Reduce(vmin, cub::Min());
__syncthreads();
vmax = block_reduce(storage.integer_stats).Reduce(vmax, cub::Max());
if (!t) { has_minmax = (vmin <= vmax); }
__syncthreads();
if (has_minmax) { vsum = block_reduce(storage.integer_stats).Sum(vsum); }
if (!t) {
if (has_minmax) {
s->ck.min_value.i_val = vmin;
s->ck.max_value.i_val = vmax;
s->ck.sum.i_val = vsum;
}
s->ck.has_minmax = has_minmax;
// TODO: For now, don't set the sum flag with 64-bit values so we don't have to check for
// 64-bit sum overflow
s->ck.has_sum = (dtype <= dtype_int32 && has_minmax);
}
}
/**
* @brief Gather statistics for floating-point columns
*
* @param s shared block state
* @param dtype data type
* @param t thread id
* @param storage temporary storage for reduction
*/
template <typename Storage>
void __device__
gatherFloatColumnStats(stats_state_s *s, statistics_dtype dtype, uint32_t t, Storage &storage)
{
using block_reduce = cub::BlockReduce<double, block_size>;
double vmin = CUDART_INF;
double vmax = -CUDART_INF;
double vsum = 0;
double v;
uint32_t nn_cnt = 0;
__shared__ volatile bool has_minmax;
for (uint32_t i = 0; i < s->group.num_rows; i += block_size) {
uint32_t r = i + t;
uint32_t row = r + s->group.start_row;
const uint32_t *valid_map = s->col.valid_map_base;
uint32_t is_valid = (r < s->group.num_rows && row < s->col.num_values)
? (valid_map) ? (valid_map[(row + s->col.column_offset) >> 5] >>
((row + s->col.column_offset) & 0x1f)) &
1
: 1
: 0;
if (is_valid) {
if (dtype == dtype_float64) {
v = static_cast<const double *>(s->col.column_data_base)[row];
} else {
v = static_cast<const float *>(s->col.column_data_base)[row];
}
if (v < vmin) { vmin = v; }
if (v > vmax) { vmax = v; }
if (!isnan(v)) { vsum += v; }
}
nn_cnt += __syncthreads_count(is_valid);
}
if (!t) {
s->ck.non_nulls = nn_cnt;
s->ck.null_count = s->group.num_rows - nn_cnt;
}
vmin = block_reduce(storage.float_stats).Reduce(vmin, cub::Min());
__syncthreads();
vmax = block_reduce(storage.float_stats).Reduce(vmax, cub::Max());
if (!t) { has_minmax = (vmin <= vmax); }
__syncthreads();
if (has_minmax) { vsum = block_reduce(storage.float_stats).Reduce(vsum, IgnoreNaNSum()); }
if (!t) {
if (has_minmax) {
s->ck.min_value.fp_val = (vmin != 0.0) ? vmin : CUDART_NEG_ZERO;
s->ck.max_value.fp_val = (vmax != 0.0) ? vmax : CUDART_ZERO;
s->ck.sum.fp_val = vsum;
}
s->ck.has_minmax = has_minmax;
s->ck.has_sum = has_minmax; // Implies sum is valid as well
}
}
// FIXME: Use native libcudf string type
struct nvstrdesc_s {
const char *ptr;
size_t count;
};
/**
* @brief Gather statistics for string columns
*
* @param s shared block state
* @param t thread id
* @param storage temporary storage for reduction
*/
template <typename Storage>
void __device__ gatherStringColumnStats(stats_state_s *s, uint32_t t, Storage &storage)
{
using block_reduce = cub::BlockReduce<uint32_t, block_size>;
uint32_t len_sum = 0;
const char *smin = nullptr;
const char *smax = nullptr;
uint32_t lmin = 0;
uint32_t lmax = 0;
uint32_t nn_cnt = 0;
bool has_minmax;
string_stats minval, maxval;
for (uint32_t i = 0; i < s->group.num_rows; i += block_size) {
uint32_t r = i + t;
uint32_t row = r + s->group.start_row;
const uint32_t *valid_map = s->col.valid_map_base;
uint32_t is_valid = (r < s->group.num_rows && row < s->col.num_values)
? (valid_map) ? (valid_map[(row + s->col.column_offset) >> 5] >>
((row + s->col.column_offset) & 0x1f)) &
1
: 1
: 0;
if (is_valid) {
const nvstrdesc_s *str_col = static_cast<const nvstrdesc_s *>(s->col.column_data_base);
uint32_t len = (uint32_t)str_col[row].count;
const char *ptr = str_col[row].ptr;
len_sum += len;
if (!smin || nvstr_is_lesser(ptr, len, smin, lmin)) {
lmin = len;
smin = ptr;
}
if (!smax || nvstr_is_greater(ptr, len, smax, lmax)) {
lmax = len;
smax = ptr;
}
}
nn_cnt += __syncthreads_count(is_valid);
}
if (!t) {
s->ck.non_nulls = nn_cnt;
s->ck.null_count = s->group.num_rows - nn_cnt;
}
minval = WarpReduceMinString(smin, lmin);
maxval = WarpReduceMaxString(smax, lmax);
__syncwarp();
if (!(t & 0x1f)) {
s->warp_min[t >> 5].str_val.ptr = minval.ptr;
s->warp_min[t >> 5].str_val.length = minval.length;
s->warp_max[t >> 5].str_val.ptr = maxval.ptr;
s->warp_max[t >> 5].str_val.length = maxval.length;
}
has_minmax = __syncthreads_or(smin != nullptr);
if (has_minmax) { len_sum = block_reduce(storage.string_stats).Sum(len_sum); }
if (t < 32 * 1) {
minval = WarpReduceMinString(s->warp_min[t].str_val.ptr, s->warp_min[t].str_val.length);
if (!(t & 0x1f)) {
if (has_minmax) {
s->ck.min_value.str_val.ptr = minval.ptr;
s->ck.min_value.str_val.length = minval.length;
s->ck.sum.i_val = len_sum;
}
s->ck.has_minmax = has_minmax;
s->ck.has_sum = has_minmax;
}
} else if (t < 32 * 2 and has_minmax) {
maxval =
WarpReduceMaxString(s->warp_max[t & 0x1f].str_val.ptr, s->warp_max[t & 0x1f].str_val.length);
if (!(t & 0x1f)) {
s->ck.max_value.str_val.ptr = maxval.ptr;
s->ck.max_value.str_val.length = maxval.length;
}
}
}
/**
* @brief Gather column chunk statistics (min/max values, sum and null count)
* for a group of rows.
*
* blockDim {1024,1,1}
*
* @param chunks Destination statistics results
* @param groups Statistics source information
*/
template <int block_size>
__global__ void __launch_bounds__(block_size, 1)
gpuGatherColumnStatistics(statistics_chunk *chunks, const statistics_group *groups)
{
__shared__ __align__(8) stats_state_s state_g;
__shared__ union {
typename cub::BlockReduce<int64_t, block_size>::TempStorage integer_stats;
typename cub::BlockReduce<double, block_size>::TempStorage float_stats;
typename cub::BlockReduce<uint32_t, block_size>::TempStorage string_stats;
} temp_storage;
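// The three reductions are mutually exclusive (one per column dtype), so their temporary storage can safely share a single union.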
stats_state_s *const s = &state_g;
uint32_t t = threadIdx.x;
statistics_dtype dtype;
if (t < sizeof(statistics_group) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&s->group)[t] =
reinterpret_cast<const uint32_t *>(&groups[blockIdx.x])[t];
}
if (t < sizeof(statistics_chunk) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&s->ck)[t] = 0;
}
__syncthreads();
if (t < sizeof(stats_column_desc) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&s->col)[t] = reinterpret_cast<const uint32_t *>(s->group.col)[t];
}
__syncthreads();
dtype = s->col.stats_dtype;
if (dtype >= dtype_bool && dtype <= dtype_decimal64) {
gatherIntColumnStats(s, dtype, t, temp_storage);
} else if (dtype >= dtype_float32 && dtype <= dtype_float64) {
gatherFloatColumnStats(s, dtype, t, temp_storage);
} else if (dtype == dtype_string) {
gatherStringColumnStats(s, t, temp_storage);
}
__syncthreads();
if (t < sizeof(statistics_chunk) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&chunks[blockIdx.x])[t] = reinterpret_cast<uint32_t *>(&s->ck)[t];
}
}
/**
* @brief Merge statistics for integer-like columns
*
* @param s shared block state
* @param dtype data type
* @param ck_in pointer to first statistic chunk
* @param num_chunks number of statistic chunks to merge
* @param t thread id
* @param storage temporary storage for reduction
*/
template <typename Storage>
void __device__ mergeIntColumnStats(merge_state_s *s,
statistics_dtype dtype,
const statistics_chunk *ck_in,
uint32_t num_chunks,
uint32_t t,
Storage &storage)
{
int64_t vmin = INT64_MAX;
int64_t vmax = INT64_MIN;
int64_t vsum = 0;
uint32_t non_nulls = 0;
uint32_t null_count = 0;
__shared__ volatile bool has_minmax;
for (uint32_t i = t; i < num_chunks; i += block_size) {
const statistics_chunk *ck = &ck_in[i];
if (ck->has_minmax) {
vmin = min(vmin, ck->min_value.i_val);
vmax = max(vmax, ck->max_value.i_val);
}
if (ck->has_sum) { vsum += ck->sum.i_val; }
non_nulls += ck->non_nulls;
null_count += ck->null_count;
}
vmin = cub::BlockReduce<int64_t, block_size>(storage.i64).Reduce(vmin, cub::Min());
__syncthreads();
vmax = cub::BlockReduce<int64_t, block_size>(storage.i64).Reduce(vmax, cub::Max());
if (!t) { has_minmax = (vmin <= vmax); }
__syncthreads();
non_nulls = cub::BlockReduce<uint32_t, block_size>(storage.u32).Sum(non_nulls);
__syncthreads();
null_count = cub::BlockReduce<uint32_t, block_size>(storage.u32).Sum(null_count);
__syncthreads();
if (has_minmax) { vsum = cub::BlockReduce<int64_t, block_size>(storage.i64).Sum(vsum); }
if (!t) {
if (has_minmax) {
s->ck.min_value.i_val = vmin;
s->ck.max_value.i_val = vmax;
s->ck.sum.i_val = vsum;
}
s->ck.has_minmax = has_minmax;
// TODO: For now, don't set the sum flag with 64-bit values so we don't have to check for
// 64-bit sum overflow
s->ck.has_sum = (dtype <= dtype_int32 && has_minmax);
s->ck.non_nulls = non_nulls;
s->ck.null_count = null_count;
}
}
/**
* @brief Merge statistics for floating-point columns
*
* @param s shared block state
* @param dtype data type
* @param ck_in pointer to first statistic chunk
* @param num_chunks number of statistic chunks to merge
* @param t thread id
* @param storage temporary storage for reduction
*/
template <typename Storage>
void __device__ mergeFloatColumnStats(merge_state_s *s,
const statistics_chunk *ck_in,
uint32_t num_chunks,
uint32_t t,
Storage &storage)
{
double vmin = CUDART_INF;
double vmax = -CUDART_INF;
double vsum = 0;
uint32_t non_nulls = 0;
uint32_t null_count = 0;
__shared__ volatile bool has_minmax;
for (uint32_t i = t; i < num_chunks; i += block_size) {
const statistics_chunk *ck = &ck_in[i];
if (ck->has_minmax) {
double v0 = ck->min_value.fp_val;
double v1 = ck->max_value.fp_val;
if (v0 < vmin) { vmin = v0; }
if (v1 > vmax) { vmax = v1; }
}
if (ck->has_sum) { vsum += ck->sum.fp_val; }
non_nulls += ck->non_nulls;
null_count += ck->null_count;
}
vmin = cub::BlockReduce<double, block_size>(storage.f64).Reduce(vmin, cub::Min());
__syncthreads();
vmax = cub::BlockReduce<double, block_size>(storage.f64).Reduce(vmax, cub::Max());
if (!t) { has_minmax = (vmin <= vmax); }
__syncthreads();
non_nulls = cub::BlockReduce<uint32_t, block_size>(storage.u32).Sum(non_nulls);
__syncthreads();
null_count = cub::BlockReduce<uint32_t, block_size>(storage.u32).Sum(null_count);
__syncthreads();
if (has_minmax) {
vsum = cub::BlockReduce<double, block_size>(storage.f64).Reduce(vsum, IgnoreNaNSum());
}
if (!t) {
if (has_minmax) {
s->ck.min_value.fp_val = (vmin != 0.0) ? vmin : CUDART_NEG_ZERO;
s->ck.max_value.fp_val = (vmax != 0.0) ? vmax : CUDART_ZERO;
s->ck.sum.fp_val = vsum;
}
s->ck.has_minmax = has_minmax;
s->ck.has_sum = has_minmax; // Implies sum is valid as well
s->ck.non_nulls = non_nulls;
s->ck.null_count = null_count;
}
}
/**
* @brief Merge statistics for string columns
*
* @param s shared block state
* @param ck_in pointer to first statistic chunk
* @param num_chunks number of statistic chunks to merge
* @param t thread id
* @param storage temporary storage for reduction
*/
template <typename Storage>
void __device__ mergeStringColumnStats(merge_state_s *s,
const statistics_chunk *ck_in,
uint32_t num_chunks,
uint32_t t,
Storage &storage)
{
uint32_t len_sum = 0;
const char *smin = nullptr;
const char *smax = nullptr;
uint32_t lmin = 0;
uint32_t lmax = 0;
uint32_t non_nulls = 0;
uint32_t null_count = 0;
bool has_minmax;
string_stats minval, maxval;
for (uint32_t i = t; i < num_chunks; i += block_size) {
const statistics_chunk *ck = &ck_in[i];
if (ck->has_minmax) {
uint32_t len0 = ck->min_value.str_val.length;
const char *ptr0 = ck->min_value.str_val.ptr;
uint32_t len1 = ck->max_value.str_val.length;
const char *ptr1 = ck->max_value.str_val.ptr;
if (!smin || (ptr0 && nvstr_is_lesser(ptr0, len0, smin, lmin))) {
lmin = len0;
smin = ptr0;
}
if (!smax || (ptr1 && nvstr_is_greater(ptr1, len1, smax, lmax))) {
lmax = len1;
smax = ptr1;
}
}
if (ck->has_sum) { len_sum += (uint32_t)ck->sum.i_val; }
non_nulls += ck->non_nulls;
null_count += ck->null_count;
}
minval = WarpReduceMinString(smin, lmin);
maxval = WarpReduceMaxString(smax, lmax);
if (!(t & 0x1f)) {
s->warp_min[t >> 5].str_val.ptr = minval.ptr;
s->warp_min[t >> 5].str_val.length = minval.length;
s->warp_max[t >> 5].str_val.ptr = maxval.ptr;
s->warp_max[t >> 5].str_val.length = maxval.length;
}
has_minmax = __syncthreads_or(smin != nullptr);
non_nulls = cub::BlockReduce<uint32_t, block_size>(storage.u32).Sum(non_nulls);
__syncthreads();
null_count = cub::BlockReduce<uint32_t, block_size>(storage.u32).Sum(null_count);
__syncthreads();
if (has_minmax) { len_sum = cub::BlockReduce<uint32_t, block_size>(storage.u32).Sum(len_sum); }
if (t < 32 * 1) {
minval = WarpReduceMinString(s->warp_min[t].str_val.ptr, s->warp_min[t].str_val.length);
if (!(t & 0x1f)) {
if (has_minmax) {
s->ck.min_value.str_val.ptr = minval.ptr;
s->ck.min_value.str_val.length = minval.length;
s->ck.sum.i_val = len_sum;
}
s->ck.has_minmax = has_minmax;
s->ck.has_sum = has_minmax;
s->ck.non_nulls = non_nulls;
s->ck.null_count = null_count;
}
} else if (t < 32 * 2) {
maxval =
WarpReduceMaxString(s->warp_max[t & 0x1f].str_val.ptr, s->warp_max[t & 0x1f].str_val.length);
if (!((t & 0x1f) and has_minmax)) {
s->ck.max_value.str_val.ptr = maxval.ptr;
s->ck.max_value.str_val.length = maxval.length;
}
}
}
/**
* @brief Combine multiple statistics chunks together to form new statistics chunks
*
* blockDim {1024,1,1}
*
* @param chunks_out Destination statistic chunks
* @param chunks_in Source statistic chunks
* @param groups Statistic chunk grouping information
*/
template <int block_size>
__global__ void __launch_bounds__(block_size, 1)
gpuMergeColumnStatistics(statistics_chunk *chunks_out,
const statistics_chunk *chunks_in,
const statistics_merge_group *groups)
{
__shared__ __align__(8) merge_state_s state_g;
__shared__ struct {
typename cub::BlockReduce<uint32_t, block_size>::TempStorage u32;
typename cub::BlockReduce<int64_t, block_size>::TempStorage i64;
typename cub::BlockReduce<double, block_size>::TempStorage f64;
} storage;
merge_state_s *const s = &state_g;
uint32_t t = threadIdx.x;
statistics_dtype dtype;
if (t < sizeof(statistics_merge_group) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&s->group)[t] =
reinterpret_cast<const uint32_t *>(&groups[blockIdx.x])[t];
}
__syncthreads();
if (t < sizeof(stats_column_desc) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&s->col)[t] = reinterpret_cast<const uint32_t *>(s->group.col)[t];
}
__syncthreads();
dtype = s->col.stats_dtype;
if (dtype >= dtype_bool && dtype <= dtype_decimal64) {
mergeIntColumnStats(
s, dtype, chunks_in + s->group.start_chunk, s->group.num_chunks, t, storage);
} else if (dtype >= dtype_float32 && dtype <= dtype_float64) {
mergeFloatColumnStats(s, chunks_in + s->group.start_chunk, s->group.num_chunks, t, storage);
} else if (dtype == dtype_string) {
mergeStringColumnStats(s, chunks_in + s->group.start_chunk, s->group.num_chunks, t, storage);
}
__syncthreads();
if (t < sizeof(statistics_chunk) / sizeof(uint32_t)) {
reinterpret_cast<uint32_t *>(&chunks_out[blockIdx.x])[t] =
reinterpret_cast<uint32_t *>(&s->ck)[t];
}
}
/**
* @brief Launches kernel to gather column statistics
*
* @param[out] chunks Statistics results [num_chunks]
* @param[in] groups Statistics row groups [num_chunks]
* @param[in] num_chunks Number of chunks & rowgroups
* @param[in] stream CUDA stream to use, default 0
*/
void GatherColumnStatistics(statistics_chunk *chunks,
const statistics_group *groups,
uint32_t num_chunks,
rmm::cuda_stream_view stream)
{
gpuGatherColumnStatistics<block_size>
<<<num_chunks, block_size, 0, stream.value()>>>(chunks, groups);
}
/**
* @brief Launches kernel to merge column statistics
*
* @param[out] chunks_out Statistics results [num_chunks]
* @param[in] chunks_in Input statistics
* @param[in] groups Statistics groups [num_chunks]
* @param[in] num_chunks Number of chunks & groups
* @param[in] stream CUDA stream to use, default 0
*/
void MergeColumnStatistics(statistics_chunk *chunks_out,
const statistics_chunk *chunks_in,
const statistics_merge_group *groups,
uint32_t num_chunks,
rmm::cuda_stream_view stream)
{
gpuMergeColumnStatistics<block_size>
<<<num_chunks, block_size, 0, stream.value()>>>(chunks_out, chunks_in, groups);
}
} // namespace io
} // namespace cudf
|
dfb1b2361c120cb47092fbe6cad05911894c27aa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// vAdd.cu
//
// driver and kernel call
#include <stdio.h>
#define THREADS_PER_BLOCK 32
__global__ void vAdd_d (int *a_d, int *b_d, int *c_d, int n)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < n)
c_d[x] = a_d[x] + b_d[x];
}
extern "C" void gpuAdd (int *a, int *b, int *c, int arraySize)
{
int *a_d, *b_d, *c_d;
hipMalloc ((void**) &a_d, sizeof(int) * arraySize);
hipMalloc ((void**) &b_d, sizeof(int) * arraySize);
hipMalloc ((void**) &c_d, sizeof(int) * arraySize);
hipMemcpy (a_d, a, sizeof(int) * arraySize, hipMemcpyHostToDevice);
hipMemcpy (b_d, b, sizeof(int) * arraySize, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( vAdd_d) , dim3(ceil((float) arraySize/THREADS_PER_BLOCK)), dim3(THREADS_PER_BLOCK) , 0, 0, a_d, b_d, c_d, arraySize);
hipError_t err = hipGetLastError();
if (err != hipSuccess)
printf ("CUDA error: %s\n", hipGetErrorString(err));
hipMemcpy (c, c_d, sizeof(int) * arraySize, hipMemcpyDeviceToHost);
hipFree (a_d);
hipFree (b_d);
hipFree (c_d);
}
| dfb1b2361c120cb47092fbe6cad05911894c27aa.cu | // vAdd.cu
//
// driver and kernel call
#include <stdio.h>
#define THREADS_PER_BLOCK 32
__global__ void vAdd_d (int *a_d, int *b_d, int *c_d, int n)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < n)
c_d[x] = a_d[x] + b_d[x];
}
extern "C" void gpuAdd (int *a, int *b, int *c, int arraySize)
{
int *a_d, *b_d, *c_d;
cudaMalloc ((void**) &a_d, sizeof(int) * arraySize);
cudaMalloc ((void**) &b_d, sizeof(int) * arraySize);
cudaMalloc ((void**) &c_d, sizeof(int) * arraySize);
cudaMemcpy (a_d, a, sizeof(int) * arraySize, cudaMemcpyHostToDevice);
cudaMemcpy (b_d, b, sizeof(int) * arraySize, cudaMemcpyHostToDevice);
vAdd_d <<< ceil((float) arraySize/THREADS_PER_BLOCK), THREADS_PER_BLOCK >>> (a_d, b_d, c_d, arraySize);
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf ("CUDA error: %s\n", cudaGetErrorString(err));
cudaMemcpy (c, c_d, sizeof(int) * arraySize, cudaMemcpyDeviceToHost);
cudaFree (a_d);
cudaFree (b_d);
cudaFree (c_d);
}
|
6eabb14cc540d8ea1a023b15aed53b55b2b928ee.hip | // !!! This is a file automatically generated by hipify!!!
#include "stdint.h"
#include "hadoop/Pipes.hh"
#include "hadoop/TemplateFactory.hh"
#include "hadoop/StringUtils.hh"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <cstdlib>
#include <cmath>
#include <time.h>
#include <sys/time.h>
//#define DEBUG
int deviceID = 0;
// datum of a plot
// x,y : coordinate
// cent : id of nearest cluster
class data {
public:
float x;
float y;
int cent;
};
__device__ float mysqrt(data a, data b) {
float x = abs(a.x - b.x);
float y = abs(a.y - b.y);
return std::sqrt(x*x + y*y);
}
//data object assignment
__global__ void assign_data(data *centroids,
data *data,
int num_of_data,
int num_of_cluster)
{
int i;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// int tid = threadIdx.x;
// int nthreads = blockDim.x;
int nthreads = blockDim.x * gridDim.x;
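// Grid-stride loop: each thread visits every nthreads-th data point and records the index of its nearest centroid.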
// int part = num_of_data / nthreads; /* 65535*512 */
// for(i = part*tid; i < part*(tid+1); i++) {
for (i = tid; i < num_of_data; i += nthreads) {
int center = 0;
float dmin = mysqrt(centroids[0], data[i]);
for(int j = 1; j < num_of_cluster; j++) {
float dist = mysqrt(centroids[j], data[i]);
if(dist < dmin) {
dmin = dist;
center = j;
}
}
data[i].cent = center;
}
}
//K centroids recalculation
//tid has to be less than the num of newcent
__global__ void centroids_recalc(
data *newcent,
data *d,
int *ndata) {
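// One block per centroid (blockIdx.x selects the cluster): 64 threads stride over that cluster's slice of the sorted data, accumulate partial sums in shared memory, and thread 0 finishes the reduction and writes the new mean.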
int j;
int tid = blockIdx.x;
__shared__ float sx[64];
__shared__ float sy[64];
float x = 0.0f;
float y = 0.0f;
for(j = ndata[tid] + threadIdx.x; j < ndata[tid+1]; j += blockDim.x) {
x += d[j].x;
y += d[j].y;
}
sx[threadIdx.x] = x;
sy[threadIdx.x] = y;
__syncthreads();
float n = static_cast<float>(ndata[tid+1]-ndata[tid]);
if (threadIdx.x == 0) {
#pragma unroll
for (j = 1; j < 64; j++) {
x += sx[j];
y += sy[j];
}
newcent[tid].x = x / n;
newcent[tid].y = y / n;
}
/*
int j;
int tid = threadIdx.x;
newcent[tid].x = 0.0;
newcent[tid].y = 0.0;
for(j = ndata[tid]; j < ndata[tid+1]; j++) {
newcent[tid].x += d[j].x;
newcent[tid].y += d[j].y;
}
float n = static_cast<float>(ndata[tid+1]-ndata[tid]);
newcent[tid].x /= n;
newcent[tid].y /= n;
*/
}
class KmeansMap: public HadoopPipes::Mapper {
public:
KmeansMap(HadoopPipes::TaskContext& context){}
double gettime() {
struct timeval tv;
gettimeofday(&tv,NULL);
return tv.tv_sec+tv.tv_usec * 1e-6;
}
//zero init
void init_int(int *data, int num) {
for(int i = 0; i < num; i++) {
data[i] = 0;
}
}
void init_float(float *data, int num) {
for(int i = 0; i < num; i++) {
data[i] = 0.0;
}
}
void sort_by_cent(data *d, int start, int end)
{
int i = start;
int j = end;
int base = (d[start].cent + d[end].cent) / 2;
while(1) {
while (d[i].cent < base) i++;
while (d[j].cent > base) j--;
if (i >= j) break;
data temp = d[i];
d[i] = d[j];
d[j] = temp;
i++;
j--;
}
if (start < i-1) sort_by_cent(d, start, i-1);
if (end > j+1) sort_by_cent(d, j+1, end);
}
//counts the number of data objects contained by each cluster
void count_data_in_cluster(data *d, int *ndata,
int num_of_data, int num_of_cluster) {
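// Histogram the cluster sizes into ndata[1..k], then prefix-sum so that ndata[c]..ndata[c+1] bounds cluster c's slice of the sorted data array.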
int i;
for(i = 0; i < num_of_data; i++) {
ndata[d[i].cent + 1]++;
}
for(i = 1; i < num_of_cluster + 1; i++) {
ndata[i] += ndata[i-1];
}
}
/*
bool floatcmp(data *a, data *b, int num) {
for(int i = 0; i < num; i++) {
if(a[i].x != b[i].x || a[i].y != b[i].y) {
return false;
}
}
return true;
}
*/
float mysqrt(data a, data b) {
float x = a.x - b.x;
float y = a.y - b.y;
return std::sqrt(x*x + y*y);
}
bool datacmp(data *a, data *b, int num) {
for(int i = 0; i < num; i++) {
if( mysqrt(a[i], b[i]) > 1 ) {
return false;
}
}
return true;
}
void map(HadoopPipes::MapContext& context) {
// input format
// --num of clusters ( == k)
// --num of data( == n)
// --initial centers for all clusters;
// --input rows;
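// e.g. a hypothetical record for k = 2, n = 3 (values invented for illustration):
// "2 3 0.0 0.0 10.0 10.0 1.0 2.0 9.0 8.0 11.0 12.0"
// i.e. k, n, two initial centroids, then three data points.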
// fprintf(stderr, "start\n");
int mp;
double t[10];
t[0] = gettime();
std::vector<std::string> elements
= HadoopUtils::splitString(context.getInputValue(), " ");
t[1] = gettime();
const int k = HadoopUtils::toInt(elements[0]);
const int n = HadoopUtils::toInt(elements[1]);
// c[] : pos of cluster
// d[] : data
// ndata[] : num of data for each cluster
data c[2][k];
data d[n];
int ndata[k+1];
int i, cur, next, iter;
//for Device
data *dc;
data *dd;
int *dndata;
//initialize
for(i = 0; i < k; i++) {
c[0][i].x = HadoopUtils::toFloat(elements[2*i+2]);
c[0][i].y = HadoopUtils::toFloat(elements[2*i+3]);
}
for(i = 0; i < n; i++) {
d[i].x = HadoopUtils::toFloat(elements[2*i+2*k+2]);
d[i].y = HadoopUtils::toFloat(elements[2*i+2*k+3]);
}
t[2] = gettime();
#ifdef DEBUG
for(i = 0; i < k; i++) {
std::cout << c[0][i].x << " " << c[0][i].y;
}
std::cout << '\n';
for(i = 0; i < n; i++)
std::cout << d[i].x << " " << d[i].y << " ";
std::cout << '\n';
#endif
//cuda init
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
mp = prop.multiProcessorCount;
hipError_t err = hipMalloc((void **)&dc, sizeof(data)*2*k);
if(err != hipSuccess) {
std::cerr << "Malloc_1 failed: " << hipGetErrorString(err) << ".\n";
}
err = hipMalloc((void **)&dd, sizeof(data)*n);
if(err != hipSuccess) {
std::cerr << "Malloc_2 failed: " << hipGetErrorString(err) << ".\n";
}
err = hipMalloc((void **)&dndata, sizeof(int)*(k+1));
if(err != hipSuccess) {
std::cerr << "Malloc_3 failed: " << hipGetErrorString(err) << ".\n";
}
t[3] = gettime();
// fprintf(stderr, "mid\n");
// buffer id
cur = 0;
next = 1;
iter = 0;
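// Repeat assignment and recalculation until no centroid moves by more than 1.0 between passes (see datacmp) or 100 iterations have run.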
do {
iter++;
// for(int j = 0; j < 10; j++) {
init_int(ndata, k+1);
//data object assignment
hipMemcpy(dc + cur*k, c[cur], sizeof(data)*k, hipMemcpyHostToDevice);
hipMemcpy(dd, d, sizeof(data)*n, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( assign_data), dim3(mp),dim3(512), 0, 0, dc + cur*k, dd, n, k);
err = hipMemcpy(d, dd, sizeof(data)*n, hipMemcpyDeviceToHost);
if(err != hipSuccess) {
std::cerr << "Memcpy_1 failed: " << hipGetErrorString(err) << ".\n";
}
t[4] = gettime();
#ifdef DEBUG
for(i = 0; i < n; i++)
std::cout << d[i].cent << " ";
std::cout << '\n';
#endif
//rearranges all data objects
//and counts the number of data objects contained by each cluster
sort_by_cent(d, 0, n-1);
count_data_in_cluster(d, ndata, n, k);
t[5] = gettime();
#ifdef DEBUG
for(i = 0; i < k+1; i++)
std::cout << ndata[i] << " ";
std::cout << '\n';
#endif
//K centroids recalculation
err = hipMemcpy(dndata, ndata, sizeof(int)*(k+1), hipMemcpyHostToDevice);
if(err != hipSuccess) {
std::cerr << "Memcpy_2_1 failed: " << hipGetErrorString(err) << ".\n";
} else {
std::cerr << "Memcpy_2_1 success: " << hipGetErrorString(err) << ".\n";
}
err = hipMemcpy(dc + next*k, c[next], sizeof(data)*k, hipMemcpyHostToDevice);
if(err != hipSuccess) {
std::cerr << "Memcpy_2_2 failed: " << hipGetErrorString(err) << ".\n";
} else {
std::cerr << "Memcpy_2_2 success: " << hipGetErrorString(err) << ".\n";
}
err = hipMemcpy(dd, d, sizeof(data)*n, hipMemcpyHostToDevice);
if(err != hipSuccess) {
std::cerr << "Memcpy_2_3 failed: " << hipGetErrorString(err) << ".\n";
} else {
std::cerr << "Memcpy_2_3 success: " << hipGetErrorString(err) << ".\n";
}
// centroids_recalc<<<1,k>>>(dc + next*k, dd, dndata);
hipLaunchKernelGGL(( centroids_recalc), dim3(k),dim3(64), 0, 0, dc + next*k, dd, dndata);
err = hipMemcpy(c[next], dc + next*k, sizeof(data)*k, hipMemcpyDeviceToHost);
if(err != hipSuccess) {
std::cerr << "Memcpy_2_4 failed: " << hipGetErrorString(err) << ".\n";
} else {
std::cerr << "Memcpy_2_4 success: " << hipGetErrorString(err) << ".\n";
}
t[6] = gettime();
#ifdef DEBUG
for(i = 0; i < k; i++)
std::cout << c[next][i].x << " " << c[next][i].y << " ";
std::cout << "\n\n";
#endif
cur = 1 - cur;
next = 1 - next;
} while( datacmp(c[cur], c[next], k) == false && iter < 100);
// }
// fprintf(stderr, "finish\n");
//emit
//key : cluster id
//value : cluster centroid position
for(i = 0; i < k; i++) {
context.emit(context.getInputKey() + '\t' + HadoopUtils::toString(i),
HadoopUtils::toString((int)c[cur][i].x) + '\t'
+ HadoopUtils::toString((int)c[cur][i].y));
}
t[7] = gettime();
std::cout << "Run on GPU" << '\n';
std::cout << "iter : " << iter << '\n';
for(i = 0; i < 7; i++) {
std::cout << t[i+1] - t[i] << '\n';
}
std::cout << t[7] - t[0] << '\n';
std::cout << '\n';
hipFree(dc);
hipFree(dd);
hipFree(dndata);
}
};
class KmeansReduce: public HadoopPipes::Reducer {
public:
KmeansReduce(HadoopPipes::TaskContext& context){}
void reduce(HadoopPipes::ReduceContext& context) {
while(context.nextValue()) {
context.emit(context.getInputKey(), context.getInputValue());
}
}
};
int main(int argc, char *argv[]) {
if(argc > 1) {
deviceID = atoi(argv[1]);
std::cout << "deviceID: " << deviceID << ".\n";
}
return HadoopPipes::runTask(HadoopPipes::TemplateFactory<KmeansMap,
KmeansReduce>());
}
| 6eabb14cc540d8ea1a023b15aed53b55b2b928ee.cu | #include "stdint.h"
#include "hadoop/Pipes.hh"
#include "hadoop/TemplateFactory.hh"
#include "hadoop/StringUtils.hh"
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <cstdlib>
#include <cmath>
#include <time.h>
#include <sys/time.h>
//#define DEBUG
int deviceID = 0;
// datum of a plot
// x,y : coordinate
// cent : id of nearest cluster
class data {
public:
float x;
float y;
int cent;
};
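// Euclidean distance between two points, evaluated on the device.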
__device__ float mysqrt(data a, data b) {
float x = abs(a.x - b.x);
float y = abs(a.y - b.y);
return std::sqrt(x*x + y*y);
}
//data object assignment
__global__ void assign_data(data *centroids,
data *data,
int num_of_data,
int num_of_cluster)
{
int i;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// int tid = threadIdx.x;
// int nthreads = blockDim.x;
int nthreads = blockDim.x * gridDim.x;
// int part = num_of_data / nthreads; /* 65535*512 */
// for(i = part*tid; i < part*(tid+1); i++) {
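// Grid-stride loop: each thread handles every nthreads-th data object.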
for (i = tid; i < num_of_data; i += nthreads) {
int center = 0;
float dmin = mysqrt(centroids[0], data[i]);
for(int j = 1; j < num_of_cluster; j++) {
float dist = mysqrt(centroids[j], data[i]);
if(dist < dmin) {
dmin = dist;
center = j;
}
}
data[i].cent = center;
}
}
//K centroids recalculation
//tid has to be less than the num of newcent
__global__ void centroids_recalc(
data *newcent,
data *d,
int *ndata) {
int j;
int tid = blockIdx.x;
__shared__ float sx[64];
__shared__ float sy[64];
float x = 0.0f;
float y = 0.0f;
for(j = ndata[tid] + threadIdx.x; j < ndata[tid+1]; j += blockDim.x) {
x += d[j].x;
y += d[j].y;
}
sx[threadIdx.x] = x;
sy[threadIdx.x] = y;
__syncthreads();
float n = static_cast<float>(ndata[tid+1]-ndata[tid]);
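// Thread 0 reduces the per-thread partial sums and writes the new centroid.
// Note: this assumes the cluster is non-empty (n > 0).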
if (threadIdx.x == 0) {
#pragma unroll
for (j = 1; j < 64; j++) {
x += sx[j];
y += sy[j];
}
newcent[tid].x = x / n;
newcent[tid].y = y / n;
}
/*
int j;
int tid = threadIdx.x;
newcent[tid].x = 0.0;
newcent[tid].y = 0.0;
for(j = ndata[tid]; j < ndata[tid+1]; j++) {
newcent[tid].x += d[j].x;
newcent[tid].y += d[j].y;
}
float n = static_cast<float>(ndata[tid+1]-ndata[tid]);
newcent[tid].x /= n;
newcent[tid].y /= n;
*/
}
class KmeansMap: public HadoopPipes::Mapper {
public:
KmeansMap(HadoopPipes::TaskContext& context){}
double gettime() {
struct timeval tv;
gettimeofday(&tv,NULL);
return tv.tv_sec+tv.tv_usec * 1e-6;
}
//zero init
void init_int(int *data, int num) {
for(int i = 0; i < num; i++) {
data[i] = 0;
}
}
void init_float(float *data, int num) {
for(int i = 0; i < num; i++) {
data[i] = 0.0;
}
}
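// Quicksort keyed on the assigned cluster id, so that each cluster's points become contiguous in d[].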
void sort_by_cent(data *d, int start, int end)
{
int i = start;
int j = end;
int base = (d[start].cent + d[end].cent) / 2;
while(1) {
while (d[i].cent < base) i++;
while (d[j].cent > base) j--;
if (i >= j) break;
data temp = d[i];
d[i] = d[j];
d[j] = temp;
i++;
j--;
}
if (start < i-1) sort_by_cent(d, start, i-1);
if (end > j+1) sort_by_cent(d, j+1, end);
}
//counts the number of data objects contained by each cluster
void count_data_in_cluster(data *d, int *ndata,
int num_of_data, int num_of_cluster) {
int i;
for(i = 0; i < num_of_data; i++) {
ndata[d[i].cent + 1]++;
}
for(i = 1; i < num_of_cluster + 1; i++) {
ndata[i] += ndata[i-1];
}
}
/*
bool floatcmp(data *a, data *b, int num) {
for(int i = 0; i < num; i++) {
if(a[i].x != b[i].x || a[i].y != b[i].y) {
return false;
}
}
return true;
}
*/
float mysqrt(data a, data b) {
float x = a.x - b.x;
float y = a.y - b.y;
return std::sqrt(x*x + y*y);
}
bool datacmp(data *a, data *b, int num) {
for(int i = 0; i < num; i++) {
if( mysqrt(a[i], b[i]) > 1 ) {
return false;
}
}
return true;
}
void map(HadoopPipes::MapContext& context) {
// input format
// --num of clusters ( == k)
// --num of data( == n)
// --initial centers for all clusters;
// --input rows;
// fprintf(stderr, "start\n");
int mp;
double t[10];
t[0] = gettime();
std::vector<std::string> elements
= HadoopUtils::splitString(context.getInputValue(), " ");
t[1] = gettime();
const int k = HadoopUtils::toInt(elements[0]);
const int n = HadoopUtils::toInt(elements[1]);
// c[] : pos of cluster
// d[] : data
// ndata[] : num of data for each cluster
data c[2][k];
data d[n];
int ndata[k+1];
int i, cur, next, iter;
//for Device
data *dc;
data *dd;
int *dndata;
//initialize
for(i = 0; i < k; i++) {
c[0][i].x = HadoopUtils::toFloat(elements[2*i+2]);
c[0][i].y = HadoopUtils::toFloat(elements[2*i+3]);
}
for(i = 0; i < n; i++) {
d[i].x = HadoopUtils::toFloat(elements[2*i+2*k+2]);
d[i].y = HadoopUtils::toFloat(elements[2*i+2*k+3]);
}
t[2] = gettime();
#ifdef DEBUG
for(i = 0; i < k; i++) {
std::cout << c[0][i].x << " " << c[0][i].y;
}
std::cout << '\n';
for(i = 0; i < n; i++)
std::cout << d[i].x << " " << d[i].y << " ";
std::cout << '\n';
#endif
//cuda init
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
mp = prop.multiProcessorCount;
cudaError_t err = cudaMalloc((void **)&dc, sizeof(data)*2*k);
if(err != cudaSuccess) {
std::cerr << "Malloc_1 failed: " << cudaGetErrorString(err) << ".\n";
}
err = cudaMalloc((void **)&dd, sizeof(data)*n);
if(err != cudaSuccess) {
std::cerr << "Malloc_2 failed: " << cudaGetErrorString(err) << ".\n";
}
err = cudaMalloc((void **)&dndata, sizeof(int)*(k+1));
if(err != cudaSuccess) {
std::cerr << "Malloc_3 failed: " << cudaGetErrorString(err) << ".\n";
}
t[3] = gettime();
// fprintf(stderr, "mid\n");
// buffer id
cur = 0;
next = 1;
iter = 0;
do {
iter++;
// for(int j = 0; j < 10; j++) {
init_int(ndata, k+1);
//data object assignment
cudaMemcpy(dc + cur*k, c[cur], sizeof(data)*k, cudaMemcpyHostToDevice);
cudaMemcpy(dd, d, sizeof(data)*n, cudaMemcpyHostToDevice);
assign_data<<<mp,512>>>(dc + cur*k, dd, n, k);
err = cudaMemcpy(d, dd, sizeof(data)*n, cudaMemcpyDeviceToHost);
if(err != cudaSuccess) {
std::cerr << "Memcpy_1 failed: " << cudaGetErrorString(err) << ".\n";
}
t[4] = gettime();
#ifdef DEBUG
for(i = 0; i < n; i++)
std::cout << d[i].cent << " ";
std::cout << '\n';
#endif
//rearranges all data objects
//and counts the number of data objects contained by each cluster
sort_by_cent(d, 0, n-1);
count_data_in_cluster(d, ndata, n, k);
t[5] = gettime();
#ifdef DEBUG
for(i = 0; i < k+1; i++)
std::cout << ndata[i] << " ";
std::cout << '\n';
#endif
//K centroids recalculation
err = cudaMemcpy(dndata, ndata, sizeof(int)*(k+1), cudaMemcpyHostToDevice);
if(err != cudaSuccess) {
std::cerr << "Memcpy_2_1 failed: " << cudaGetErrorString(err) << ".\n";
} else {
std::cerr << "Memcpy_2_1 success: " << cudaGetErrorString(err) << ".\n";
}
err = cudaMemcpy(dc + next*k, c[next], sizeof(data)*k, cudaMemcpyHostToDevice);
if(err != cudaSuccess) {
std::cerr << "Memcpy_2_2 failed: " << cudaGetErrorString(err) << ".\n";
} else {
std::cerr << "Memcpy_2_2 success: " << cudaGetErrorString(err) << ".\n";
}
err = cudaMemcpy(dd, d, sizeof(data)*n, cudaMemcpyHostToDevice);
if(err != cudaSuccess) {
std::cerr << "Memcpy_2_3 failed: " << cudaGetErrorString(err) << ".\n";
} else {
std::cerr << "Memcpy_2_3 success: " << cudaGetErrorString(err) << ".\n";
}
// centroids_recalc<<<1,k>>>(dc + next*k, dd, dndata);
centroids_recalc<<<k,64>>>(dc + next*k, dd, dndata);
err = cudaMemcpy(c[next], dc + next*k, sizeof(data)*k, cudaMemcpyDeviceToHost);
if(err != cudaSuccess) {
std::cerr << "Memcpy_2_4 failed: " << cudaGetErrorString(err) << ".\n";
} else {
std::cerr << "Memcpy_2_4 success: " << cudaGetErrorString(err) << ".\n";
}
t[6] = gettime();
#ifdef DEBUG
for(i = 0; i < k; i++)
std::cout << c[next][i].x << " " << c[next][i].y << " ";
std::cout << "\n\n";
#endif
cur = 1 - cur;
next = 1 - next;
} while( datacmp(c[cur], c[next], k) == false && iter < 100);
// }
// fprintf(stderr, "finish\n");
//emit
//key : cluster id
//value : cluster centroid position
for(i = 0; i < k; i++) {
context.emit(context.getInputKey() + '\t' + HadoopUtils::toString(i),
HadoopUtils::toString((int)c[cur][i].x) + '\t'
+ HadoopUtils::toString((int)c[cur][i].y));
}
t[7] = gettime();
std::cout << "Run on GPU" << '\n';
std::cout << "iter : " << iter << '\n';
for(i = 0; i < 7; i++) {
std::cout << t[i+1] - t[i] << '\n';
}
std::cout << t[7] - t[0] << '\n';
std::cout << '\n';
cudaFree(dc);
cudaFree(dd);
cudaFree(dndata);
}
};
class KmeansReduce: public HadoopPipes::Reducer {
public:
KmeansReduce(HadoopPipes::TaskContext& context){}
void reduce(HadoopPipes::ReduceContext& context) {
while(context.nextValue()) {
context.emit(context.getInputKey(), context.getInputValue());
}
}
};
int main(int argc, char *argv[]) {
if(argc > 1) {
deviceID = atoi(argv[1]);
std::cout << "deviceID: " << deviceID << ".\n";
}
return HadoopPipes::runTask(HadoopPipes::TemplateFactory<KmeansMap,
KmeansReduce>());
}
|
61abe02f4d80dd2ac71c888e25659faf0a6bef01.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include "wb.h"
__device__ int binarySearch(const int value, const int *A, const int N) {
int left = 0;
int right = N - 1;
int location = N;
while (left <= right) {
int middle = (left + right) / 2;
if ((blockIdx.y == 0) ? // Check if operating on A or B
(A[middle] <= value) : // If A, only return after value increases
(A[middle] < value)) { // If B, return as soon as you meet the value or it's larger
left = middle + 1;
} else {
location = middle;
right = middle - 1;
}
}
return location;
}
__device__ int linearSearch(const int value, const int *A, const int N) {
for (int i = 0; i < N; i++) {
if ((blockIdx.y == 0) ? // Check if operating on A or B
(A[i] > value) : // If A, only return after value increases
(A[i] >= value)) { // If B, return as soon as you meet the value or it's larger
return i;
}
}
return N;
}
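// blockIdx.y selects the source array (0 -> A, 1 -> B). The asymmetric tie-breaking in the
// search routines keeps equal keys from colliding in C. binarySearch is provided as an
// alternative, but the kernel currently calls linearSearch.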
__global__ void merge(int *C, const int *A, const int *B, const int N) {
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
if (threadId < N) {
// Operate on a different array based on which block we're on.
const int *source_array = (blockIdx.y == 0) ? A : B;
const int *search_array = (blockIdx.y == 0) ? B : A;
int i = linearSearch(source_array[threadId], search_array, N);
C[threadId + i] = source_array[threadId];
}
}
int main(int argc, char **argv) {
wbArg_t args;
int N;
int *A;
int *B;
int *C;
int *deviceA;
int *deviceB;
int *deviceC;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
A = (int *) wbImport(wbArg_getInputFile(args, 0), &N, NULL, "Integer");
B = (int *) wbImport(wbArg_getInputFile(args, 1), &N, NULL, "Integer");
C = (int *) malloc(2 * N * sizeof(int));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", N);
int threads = 256;
int blocks = (N + threads - 1) / threads;
wbTime_start(GPU, "Allocating GPU memory.");
hipMalloc((void **) &deviceA, N * sizeof(int));
hipMalloc((void **) &deviceB, N * sizeof(int));
hipMalloc((void **) &deviceC, 2 * N * sizeof(int));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
hipMemcpy(deviceA, A, N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(deviceB, B, N * sizeof(int), hipMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
// Perform on CUDA.
const dim3 blockSize(threads, 1, 1);
const dim3 gridSize(blocks, 2, 1);
wbTime_start(Compute, "Performing CUDA computation");
hipLaunchKernelGGL(( merge) , dim3(gridSize), dim3(blockSize), 0, 0, deviceC, deviceA, deviceB, N);
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
hipMemcpy(C, deviceC, 2 * N * sizeof(int), hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
hipFree(deviceA);
hipFree(deviceB);
hipFree(deviceC);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, C, 2 * N);
free(A);
free(B);
free(C);
#if LAB_DEBUG
system("pause");
#endif
return 0;
}
| 61abe02f4d80dd2ac71c888e25659faf0a6bef01.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "wb.h"
__device__ int binarySearch(const int value, const int *A, const int N) {
int left = 0;
int right = N - 1;
int location = N;
while (left <= right) {
int middle = (left + right) / 2;
if ((blockIdx.y == 0) ? // Check if operating on A or B
(A[middle] <= value) : // If A, only return after value increases
(A[middle] < value)) { // If B, return as soon as you meet the value or it's larger
left = middle + 1;
} else {
location = middle;
right = middle - 1;
}
}
return location;
}
__device__ int linearSearch(const int value, const int *A, const int N) {
for (int i = 0; i < N; i++) {
if ((blockIdx.y == 0) ? // Check if operating on A or B
(A[i] > value) : // If A, only return after value increases
(A[i] >= value)) { // If B, return as soon as you meet the value or it's larger
return i;
}
}
return N;
}
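// blockIdx.y selects the source array (0 -> A, 1 -> B). The asymmetric tie-breaking in the
// search routines keeps equal keys from colliding in C. binarySearch is provided as an
// alternative, but the kernel currently calls linearSearch.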
__global__ void merge(int *C, const int *A, const int *B, const int N) {
int threadId = blockDim.x * blockIdx.x + threadIdx.x;
if (threadId < N) {
// Operate on a different array based on which block we're on.
const int *source_array = (blockIdx.y == 0) ? A : B;
const int *search_array = (blockIdx.y == 0) ? B : A;
int i = linearSearch(source_array[threadId], search_array, N);
C[threadId + i] = source_array[threadId];
}
}
int main(int argc, char **argv) {
wbArg_t args;
int N;
int *A;
int *B;
int *C;
int *deviceA;
int *deviceB;
int *deviceC;
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
A = (int *) wbImport(wbArg_getInputFile(args, 0), &N, NULL, "Integer");
B = (int *) wbImport(wbArg_getInputFile(args, 1), &N, NULL, "Integer");
C = (int *) malloc(2 * N * sizeof(int));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The input length is ", N);
int threads = 256;
int blocks = (N + threads - 1) / threads;
wbTime_start(GPU, "Allocating GPU memory.");
cudaMalloc((void **) &deviceA, N * sizeof(int));
cudaMalloc((void **) &deviceB, N * sizeof(int));
cudaMalloc((void **) &deviceC, 2 * N * sizeof(int));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
cudaMemcpy(deviceA, A, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(deviceB, B, N * sizeof(int), cudaMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
// Perform on CUDA.
const dim3 blockSize(threads, 1, 1);
const dim3 gridSize(blocks, 2, 1);
wbTime_start(Compute, "Performing CUDA computation");
merge <<<gridSize, blockSize>>>(deviceC, deviceA, deviceB, N);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
cudaMemcpy(C, deviceC, 2 * N * sizeof(int), cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
wbTime_start(GPU, "Freeing GPU Memory");
cudaFree(deviceA);
cudaFree(deviceB);
cudaFree(deviceC);
wbTime_stop(GPU, "Freeing GPU Memory");
wbSolution(args, C, 2 * N);
free(A);
free(B);
free(C);
#if LAB_DEBUG
system("pause");
#endif
return 0;
}
|