hip_filename (string, 5–84 chars) | hip_content (string, 79–9.69M chars) | cuda_filename (string, 4–83 chars) | cuda_content (string, 19–9.69M chars) |
---|---|---|---|
46967c255112f05d7ea69bdc3d280f4561902624.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// Utilities and system includes
#include <helper_cuda.h>
#ifndef USE_TEXTURE_RGBA8UI
texture<float4, 2, hipReadModeElementType> inTex;
#else
texture<uchar4, 2, hipReadModeElementType> inTex;
#endif
// clamp x to range [a, b]
__device__ float clamp(float x, float a, float b)
{
return max(a, min(b, x));
}
__device__ int clamp(int x, int a, int b)
{
return max(a, min(b, x));
}
// convert floating point rgb color to 8-bit integer
__device__ int rgbToInt(float r, float g, float b)
{
r = clamp(r, 0.0f, 255.0f);
g = clamp(g, 0.0f, 255.0f);
b = clamp(b, 0.0f, 255.0f);
return (int(b)<<16) | (int(g)<<8) | int(r);
}
// get pixel from 2D image, with clamping to border
__device__ uchar4 getPixel(int x, int y)
{
#ifndef USE_TEXTURE_RGBA8UI
float4 res = tex2D(inTex, x, y);
uchar4 ucres = make_uchar4(res.x*255.0f, res.y*255.0f, res.z*255.0f, res.w*255.0f);
#else
uchar4 ucres = tex2D(inTex, x, y);
#endif
return ucres;
}
// macros to make indexing shared memory easier
#define SMEM(X, Y) sdata[(Y)*tilew+(X)]
/*
2D convolution using shared memory
- operates on 8-bit RGB data stored in 32-bit int
- assumes kernel radius is less than or equal to block size
- not optimized for performance
_____________
| : : |
|_ _:_____:_ _|
| | | |
| | | |
|_ _|_____|_ _|
r | : : |
|___:_____:___|
r bw r
<----tilew---->
*/
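/*
  Illustrative note (not part of the original sample): with a bw x bh thread
  block and apron radius r the tile spans (bw + 2*r) x (bh + 2*r) uchar4
  texels, e.g. bw = bh = 16 and r = 4 gives a 24 x 24 tile (576 texels,
  2304 bytes of dynamic shared memory). Each thread at (tx, ty) writes its
  own center texel to SMEM(r + tx, r + ty); only threads with tx < r or
  ty < r load the extra apron rows/columns.
*/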
__global__ void
cudaProcess(unsigned int *g_odata, int imgw, int imgh,
int tilew, int r, float threshold, float highlight, float w0, float w1, float w2, float w3, float w4, float w5, float w6, float w7, float w8)
{
extern __shared__ uchar4 sdata[];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bw = blockDim.x;
int bh = blockDim.y;
int x = blockIdx.x*bw + tx;
int y = blockIdx.y*bh + ty;
#if 0
uchar4 c4 = getPixel(x, y);
g_odata[y*imgw+x] = rgbToInt(c4.z, c4.y, c4.x);
#else
// copy tile to shared memory
// center region
SMEM(r + tx, r + ty) = getPixel(x, y);
// borders
if (threadIdx.x < r)
{
// left
SMEM(tx, r + ty) = getPixel(x - r, y);
// right
SMEM(r + bw + tx, r + ty) = getPixel(x + bw, y);
}
if (threadIdx.y < r)
{
// top
SMEM(r + tx, ty) = getPixel(x, y - r);
// bottom
SMEM(r + tx, r + bh + ty) = getPixel(x, y + bh);
}
// load corners
if ((threadIdx.x < r) && (threadIdx.y < r))
{
// tl
SMEM(tx, ty) = getPixel(x - r, y - r);
// bl
SMEM(tx, r + bh + ty) = getPixel(x - r, y + bh);
// tr
SMEM(r + bw + tx, ty) = getPixel(x + bw, y - r); // fixed: the top-right apron offsets x by bw (x + bh is only equivalent for square blocks)
// br
SMEM(r + bw + tx, r + bh + ty) = getPixel(x + bw, y + bh);
}
// wait for loads to complete
__syncthreads();
// perform convolution
float rsum = 0.0f;
float gsum = 0.0f;
float bsum = 0.0f;
float samples = 0.0f;
float weightSum = 0.0f;
float convMat[9] = {w0,w1,w2,w3,w4,w5,w6,w7,w8};
for(int i = 0; i < 9; i++)
weightSum += convMat[i];
for (int dy=-r; dy<=r; dy++)
{
for (int dx=-r; dx<=r; dx++)
{
#if 0
// try this to see the benefit of using shared memory
uchar4 pixel = getPixel(x+dx, y+dy);
#else
uchar4 pixel = SMEM(r+tx+dx, r+ty+dy);
#endif
// only sum pixels within disc-shaped kernel
//float l = dx*dx + dy*dy;
//if (l <= r*r)
//{
int index = (dx+1)+(dy+1)*3;
float r = float(pixel.x);//*convMat[index];
float g = float(pixel.y);//*convMat[index];
float b = float(pixel.z);//*convMat[index];
#if 1
r *= convMat[index];
g *= convMat[index];
b *= convMat[index];
#endif
rsum += r;
gsum += g;
bsum += b;
samples += 1.0f;
//}
}
}
//rsum /= weightSum/4;
//gsum /= weightSum/4;
//bsum /= weightSum/4;
// ABGR
g_odata[y*imgw+x] = rgbToInt(rsum, gsum, bsum);
//g_odata[y*imgw+x] = rgbToInt(x,y,0);
#endif
}
extern "C" void
launch_cudaProcess(dim3 grid, dim3 block, int sbytes,
hipArray *g_data_array, unsigned int *g_odata,
int imgw, int imgh, int tilew,
int radius, float threshold, float highlight, float convMat[])
{
checkCudaErrors(hipBindTextureToArray(inTex, g_data_array));
struct hipChannelFormatDesc desc;
checkCudaErrors(hipGetChannelDesc(&desc, g_data_array));
#if 0
printf("CUDA Array channel descriptor, bits per component:\n");
printf("X %d Y %d Z %d W %d, kind %d\n",
desc.x,desc.y,desc.z,desc.w,desc.f);
printf("Possible values for channel format kind: i %d, u%d, f%d:\n",
hipChannelFormatKindSigned, hipChannelFormatKindUnsigned,
hipChannelFormatKindFloat);
#endif
//float convMat[9];
//for(int count = 0; count < 9; count++){
// convMat[count] = 1.0;
//}
float w0 = convMat[0];
float w1 = convMat[1];
float w2 = convMat[2];
float w3 = convMat[3];
float w4 = convMat[4];
float w5 = convMat[5];
float w6 = convMat[6];
float w7 = convMat[7];
float w8 = convMat[8];
//printf("\n");
#ifdef GPU_PROFILING
StopWatchInterface *timer = 0;
sdkCreateTimer(&timer);
int nIter = 30;
for (int i = -1; i < nIter; ++i)
{
if (i == 0)
{
sdkStartTimer(&timer);
}
#endif
hipLaunchKernelGGL(( cudaProcess), dim3(grid), dim3(block), sbytes , 0, g_odata, imgw, imgh,
block.x+(2*radius), radius, 0.8f, 4.0f, w0,w1,w2,w3,w4,w5,w6,w7,w8);
#ifdef GPU_PROFILING
}
hipDeviceSynchronize();
sdkStopTimer(&timer);
double dSeconds = sdkGetTimerValue(&timer)/((double)nIter * 1000.0);
double dNumTexels = (double)imgw * (double)imgh;
double mtexps = 1.0e-6 * dNumTexels/dSeconds;
if (radius == 4)
{
printf("\n");
printf("postprocessGL, Throughput = %.4f MTexels/s, Time = %.5f s, Size = %.0f Texels, NumDevsUsed = %d, Workgroup = %u\n",
mtexps, dSeconds, dNumTexels, 1, block.x * block.y);
}
#endif
}
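/*
  Hedged usage sketch (illustrative only; the grid/block values and convMat
  contents are assumptions, and g_data_array, g_odata, imgw, imgh are assumed
  to be set up by the caller):

      float convMat[9] = { 1,1,1, 1,1,1, 1,1,1 };          // simple box filter
      dim3  block(16, 16);
      dim3  grid((imgw + block.x - 1) / block.x,
                 (imgh + block.y - 1) / block.y);
      int   radius = 4;
      int   tilew  = block.x + 2 * radius;
      int   sbytes = tilew * (block.y + 2 * radius) * (int)sizeof(uchar4);
      launch_cudaProcess(grid, block, sbytes, g_data_array, g_odata,
                         imgw, imgh, tilew, radius, 0.8f, 4.0f, convMat);
*/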
| 46967c255112f05d7ea69bdc3d280f4561902624.cu | /*
* Copyright 1993-2014 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// Utilities and system includes
#include <helper_cuda.h>
#ifndef USE_TEXTURE_RGBA8UI
texture<float4, 2, cudaReadModeElementType> inTex;
#else
texture<uchar4, 2, cudaReadModeElementType> inTex;
#endif
// clamp x to range [a, b]
__device__ float clamp(float x, float a, float b)
{
return max(a, min(b, x));
}
__device__ int clamp(int x, int a, int b)
{
return max(a, min(b, x));
}
// convert floating point rgb color to 8-bit integer
__device__ int rgbToInt(float r, float g, float b)
{
r = clamp(r, 0.0f, 255.0f);
g = clamp(g, 0.0f, 255.0f);
b = clamp(b, 0.0f, 255.0f);
return (int(b)<<16) | (int(g)<<8) | int(r);
}
// get pixel from 2D image, with clamping to border
__device__ uchar4 getPixel(int x, int y)
{
#ifndef USE_TEXTURE_RGBA8UI
float4 res = tex2D(inTex, x, y);
uchar4 ucres = make_uchar4(res.x*255.0f, res.y*255.0f, res.z*255.0f, res.w*255.0f);
#else
uchar4 ucres = tex2D(inTex, x, y);
#endif
return ucres;
}
// macros to make indexing shared memory easier
#define SMEM(X, Y) sdata[(Y)*tilew+(X)]
/*
2D convolution using shared memory
- operates on 8-bit RGB data stored in 32-bit int
- assumes kernel radius is less than or equal to block size
- not optimized for performance
_____________
| : : |
|_ _:_____:_ _|
| | | |
| | | |
|_ _|_____|_ _|
r | : : |
|___:_____:___|
r bw r
<----tilew---->
*/
__global__ void
cudaProcess(unsigned int *g_odata, int imgw, int imgh,
int tilew, int r, float threshold, float highlight, float w0, float w1, float w2, float w3, float w4, float w5, float w6, float w7, float w8)
{
extern __shared__ uchar4 sdata[];
int tx = threadIdx.x;
int ty = threadIdx.y;
int bw = blockDim.x;
int bh = blockDim.y;
int x = blockIdx.x*bw + tx;
int y = blockIdx.y*bh + ty;
#if 0
uchar4 c4 = getPixel(x, y);
g_odata[y*imgw+x] = rgbToInt(c4.z, c4.y, c4.x);
#else
// copy tile to shared memory
// center region
SMEM(r + tx, r + ty) = getPixel(x, y);
// borders
if (threadIdx.x < r)
{
// left
SMEM(tx, r + ty) = getPixel(x - r, y);
// right
SMEM(r + bw + tx, r + ty) = getPixel(x + bw, y);
}
if (threadIdx.y < r)
{
// top
SMEM(r + tx, ty) = getPixel(x, y - r);
// bottom
SMEM(r + tx, r + bh + ty) = getPixel(x, y + bh);
}
// load corners
if ((threadIdx.x < r) && (threadIdx.y < r))
{
// tl
SMEM(tx, ty) = getPixel(x - r, y - r);
// bl
SMEM(tx, r + bh + ty) = getPixel(x - r, y + bh);
// tr
SMEM(r + bw + tx, ty) = getPixel(x + bw, y - r); // fixed: the top-right apron offsets x by bw (x + bh is only equivalent for square blocks)
// br
SMEM(r + bw + tx, r + bh + ty) = getPixel(x + bw, y + bh);
}
// wait for loads to complete
__syncthreads();
// perform convolution
float rsum = 0.0f;
float gsum = 0.0f;
float bsum = 0.0f;
float samples = 0.0f;
float weightSum = 0.0f;
float convMat[9] = {w0,w1,w2,w3,w4,w5,w6,w7,w8};
for(int i = 0; i < 9; i++)
weightSum += convMat[i];
for (int dy=-r; dy<=r; dy++)
{
for (int dx=-r; dx<=r; dx++)
{
#if 0
// try this to see the benefit of using shared memory
uchar4 pixel = getPixel(x+dx, y+dy);
#else
uchar4 pixel = SMEM(r+tx+dx, r+ty+dy);
#endif
// only sum pixels within disc-shaped kernel
//float l = dx*dx + dy*dy;
//if (l <= r*r)
//{
int index = (dx+1)+(dy+1)*3;
float r = float(pixel.x);//*convMat[index];
float g = float(pixel.y);//*convMat[index];
float b = float(pixel.z);//*convMat[index];
#if 1
r *= convMat[index];
g *= convMat[index];
b *= convMat[index];
#endif
rsum += r;
gsum += g;
bsum += b;
samples += 1.0f;
//}
}
}
//rsum /= weightSum/4;
//gsum /= weightSum/4;
//bsum /= weightSum/4;
// ABGR
g_odata[y*imgw+x] = rgbToInt(rsum, gsum, bsum);
//g_odata[y*imgw+x] = rgbToInt(x,y,0);
#endif
}
extern "C" void
launch_cudaProcess(dim3 grid, dim3 block, int sbytes,
cudaArray *g_data_array, unsigned int *g_odata,
int imgw, int imgh, int tilew,
int radius, float threshold, float highlight, float convMat[])
{
checkCudaErrors(cudaBindTextureToArray(inTex, g_data_array));
struct cudaChannelFormatDesc desc;
checkCudaErrors(cudaGetChannelDesc(&desc, g_data_array));
#if 0
printf("CUDA Array channel descriptor, bits per component:\n");
printf("X %d Y %d Z %d W %d, kind %d\n",
desc.x,desc.y,desc.z,desc.w,desc.f);
printf("Possible values for channel format kind: i %d, u%d, f%d:\n",
cudaChannelFormatKindSigned, cudaChannelFormatKindUnsigned,
cudaChannelFormatKindFloat);
#endif
//float convMat[9];
//for(int count = 0; count < 9; count++){
// convMat[count] = 1.0;
//}
float w0 = convMat[0];
float w1 = convMat[1];
float w2 = convMat[2];
float w3 = convMat[3];
float w4 = convMat[4];
float w5 = convMat[5];
float w6 = convMat[6];
float w7 = convMat[7];
float w8 = convMat[8];
//printf("\n");
#ifdef GPU_PROFILING
StopWatchInterface *timer = 0;
sdkCreateTimer(&timer);
int nIter = 30;
for (int i = -1; i < nIter; ++i)
{
if (i == 0)
{
sdkStartTimer(&timer);
}
#endif
cudaProcess<<< grid, block, sbytes >>>(g_odata, imgw, imgh,
block.x+(2*radius), radius, 0.8f, 4.0f, w0,w1,w2,w3,w4,w5,w6,w7,w8);
#ifdef GPU_PROFILING
}
cudaDeviceSynchronize();
sdkStopTimer(&timer);
double dSeconds = sdkGetTimerValue(&timer)/((double)nIter * 1000.0);
double dNumTexels = (double)imgw * (double)imgh;
double mtexps = 1.0e-6 * dNumTexels/dSeconds;
if (radius == 4)
{
printf("\n");
printf("postprocessGL, Throughput = %.4f MTexels/s, Time = %.5f s, Size = %.0f Texels, NumDevsUsed = %d, Workgroup = %u\n",
mtexps, dSeconds, dNumTexels, 1, block.x * block.y);
}
#endif
}
|
cs_perm_selection.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include <arpa/inet.h>
#include <math.h>
#include "cs_dbg.h"
#include "cs_cuda.h"
#include "cs_helper.h"
#include "cs_perm_selection.h"
// #define CUDA_DBG
#define CUDA_DBG1
__global__ void d_do_perm_selection_L ( int *dp, int tbl_size,
int *cubep, int cube_size, int random )
{
int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
int i ;
while ( t_idx < cube_size )
{
i = cubep[ t_idx ] ;
i = ( i + random ) % tbl_size ;
// dp[ i ] = t_idx ;
dp[ t_idx ] = i ; //
t_idx += CUDA_MAX_THREADS ;
}
}
void
h_do_perm_selection_L ( int *d_perm_tbl, int tbl_size, int *d_perm_tbl_cube,
int cube_size, int random, int sink )
{
int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
int nBlocks ; //= ( cube_size + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;
// note: the nBlocks is based on cube_size ;
#ifdef CUDA_OBS
fprintf(stderr, "%s: perm %p tblsize %d cube %p cubesize %d random %d\n",
__func__, d_perm_tbl, tbl_size, d_perm_tbl_cube, cube_size,
random ) ;
#endif
set_device_mem_i ( d_perm_tbl, tbl_size, ( sink + random ) % tbl_size ) ;
h_block_adj ( cube_size, nThreadsPerBlock, &nBlocks ) ;
hipLaunchKernelGGL(( d_do_perm_selection_L) , dim3(nBlocks), dim3(nThreadsPerBlock) , 0, 0,
d_perm_tbl, tbl_size, d_perm_tbl_cube, cube_size, random ) ;
hipDeviceSynchronize() ;
#ifdef CUDA_DBG
dbg_p_d_data_i("h_do_perm_selection_L", d_perm_tbl, tbl_size ) ;
#endif
}
__global__ void d_do_perm_selection_R ( int *dp, int tbl_size, int random )
{
int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
int i ;
while ( t_idx < tbl_size )
{
if ( t_idx == 0 )
dp[ t_idx ] = 0 ;
else
{
i = t_idx + random ;
dp[ t_idx ] = i % tbl_size ;
if ( i / tbl_size )
dp[ t_idx ]++ ; // take out 0
}
t_idx += CUDA_MAX_THREADS ;
}
}
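/*
  Worked example (illustrative): with tbl_size = 5 and random = 2 the kernel
  above produces dp = { 0, 3, 4, 1, 2 } -- entry 0 is pinned to 0 and every
  other slot is shifted by random, skipping 0 when the sum wraps past the
  table size.
*/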
void
h_do_perm_selection_R ( int *d_perm_tbl, int tbl_size, int random )
{
int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
int nBlocks ; //= ( tbl_size + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;
#ifdef CUDA_OBS
fprintf( stderr, "%s: perm %p size %d random %d\n",
__func__, d_perm_tbl, tbl_size, random ) ;
#endif
#ifdef CUDA_DBG
if ( tbl_size <= random )
fprintf( stderr, "%s: ERROR tblsize %d >= random %d\n",
__func__, tbl_size, random ) ;
#endif
h_block_adj ( tbl_size, nThreadsPerBlock, &nBlocks ) ;
hipLaunchKernelGGL(( d_do_perm_selection_R) , dim3(nBlocks), dim3(nThreadsPerBlock) , 0, 0,
d_perm_tbl, tbl_size, random ) ;
hipDeviceSynchronize() ;
#ifdef CUDA_OBS
dbg_p_d_data_i("h_do_perm_selection_R", d_perm_tbl, tbl_size ) ;
#endif
}
/*
h_do_get_perm_matrix:
the purpose of this routine is to mark the location, i.e. index, of the elements inside
the cube in relation to the location inside the block
*/
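/*
  Worked example (illustrative): for a block of ox = 4, oy = 3 and a cube of
  cx = 2, cy = 2, cz = 1 the routine writes dp = { 0, 1, 4, 5 } (the block
  indices covered by the cube) and returns *sinkp = 2, the first block index
  that falls outside the cube.
*/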
void
h_do_get_perm_matrix( int *dp, int ox, int oy, int oz,
int cx, int cy, int cz, int *sinkp )
{
int sink = -1, idx, i, j, k, frame_size ;
frame_size = ox * oy ;
for ( i = 0 ; i < cz ; i++ )
{
idx = i * frame_size ;
for ( j = 0 ; j < cy ; j++ )
{
for ( k = 0 ; k < cx ; k++ )
*dp++ = idx++ ;
if (( sink < 0 ) && ox != cx )
sink = cx ;
idx += ( ox - cx ) ;
}
if (( sink < 0 ) && oy != cy )
sink = cy * ox ;
}
if ( sink < 0 )
{
if ( oz != cz )
sink = frame_size * cz ;
else
sink = 0 ; // will be over-written anyway, so just give a number
}
*sinkp = sink ;
}
// try to fit shifting cube in block ...
int
cube_size( int *p )
{
int k, i ;
k = 1 ;
i = 3 ;
while ( i-- )
k *= *p++ ;
return ( k ) ;
}
void
ck_blk( char *s, int *small, int *large )
{
int i ;
i = 3 ;
while ( i-- )
{
if ( small[i] > large[i] )
{
printf("%s: %s small %d %d %d large %d %d %d\n",
__func__, s,
small[0], small[1], small[2],
large[0], large[1], large[2]) ;
exit( 33 ) ;
}
}
}
void
ck_size( const char *s, int *p, int size )
{
int k, i ;
k = 1 ;
i = 3 ;
while ( i-- )
k *= *p++ ;
if ( k > size )
{
printf("%s: %s got %d need %d\n", __func__, s, k, size ) ;
exit( 33 ) ;
}
}
int
h_do_find_perm_size( int bx, int by, int bz, int *cx,
int *cy, int *cz, int max_z, int nmea, int min_x, int min_y )
{
double f ;
int dox, done_once, bb[3], cc[3], xy, yz, xz, i, j, k ;
bb[0] = bx ; // block
bb[1] = by ;
bb[2] = bz ;
#ifdef CUDA_DBG1
printf("%s: block %d %d %d cube %d %d %d max_z %d nmea %d min x/y %d %d\n",
__func__, bx, by, bz, *cx, *cy, *cz, max_z, nmea, min_x, min_y ) ;
#endif
k = cube_size( bb ) ;
if ( nmea >= k )
{
*cx = bx ;
*cy = by ;
*cz = bz ;
return ( 1 ) ;
}
cc[0] = *cx ; // selection
cc[1] = *cy ;
cc[2] = *cz ;
if (( cc[0] > bb[0] ) || ( cc[1] > bb[1] ) || ( cc[2] > bb[2] ))
{
#ifdef CUDA_DBG1
printf("size mismatch: %d %d %d -- %d %d %d -- %d\n", cc[0], cc[1], cc[2],
bb[0], bb[1], bb[2], nmea ) ;
#endif
return ( 0 ) ;
}
#ifdef CUDA_DBG1
printf("%s: init: %d %d %d -- %d %d %d -- %d\n", __func__, cc[0], cc[1],
cc[2], bb[0], bb[1], bb[2], nmea ) ;
#endif
i = cube_size( cc ) ;
if ( !i )
{
#ifdef CUDA_DBG1
printf("%s: size 0: %d %d %d -- %d %d %d -- %d\n", __func__,
cc[0], cc[1], cc[2], bb[0], bb[1], bb[2], nmea ) ;
#endif
return ( 0 ) ;
}
f = ( double )nmea / ( double )i ;
#ifdef CUDA_DBG1
printf("2: f %f i %d \n", f, i ) ;
#endif
if ( f < 1.0 ) // razi ...
{
#ifdef CUDA_DBG1
printf("%s:less than 1.0: %d %d %d -- %d %d %d -- %d f %f\n",
__func__, cc[0], cc[1], cc[2], bb[0], bb[1], bb[2], nmea, f ) ;
#endif
return ( 0 ) ;
}
f = pow ( f, 1.0/3.0 ) ;
// it will not shrink ... razi
i = 3 ;
while ( i-- )
cc[i] = ( int )( f * ( double ) cc[i] ) ;
if ( cc[2] > max_z )
cc[2] = max_z ;
#ifdef CUDA_DBG1
printf("%s: max: %d %d %d t %d -- %f mea %d \n", __func__,
cc[0], cc[1], cc[2], cube_size( cc ), f, nmea ) ;
#endif
#ifdef CUDA_DBG1
ck_size( "first adjust", cc, nmea ) ;
#endif
// ok ... less than nmeas ... make sure it is inside the blk
i = 3 ;
while ( i-- )
{
if ( cc[i] > bb[i] )
{
f = (( double ) bb[i] ) / (( double ) cc[i] ) ;
for ( j = 0 ; j < 3 ; j++ )
cc[j] = ( int )(( double )cc[j] * f + 0.5 ) ; // round up
}
}
if ( cc[2] > max_z )
cc[2] = max_z ;
if ( cc[0] < min_x )
cc[0] = min_x ;
if ( cc[1] < min_y )
cc[1] = min_y ;
i = nmea / ( cc[0] * cc[1] ) ;
if ( cc[2] > i )
cc[2] = i ;
#ifdef CUDA_DBG1
ck_size( "inside the box", cc, nmea ) ;
#endif
// ok ... less than nmeas
// ok ... inside the block
#ifdef CUDA_DBG1
printf("%s: inside the box: %d %d %d t %d -- %f -- max %d\n", __func__,
cc[0], cc[1], cc[2], cc[0]* cc[1]* cc[2], f, max_z ) ;
#endif
// ok ... now increase the size ...
done_once = 0 ;
dox = 1 ;
while ( 1 )
{
xy = cc[0] * cc[1] ;
xz = cc[0] * cc[2] ;
yz = cc[1] * cc[2] ;
k = nmea - cube_size( cc ) ;
done_once++ ;
if (( cc[0] > min_x ) && ( cc[1] > min_y ) && ( k >= xy ) && ( cc[2] < bz ) && ( cc[2] < max_z ))
{
cc[2]++ ;
done_once = 0 ;
} else
{
if ( dox )
{
dox = 0 ;
if (( k >= yz ) && ( cc[0] < bx ))
{
done_once = 0 ;
cc[0]++ ;
} else if (( k >= xz ) && ( cc[1] < by ))
{
cc[1]++ ;
done_once = 0 ;
}
} else
{
dox = 1 ;
if (( k >= xz ) && ( cc[1] < by ))
{
cc[1]++ ;
done_once = 0 ;
} else if (( k >= yz ) && ( cc[0] < bx ))
{
cc[0]++ ;
done_once = 0 ;
}
}
}
#ifdef CUDA_DBG1
printf("%s: searching: %d %d %d t %d -- done %d\n", __func__,
cc[0], cc[1], cc[2], cube_size( cc ), done_once ) ;
#endif
if ( done_once == 3 )
break ;
}
#ifdef CUDA_OBS1
printf("%s: winner: %d %d %d t %d %d -- %d\n", __func__,
cc[0], cc[1], cc[2], cube_size( cc ), nmea, nmea - cube_size(cc) ) ;
#endif
*cx = cc[0] ;
*cy = cc[1] ;
*cz = cc[2] ;
return ( 1 ) ;
}
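/*
  Hedged usage sketch (all values here are assumptions, not taken from this file):

      int cx = 4, cy = 4, cz = 2 ;                 // initial cube guess
      if ( h_do_find_perm_size( 8, 8, 4,           // block bx, by, bz
              &cx, &cy, &cz,
              4,                                   // max_z
              100,                                 // nmea: measurement budget
              2, 2 ))                              // min_x, min_y
      {
          // on success cx*cy*cz <= nmea and the cube fits inside the block
      }
*/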
| cs_perm_selection.cu | #include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include <arpa/inet.h>
#include <math.h>
#include "cs_dbg.h"
#include "cs_cuda.h"
#include "cs_helper.h"
#include "cs_perm_selection.h"
// #define CUDA_DBG
#define CUDA_DBG1
__global__ void d_do_perm_selection_L ( int *dp, int tbl_size,
int *cubep, int cube_size, int random )
{
int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
int i ;
while ( t_idx < cube_size )
{
i = cubep[ t_idx ] ;
i = ( i + random ) % tbl_size ;
// dp[ i ] = t_idx ;
dp[ t_idx ] = i ; //
t_idx += CUDA_MAX_THREADS ;
}
}
void
h_do_perm_selection_L ( int *d_perm_tbl, int tbl_size, int *d_perm_tbl_cube,
int cube_size, int random, int sink )
{
int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
int nBlocks ; //= ( cube_size + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;
// note: the nBlocks is based on cube_size ;
#ifdef CUDA_OBS
fprintf(stderr, "%s: perm %p tblsize %d cube %p cubesize %d random %d\n",
__func__, d_perm_tbl, tbl_size, d_perm_tbl_cube, cube_size,
random ) ;
#endif
set_device_mem_i ( d_perm_tbl, tbl_size, ( sink + random ) % tbl_size ) ;
h_block_adj ( cube_size, nThreadsPerBlock, &nBlocks ) ;
d_do_perm_selection_L <<< nBlocks, nThreadsPerBlock >>> (
d_perm_tbl, tbl_size, d_perm_tbl_cube, cube_size, random ) ;
cudaThreadSynchronize() ;
#ifdef CUDA_DBG
dbg_p_d_data_i("h_do_perm_selection_L", d_perm_tbl, tbl_size ) ;
#endif
}
__global__ void d_do_perm_selection_R ( int *dp, int tbl_size, int random )
{
int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
int i ;
while ( t_idx < tbl_size )
{
if ( t_idx == 0 )
dp[ t_idx ] = 0 ;
else
{
i = t_idx + random ;
dp[ t_idx ] = i % tbl_size ;
if ( i / tbl_size )
dp[ t_idx ]++ ; // take out 0
}
t_idx += CUDA_MAX_THREADS ;
}
}
void
h_do_perm_selection_R ( int *d_perm_tbl, int tbl_size, int random )
{
int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
int nBlocks ; //= ( tbl_size + ( nThreadsPerBlock - 1 ))/nThreadsPerBlock ;
#ifdef CUDA_OBS
fprintf( stderr, "%s: perm %p size %d random %d\n",
__func__, d_perm_tbl, tbl_size, random ) ;
#endif
#ifdef CUDA_DBG
if ( tbl_size <= random )
fprintf( stderr, "%s: ERROR tblsize %d >= random %d\n",
__func__, tbl_size, random ) ;
#endif
h_block_adj ( tbl_size, nThreadsPerBlock, &nBlocks ) ;
d_do_perm_selection_R <<< nBlocks, nThreadsPerBlock >>> (
d_perm_tbl, tbl_size, random ) ;
cudaThreadSynchronize() ;
#ifdef CUDA_OBS
dbg_p_d_data_i("h_do_perm_selection_R", d_perm_tbl, tbl_size ) ;
#endif
}
/*
h_do_get_perm_matrix:
the purpose of this routine is to mark the location, i.e. index, of the elements inside
the cube in relation to the location inside the block
*/
void
h_do_get_perm_matrix( int *dp, int ox, int oy, int oz,
int cx, int cy, int cz, int *sinkp )
{
int sink = -1, idx, i, j, k, frame_size ;
frame_size = ox * oy ;
for ( i = 0 ; i < cz ; i++ )
{
idx = i * frame_size ;
for ( j = 0 ; j < cy ; j++ )
{
for ( k = 0 ; k < cx ; k++ )
*dp++ = idx++ ;
if (( sink < 0 ) && ox != cx )
sink = cx ;
idx += ( ox - cx ) ;
}
if (( sink < 0 ) && oy != cy )
sink = cy * ox ;
}
if ( sink < 0 )
{
if ( oz != cz )
sink = frame_size * cz ;
else
sink = 0 ; // will be over-written anyway, so just give a number
}
*sinkp = sink ;
}
// try to fit shifting cube in block ...
int
cube_size( int *p )
{
int k, i ;
k = 1 ;
i = 3 ;
while ( i-- )
k *= *p++ ;
return ( k ) ;
}
void
ck_blk( char *s, int *small, int *large )
{
int i ;
i = 3 ;
while ( i-- )
{
if ( small[i] > large[i] )
{
printf("%s: %s small %d %d %d large %d %d %d\n",
__func__, s,
small[0], small[1], small[2],
large[0], large[1], large[2]) ;
exit( 33 ) ;
}
}
}
void
ck_size( const char *s, int *p, int size )
{
int k, i ;
k = 1 ;
i = 3 ;
while ( i-- )
k *= *p++ ;
if ( k > size )
{
printf("%s: %s got %d need %d\n", __func__, s, k, size ) ;
exit( 33 ) ;
}
}
int
h_do_find_perm_size( int bx, int by, int bz, int *cx,
int *cy, int *cz, int max_z, int nmea, int min_x, int min_y )
{
double f ;
int dox, done_once, bb[3], cc[3], xy, yz, xz, i, j, k ;
bb[0] = bx ; // block
bb[1] = by ;
bb[2] = bz ;
#ifdef CUDA_DBG1
printf("%s: block %d %d %d cube %d %d %d max_z %d nmea %d min x/y %d %d\n",
__func__, bx, by, bz, *cx, *cy, *cz, max_z, nmea, min_x, min_y ) ;
#endif
k = cube_size( bb ) ;
if ( nmea >= k )
{
*cx = bx ;
*cy = by ;
*cz = bz ;
return ( 1 ) ;
}
cc[0] = *cx ; // selection
cc[1] = *cy ;
cc[2] = *cz ;
if (( cc[0] > bb[0] ) || ( cc[1] > bb[1] ) || ( cc[2] > bb[2] ))
{
#ifdef CUDA_DBG1
printf("size mismatch: %d %d %d -- %d %d %d -- %d\n", cc[0], cc[1], cc[2],
bb[0], bb[1], bb[2], nmea ) ;
#endif
return ( 0 ) ;
}
#ifdef CUDA_DBG1
printf("%s: init: %d %d %d -- %d %d %d -- %d\n", __func__, cc[0], cc[1],
cc[2], bb[0], bb[1], bb[2], nmea ) ;
#endif
i = cube_size( cc ) ;
if ( !i )
{
#ifdef CUDA_DBG1
printf("%s: size 0: %d %d %d -- %d %d %d -- %d\n", __func__,
cc[0], cc[1], cc[2], bb[0], bb[1], bb[2], nmea ) ;
#endif
return ( 0 ) ;
}
f = ( double )nmea / ( double )i ;
#ifdef CUDA_DBG1
printf("2: f %f i %d \n", f, i ) ;
#endif
if ( f < 1.0 ) // razi ...
{
#ifdef CUDA_DBG1
printf("%s:less than 1.0: %d %d %d -- %d %d %d -- %d f %f\n",
__func__, cc[0], cc[1], cc[2], bb[0], bb[1], bb[2], nmea, f ) ;
#endif
return ( 0 ) ;
}
f = pow ( f, 1.0/3.0 ) ;
// it will not shrink ... razi
i = 3 ;
while ( i-- )
cc[i] = ( int )( f * ( double ) cc[i] ) ;
if ( cc[2] > max_z )
cc[2] = max_z ;
#ifdef CUDA_DBG1
printf("%s: max: %d %d %d t %d -- %f mea %d \n", __func__,
cc[0], cc[1], cc[2], cube_size( cc ), f, nmea ) ;
#endif
#ifdef CUDA_DBG1
ck_size( "first adjust", cc, nmea ) ;
#endif
// ok ... less than nmeas ... make sure it is inside the blk
i = 3 ;
while ( i-- )
{
if ( cc[i] > bb[i] )
{
f = (( double ) bb[i] ) / (( double ) cc[i] ) ;
for ( j = 0 ; j < 3 ; j++ )
cc[j] = ( int )(( double )cc[j] * f + 0.5 ) ; // round up
}
}
if ( cc[2] > max_z )
cc[2] = max_z ;
if ( cc[0] < min_x )
cc[0] = min_x ;
if ( cc[1] < min_y )
cc[1] = min_y ;
i = nmea / ( cc[0] * cc[1] ) ;
if ( cc[2] > i )
cc[2] = i ;
#ifdef CUDA_DBG1
ck_size( "inside the box", cc, nmea ) ;
#endif
// ok ... less than nmeas
// ok ... inside the block
#ifdef CUDA_DBG1
printf("%s: inside the box: %d %d %d t %d -- %f -- max %d\n", __func__,
cc[0], cc[1], cc[2], cc[0]* cc[1]* cc[2], f, max_z ) ;
#endif
// ok ... now increase the size ...
done_once = 0 ;
dox = 1 ;
while ( 1 )
{
xy = cc[0] * cc[1] ;
xz = cc[0] * cc[2] ;
yz = cc[1] * cc[2] ;
k = nmea - cube_size( cc ) ;
done_once++ ;
if (( cc[0] > min_x ) && ( cc[1] > min_y ) && ( k >= xy ) && ( cc[2] < bz ) && ( cc[2] < max_z ))
{
cc[2]++ ;
done_once = 0 ;
} else
{
if ( dox )
{
dox = 0 ;
if (( k >= yz ) && ( cc[0] < bx ))
{
done_once = 0 ;
cc[0]++ ;
} else if (( k >= xz ) && ( cc[1] < by ))
{
cc[1]++ ;
done_once = 0 ;
}
} else
{
dox = 1 ;
if (( k >= xz ) && ( cc[1] < by ))
{
cc[1]++ ;
done_once = 0 ;
} else if (( k >= yz ) && ( cc[0] < bx ))
{
cc[0]++ ;
done_once = 0 ;
}
}
}
#ifdef CUDA_DBG1
printf("%s: searching: %d %d %d t %d -- done %d\n", __func__,
cc[0], cc[1], cc[2], cube_size( cc ), done_once ) ;
#endif
if ( done_once == 3 )
break ;
}
#ifdef CUDA_OBS1
printf("%s: winner: %d %d %d t %d %d -- %d\n", __func__,
cc[0], cc[1], cc[2], cube_size( cc ), nmea, nmea - cube_size(cc) ) ;
#endif
*cx = cc[0] ;
*cy = cc[1] ;
*cz = cc[2] ;
return ( 1 ) ;
}
|
61cbc8b1894b06725808842b0d2c3fe784c1d9a3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <unistd.h>
#include <fcntl.h>
#include <float.h>
#include <sys/time.h>
#define BLOCK_X 16
#define BLOCK_Y 16
#define PI acos(-1)
texture <float> tex_CDF;
texture <float> tex_sums;
const int threads_per_block = 512;
/**
@var M value for Linear Congruential Generator (LCG); use GCC's value
*/
long M = INT_MAX;
/**
@var A value for LCG
*/
int A = 1103515245;
/**
@var C value for LCG
*/
int C = 12345;
/*****************************
*GET_TIME
*returns a long long representing the current time in microseconds
*****************************/
long long get_time() {
struct timeval tv;
gettimeofday(&tv, NULL);
return (tv.tv_sec * 1000000) +tv.tv_usec;
}
// Returns the number of seconds elapsed between the two specified times
float elapsed_time(long long start_time, long long end_time) {
return (float) (end_time - start_time) / (1000 * 1000);
}
/*****************************
* CHECK_ERROR
* Checks for CUDA errors and prints them to the screen to help with
* debugging of CUDA related programming
*****************************/
void check_error(hipError_t e) {
if (e != hipSuccess) {
printf("\nCUDA error: %s\n", hipGetErrorString(e));
exit(1);
}
}
void cuda_print_float_array(float *array_GPU, size_t size) {
//allocate temporary array for printing
float* mem = (float*) malloc(sizeof (float) *size);
//transfer data from device
hipMemcpy(mem, array_GPU, sizeof (float) *size, hipMemcpyDeviceToHost);
printf("PRINTING ARRAY VALUES\n");
//print values in memory
for (size_t i = 0; i < size; ++i) {
printf("[%d]:%0.6f\n", i, mem[i]);
}
printf("FINISHED PRINTING ARRAY VALUES\n");
//clean up memory
free(mem);
mem = NULL;
}
/********************************
* CALC LIKELIHOOD SUM
* DETERMINES THE LIKELIHOOD SUM BASED ON THE FORMULA: SUM( ((IK[IND] - 100)^2 - (IK[IND] - 228)^2) / 50 )
* param 1 I 3D matrix
* param 2 current ind array
* param 3 length of ind array
* returns a float representing the sum
********************************/
__device__ float calcLikelihoodSum(unsigned char * I, int * ind, int numOnes, int index) {
float likelihoodSum = 0.0;
int x;
for (x = 0; x < numOnes; x++)
likelihoodSum += (pow((float) (I[ind[index * numOnes + x]] - 100), 2) - pow((float) (I[ind[index * numOnes + x]] - 228), 2)) / 50.0;
return likelihoodSum;
}
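/*
  Worked example (illustrative): a background pixel (I = 100) contributes
  (0 - 128*128)/50 = -327.68 to the sum, while a foreground pixel (I = 228)
  contributes (128*128 - 0)/50 = +327.68, so particles whose template covers
  the bright object receive the largest likelihood.
*/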
/****************************
CDF CALCULATE
CALCULATES CDF
param1 CDF
param2 weights
param3 Nparticles
*****************************/
__device__ void cdfCalc(float * CDF, float * weights, int Nparticles) {
int x;
CDF[0] = weights[0];
for (x = 1; x < Nparticles; x++) {
CDF[x] = weights[x] + CDF[x - 1];
}
}
/*****************************
* RANDU
* GENERATES A UNIFORM DISTRIBUTION
* returns a float representing a randomly generated number from a uniform distribution with range [0, 1)
******************************/
__device__ float d_randu(int * seed, int index) {
int M = INT_MAX;
int A = 1103515245;
int C = 12345;
int num = A * seed[index] + C;
seed[index] = num % M;
num = seed[index];
return fabs(num / ((float) M));
}/**
* Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
* @see http://en.wikipedia.org/wiki/Linear_congruential_generator
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a uniformly distributed number [0, 1)
*/
float randu(int * seed, int index) {
int num = A * seed[index] + C;
seed[index] = num % M;
return fabs(seed[index] / ((double) M));
}
/**
* Generates a normally distributed random number using the Box-Muller transformation
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a float representing a random number generated using the Box-Muller algorithm
* @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
*/
float randn(int * seed, int index) {
/*Box-Muller algorithm*/
double u = randu(seed, index);
double v = randu(seed, index);
double cosine = cos(2 * PI * v);
double rt = -2 * log(u);
return sqrt(rt) * cosine;
}
__device__ float d_randn(int * seed, int index) {
//Box-Muller algorithm
float pi = 3.14159265358979323846;
float u = d_randu(seed, index);
float v = d_randu(seed, index);
float cosine = cos(2 * pi * v);
float rt = -2 * log(u);
return sqrt(rt) * cosine;
}
/****************************
UPDATE WEIGHTS
UPDATES WEIGHTS
param1 weights
param2 likelihood
param3 Nparticles
****************************/
__device__ float updateWeights(float * weights, float * likelihood, int Nparticles) {
int x;
float sum = 0;
for (x = 0; x < Nparticles; x++) {
weights[x] = weights[x] * exp(likelihood[x]);
sum += weights[x];
}
return sum;
}
__device__ int findIndexBin(float * CDF, int beginIndex, int endIndex, float value) {
if (endIndex < beginIndex)
return -1;
int middleIndex;
while (endIndex > beginIndex) {
middleIndex = beginIndex + ((endIndex - beginIndex) / 2);
if (CDF[middleIndex] >= value) {
if (middleIndex == 0)
return middleIndex;
else if (CDF[middleIndex - 1] < value)
return middleIndex;
else if (CDF[middleIndex - 1] == value) {
while (CDF[middleIndex] == value && middleIndex >= 0)
middleIndex--;
middleIndex++;
return middleIndex;
}
}
if (CDF[middleIndex] > value)
endIndex = middleIndex - 1;
else
beginIndex = middleIndex + 1;
}
return -1;
}
/*****************************
* CUDA Find Index Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param3: CDF
* param4: u
* param5: xj
* param6: yj
* param7: weights
* param8: Nparticles
*****************************/
__global__ void find_index_kernel(float * arrayX, float * arrayY, float * CDF, float * u, float * xj, float * yj, float * weights, int Nparticles) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
if (i < Nparticles) {
int index = -1;
int x;
for (x = 0; x < Nparticles; x++) {
if (tex1Dfetch(tex_CDF, x) >= u[i]) {
index = x;
break;
}
}
if (index == -1) {
index = Nparticles - 1;
}
xj[i] = arrayX[index];
yj[i] = arrayY[index];
weights[i] = 1 / ((float) (Nparticles));
}
__syncthreads();
}
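/*
  Worked example (illustrative): with Nparticles = 4, CDF = {0.1, 0.4, 0.7, 1.0}
  and u[i] = 0.5, the scan above stops at index 2 (the first CDF entry >= 0.5),
  so particle i is resampled from particle 2's state.
*/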
__global__ void normalize_weights_kernel(float * weights, int Nparticles, float * partial_sums, float * CDF, float * u, int * seed) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
__shared__ float u1, sumWeights;
sumWeights = partial_sums[0];
if (i < Nparticles) {
weights[i] = weights[i] / sumWeights;
}
if (i == 0) {
cdfCalc(CDF, weights, Nparticles);
u1 = (1 / ((float) (Nparticles))) * d_randu(seed, i);
}
if (i < Nparticles) {
__syncthreads();
u[i] = u1 + i / ((float) (Nparticles));
}
}
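/*
  Illustrative note: with Nparticles = 4 and u1 drawn from [0, 0.25), the
  kernel above produces the stratified offsets u = { u1, u1 + 0.25,
  u1 + 0.5, u1 + 0.75 }, i.e. one sample per quarter of the CDF
  (systematic resampling).
*/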
__global__ void sum_kernel(float* partial_sums, int Nparticles) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
if (i == 0) {
int x;
float sum = 0;
for (x = 0; x < Nparticles / 512; x++) {
sum += tex1Dfetch(tex_sums, x);
}
partial_sums[0] = sum;
}
}
/*****************************
* CUDA Likelihood Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param2.5: CDF
* param3: ind
* param4: objxy
* param5: likelihood
* param6: I
* param6.5: u
* param6.75: weights
* param7: Nparticles
* param8: countOnes
* param9: max_size
* param10: k
* param11: IszY
* param12: Nfr
*****************************/
__global__ void likelihood_kernel(float * arrayX, float * arrayY, float * CDF, int * ind, int * objxy, float * likelihood, unsigned char * I, float * u, float * weights, int Nparticles, int countOnes, int max_size, int k, int IszY, int Nfr, int *seed, float * partial_sums) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
int y;
float indX, indY;
__shared__ float buffer[512];
if (i < Nparticles) {
arrayX[i] = arrayX[i] + 1.0 + 5.0 * d_randn(seed, i);
arrayY[i] = arrayY[i] - 2.0 + 2.0 * d_randn(seed, i);
__syncthreads();
}
if (i < Nparticles) {
for (y = 0; y < countOnes; y++) {
indX = round(arrayX[i]) + objxy[y * 2 + 1];
indY = round(arrayY[i]) + objxy[y * 2];
ind[i * countOnes + y] = fabs(indX * IszY * Nfr + indY * Nfr + k);
if (ind[i * countOnes + y] >= max_size)
ind[i * countOnes + y] = 0;
}
likelihood[i] = calcLikelihoodSum(I, ind, countOnes, i);
likelihood[i] = likelihood[i] / countOnes;
__syncthreads();
}
if (i < Nparticles) {
weights[i] = weights[i] * likelihood[i];
__syncthreads();
buffer[threadIdx.x] = weights[i];
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
buffer[threadIdx.x] += buffer[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x == 0) {
partial_sums[blockIdx.x] = buffer[0];
}
__syncthreads();
}//*/
}
/**
* Takes in a double and returns an integer that approximates to that double
* @return the input rounded down if its fractional part is < .5, otherwise rounded up
*/
double roundDouble(double value) {
int newValue = (int) (value);
if (value - newValue < .5)
return newValue;
else
return newValue + 1; // was "return newValue++", which returned the unrounded value
}
/**
* Set values of the 3D array to a newValue if that value is equal to the testValue
* @param testValue The value to be replaced
* @param newValue The value to replace testValue with
* @param array3D The image vector
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
void setIf(int testValue, int newValue, unsigned char * array3D, int * dimX, int * dimY, int * dimZ) {
int x, y, z;
for (x = 0; x < *dimX; x++) {
for (y = 0; y < *dimY; y++) {
for (z = 0; z < *dimZ; z++) {
if (array3D[x * *dimY * *dimZ + y * *dimZ + z] == testValue)
array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue;
}
}
}
}
/**
* Sets values of 3D matrix using randomly generated numbers from a normal distribution
* @param array3D The video to be modified
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param seed The seed array
*/
void addNoise(unsigned char * array3D, int * dimX, int * dimY, int * dimZ, int * seed) {
int x, y, z;
for (x = 0; x < *dimX; x++) {
for (y = 0; y < *dimY; y++) {
for (z = 0; z < *dimZ; z++) {
array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (unsigned char) (5 * randn(seed, 0));
}
}
}
}
/**
* Fills a radius x radius matrix representing the disk
* @param disk The pointer to the disk to be made
* @param radius The radius of the disk to be made
*/
void strelDisk(int * disk, int radius) {
int diameter = radius * 2 - 1;
int x, y;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
double distance = sqrt(pow((double) (x - radius + 1), 2) + pow((double) (y - radius + 1), 2));
if (distance < radius)
disk[x * diameter + y] = 1;
}
}
}
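/*
  Illustrative note: for radius = 5 (the value particleFilter() uses below)
  the disk spans a 9 x 9 grid and strelDisk() marks the 69 cells whose
  distance from the center is strictly less than 5, so with a zero-initialized
  disk buffer countOnes works out to 69.
*/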
/**
* Dilates the provided video
* @param matrix The video to be dilated
* @param posX The x location of the pixel to be dilated
* @param posY The y location of the pixel to be dilated
* @param posZ The z location of the pixel to be dilated
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param error The error radius
*/
void dilate_matrix(unsigned char * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error) {
int startX = posX - error;
while (startX < 0)
startX++;
int startY = posY - error;
while (startY < 0)
startY++;
int endX = posX + error;
while (endX > dimX)
endX--;
int endY = posY + error;
while (endY > dimY)
endY--;
int x, y;
for (x = startX; x < endX; x++) {
for (y = startY; y < endY; y++) {
double distance = sqrt(pow((double) (x - posX), 2) + pow((double) (y - posY), 2));
if (distance < error)
matrix[x * dimY * dimZ + y * dimZ + posZ] = 1;
}
}
}
/**
* Dilates the target matrix using the radius as a guide
* @param matrix The reference matrix
* @param dimX The x dimension of the video
* @param dimY The y dimension of the video
* @param dimZ The z dimension of the video
* @param error The error radius to be dilated
* @param newMatrix The target matrix
*/
void imdilate_disk(unsigned char * matrix, int dimX, int dimY, int dimZ, int error, unsigned char * newMatrix) {
int x, y, z;
for (z = 0; z < dimZ; z++) {
for (x = 0; x < dimX; x++) {
for (y = 0; y < dimY; y++) {
if (matrix[x * dimY * dimZ + y * dimZ + z] == 1) {
dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
}
}
}
}
}
/**
* Fills a 2D array describing the offsets of the disk object
* @param se The disk object
* @param numOnes The number of ones in the disk
* @param neighbors The array that will contain the offsets
* @param radius The radius used for dilation
*/
void getneighbors(int * se, int numOnes, int * neighbors, int radius) {
int x, y;
int neighY = 0;
int center = radius - 1;
int diameter = radius * 2 - 1;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
if (se[x * diameter + y]) {
neighbors[neighY * 2] = (int) (y - center);
neighbors[neighY * 2 + 1] = (int) (x - center);
neighY++;
}
}
}
}
/**
* The synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
* the foreground intensity and the background intensity are known
* the image is corrupted with zero mean Gaussian noise
* @param I The video itself
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames of the video
* @param seed The seed array used for number generation
*/
void videoSequence(unsigned char * I, int IszX, int IszY, int Nfr, int * seed) {
int k;
int max_size = IszX * IszY*Nfr;
/*get object centers*/
int x0 = (int) roundDouble(IszY / 2.0);
int y0 = (int) roundDouble(IszX / 2.0);
I[x0 * IszY * Nfr + y0 * Nfr + 0] = 1;
/*move point*/
int xk, yk, pos;
for (k = 1; k < Nfr; k++) {
xk = abs(x0 + (k));
yk = abs(y0 - 2 * (k));
pos = yk * IszY * Nfr + xk * Nfr + k;
if (pos >= max_size)
pos = 0;
I[pos] = 1;
}
/*dilate matrix*/
unsigned char * newMatrix = (unsigned char *) malloc(sizeof (unsigned char) *IszX * IszY * Nfr);
imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
int x, y;
for (x = 0; x < IszX; x++) {
for (y = 0; y < IszY; y++) {
for (k = 0; k < Nfr; k++) {
I[x * IszY * Nfr + y * Nfr + k] = newMatrix[x * IszY * Nfr + y * Nfr + k];
}
}
}
free(newMatrix);
/*define background, add noise*/
setIf(0, 100, I, &IszX, &IszY, &Nfr);
setIf(1, 228, I, &IszX, &IszY, &Nfr);
/*add noise*/
addNoise(I, &IszX, &IszY, &Nfr, seed);
}
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
* @note This function uses sequential search
* @param CDF The CDF
* @param lengthCDF The length of CDF
* @param value The value to be found
* @return The index of value in the CDF; if value is never found, returns the last index
*/
int findIndex(float * CDF, int lengthCDF, float value) {
int index = -1;
int x;
for (x = 0; x < lengthCDF; x++) {
if (CDF[x] >= value) {
index = x;
break;
}
}
if (index == -1) {
return lengthCDF - 1;
}
return index;
}
/**
* The implementation of the particle filter using CUDA for many frames
* @see http://openmp.org/wp/
* @note This function is designed to work with a video of several frames. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods
* @param I The video to be run
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
*/
void particleFilter(unsigned char * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles) {
int max_size = IszX * IszY*Nfr;
//original particle centroid
float xe = roundDouble(IszY / 2.0);
float ye = roundDouble(IszX / 2.0);
//expected object locations, compared to center
int radius = 5;
int diameter = radius * 2 - 1; // strelDisk() works on a (2*radius - 1) x (2*radius - 1) grid
int * disk = (int*) calloc(diameter * diameter, sizeof (int)); // zero-init so countOnes only counts cells set by strelDisk
strelDisk(disk, radius);
int countOnes = 0;
int x, y;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
if (disk[x * diameter + y] == 1)
countOnes++;
}
}
int * objxy = (int *) malloc(countOnes * 2 * sizeof (int));
getneighbors(disk, countOnes, objxy, radius);
//initial weights are all equal (1/Nparticles)
float * weights = (float *) malloc(sizeof (float) *Nparticles);
for (x = 0; x < Nparticles; x++) {
weights[x] = 1 / ((float) (Nparticles));
}
//initial likelihood to 0.0
float * likelihood = (float *) malloc(sizeof (float) *Nparticles);
float * arrayX = (float *) malloc(sizeof (float) *Nparticles);
float * arrayY = (float *) malloc(sizeof (float) *Nparticles);
float * xj = (float *) malloc(sizeof (float) *Nparticles);
float * yj = (float *) malloc(sizeof (float) *Nparticles);
float * CDF = (float *) malloc(sizeof (float) *Nparticles);
//GPU copies of arrays
float * arrayX_GPU;
float * arrayY_GPU;
float * xj_GPU;
float * yj_GPU;
float * CDF_GPU;
float * likelihood_GPU;
unsigned char * I_GPU;
float * weights_GPU;
int * objxy_GPU;
int * ind = (int*) malloc(sizeof (int) *countOnes);
int * ind_GPU;
float * u = (float *) malloc(sizeof (float) *Nparticles);
float * u_GPU;
int * seed_GPU;
float * partial_sums;
//CUDA memory allocation
check_error(hipMalloc((void **) &arrayX_GPU, sizeof (float) *Nparticles));
check_error(hipMalloc((void **) &arrayY_GPU, sizeof (float) *Nparticles));
check_error(hipMalloc((void **) &xj_GPU, sizeof (float) *Nparticles));
check_error(hipMalloc((void **) &yj_GPU, sizeof (float) *Nparticles));
check_error(hipMalloc((void **) &CDF_GPU, sizeof (float) *Nparticles));
check_error(hipMalloc((void **) &u_GPU, sizeof (float) *Nparticles));
check_error(hipMalloc((void **) &likelihood_GPU, sizeof (float) *Nparticles));
check_error(hipMalloc((void **) &weights_GPU, sizeof (float) *Nparticles));
check_error(hipMalloc((void **) &I_GPU, sizeof (unsigned char) *IszX * IszY * Nfr));
check_error(hipMalloc((void **) &objxy_GPU, sizeof (int) *countOnes));
check_error(hipMalloc((void **) &ind_GPU, sizeof (int) *countOnes * Nparticles));
check_error(hipMalloc((void **) &seed_GPU, sizeof (int) *Nparticles));
check_error(hipMalloc((void **) &partial_sums, sizeof (float) *Nparticles));
for (x = 0; x < Nparticles; x++) {
arrayX[x] = xe;
arrayY[x] = ye;
}
int k;
int indX, indY;
//start send
long long send_start = get_time();
check_error(hipMemcpy(I_GPU, I, sizeof (unsigned char) *IszX * IszY*Nfr, hipMemcpyHostToDevice));
check_error(hipMemcpy(objxy_GPU, objxy, sizeof (int) *countOnes, hipMemcpyHostToDevice));
check_error(hipMemcpy(weights_GPU, weights, sizeof (float) *Nparticles, hipMemcpyHostToDevice));
check_error(hipMemcpy(arrayX_GPU, arrayX, sizeof (float) *Nparticles, hipMemcpyHostToDevice));
check_error(hipMemcpy(arrayY_GPU, arrayY, sizeof (float) *Nparticles, hipMemcpyHostToDevice));
check_error(hipMemcpy(seed_GPU, seed, sizeof (int) *Nparticles, hipMemcpyHostToDevice));
long long send_end = get_time();
printf("TIME TO SEND TO GPU: %f\n", elapsed_time(send_start, send_end));
int num_blocks = ceil((float) Nparticles / (float) threads_per_block);
for (k = 1; k < Nfr; k++) {
hipLaunchKernelGGL(( likelihood_kernel) , dim3(num_blocks), dim3(threads_per_block) , 0, 0, arrayX_GPU, arrayY_GPU, CDF_GPU, ind_GPU, objxy_GPU, likelihood_GPU, I_GPU, u_GPU, weights_GPU, Nparticles, countOnes, max_size, k, IszY, Nfr, seed_GPU, partial_sums);
hipBindTexture(0, tex_sums, partial_sums, Nparticles);
hipLaunchKernelGGL(( sum_kernel) , dim3(num_blocks), dim3(threads_per_block) , 0, 0, partial_sums, Nparticles);
check_error(hipUnbindTexture(tex_sums));
hipLaunchKernelGGL(( normalize_weights_kernel) , dim3(num_blocks), dim3(threads_per_block) , 0, 0, weights_GPU, Nparticles, partial_sums, CDF_GPU, u_GPU, seed_GPU);
hipBindTexture(0, tex_CDF, CDF_GPU, Nparticles);
hipLaunchKernelGGL(( find_index_kernel) , dim3(num_blocks), dim3(threads_per_block) , 0, 0, arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, weights_GPU, Nparticles);
check_error(hipUnbindTexture(tex_CDF));
}//end loop
//block till kernels are finished
hipDeviceSynchronize();
long long back_time = get_time();
hipFree(xj_GPU);
hipFree(yj_GPU);
hipFree(CDF_GPU);
hipFree(u_GPU);
hipFree(likelihood_GPU);
hipFree(I_GPU);
hipFree(objxy_GPU);
hipFree(ind_GPU);
hipFree(seed_GPU);
hipFree(partial_sums);
long long free_time = get_time();
check_error(hipMemcpy(arrayX, arrayX_GPU, sizeof (float) *Nparticles, hipMemcpyDeviceToHost));
long long arrayX_time = get_time();
check_error(hipMemcpy(arrayY, arrayY_GPU, sizeof (float) *Nparticles, hipMemcpyDeviceToHost));
long long arrayY_time = get_time();
check_error(hipMemcpy(weights, weights_GPU, sizeof (float) *Nparticles, hipMemcpyDeviceToHost));
long long back_end_time = get_time();
printf("GPU Execution: %lf\n", elapsed_time(send_end, back_time));
printf("FREE TIME: %lf\n", elapsed_time(back_time, free_time));
printf("TIME TO SEND BACK: %lf\n", elapsed_time(back_time, back_end_time));
printf("SEND ARRAY X BACK: %lf\n", elapsed_time(free_time, arrayX_time));
printf("SEND ARRAY Y BACK: %lf\n", elapsed_time(arrayX_time, arrayY_time));
printf("SEND WEIGHTS BACK: %lf\n", elapsed_time(arrayY_time, back_end_time));
xe = 0;
ye = 0;
// estimate the object location by expected values
for (x = 0; x < Nparticles; x++) {
//printf("AFTER: arrayX[%d]::%0.06f arrayY[%d]::%0.06f\n", x, arrayX[x], x, arrayY[x]);
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
}
printf("XE: %lf\n", xe);
printf("YE: %lf\n", ye);
float distance = sqrt(pow((float) (xe - (int) roundDouble(IszY / 2.0)), 2) + pow((float) (ye - (int) roundDouble(IszX / 2.0)), 2));
printf("%lf\n", distance);
//CUDA freeing of memory
hipFree(weights_GPU);
hipFree(arrayY_GPU);
hipFree(arrayX_GPU);
//free regular memory
free(likelihood);
free(arrayX);
free(arrayY);
free(xj);
free(yj);
free(CDF);
free(ind);
free(u);
}
int main(int argc, char * argv[]) {
char* usage = "float.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>";
//check number of arguments
if (argc != 9) {
printf("%s\n", usage);
return 0;
}
//check args delimiters
if (strcmp(argv[1], "-x") || strcmp(argv[3], "-y") || strcmp(argv[5], "-z") || strcmp(argv[7], "-np")) {
printf("%s\n", usage);
return 0;
}
int IszX, IszY, Nfr, Nparticles;
//converting a string to an integer
if (sscanf(argv[2], "%d", &IszX) == EOF) {
printf("ERROR: dimX input is incorrect");
return 0;
}
if (IszX <= 0) {
printf("dimX must be > 0\n");
return 0;
}
//converting a string to an integer
if (sscanf(argv[4], "%d", &IszY) == EOF) {
printf("ERROR: dimY input is incorrect");
return 0;
}
if (IszY <= 0) {
printf("dimY must be > 0\n");
return 0;
}
//converting a string to an integer
if (sscanf(argv[6], "%d", &Nfr) == EOF) {
printf("ERROR: Number of frames input is incorrect");
return 0;
}
if (Nfr <= 0) {
printf("number of frames must be > 0\n");
return 0;
}
//converting a string to an integer
if (sscanf(argv[8], "%d", &Nparticles) == EOF) {
printf("ERROR: Number of particles input is incorrect");
return 0;
}
if (Nparticles <= 0) {
printf("Number of particles must be > 0\n");
return 0;
}
//establish seed
int * seed = (int *) malloc(sizeof (int) *Nparticles);
int i;
for (i = 0; i < Nparticles; i++)
seed[i] = time(0) * i;
//malloc matrix
unsigned char * I = (unsigned char *) malloc(sizeof (unsigned char) *IszX * IszY * Nfr);
long long start = get_time();
//call video sequence
videoSequence(I, IszX, IszY, Nfr, seed);
long long endVideoSequence = get_time();
printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence));
//call particle filter
particleFilter(I, IszX, IszY, Nfr, seed, Nparticles);
long long endParticleFilter = get_time();
printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter));
printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter));
free(seed);
free(I);
return 0;
}
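/*
  Hedged invocation sketch (the dimensions and particle count are assumptions):

      ./float.out -x 128 -y 128 -z 10 -np 1024

  runs the tracker on a synthetic 128 x 128 video of 10 frames with 1024
  particles; Nparticles should be a multiple of threads_per_block (512),
  since sum_kernel only reduces Nparticles / 512 partial sums.
*/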
| 61cbc8b1894b06725808842b0d2c3fe784c1d9a3.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <unistd.h>
#include <fcntl.h>
#include <float.h>
#include <sys/time.h>
#define BLOCK_X 16
#define BLOCK_Y 16
#define PI acos(-1)
texture <float> tex_CDF;
texture <float> tex_sums;
const int threads_per_block = 512;
/**
@var M value for Linear Congruential Generator (LCG); use GCC's value
*/
long M = INT_MAX;
/**
@var A value for LCG
*/
int A = 1103515245;
/**
@var C value for LCG
*/
int C = 12345;
/*****************************
*GET_TIME
*returns a long long representing the current time in microseconds
*****************************/
long long get_time() {
struct timeval tv;
gettimeofday(&tv, NULL);
return (tv.tv_sec * 1000000) +tv.tv_usec;
}
// Returns the number of seconds elapsed between the two specified times
float elapsed_time(long long start_time, long long end_time) {
return (float) (end_time - start_time) / (1000 * 1000);
}
/*****************************
* CHECK_ERROR
* Checks for CUDA errors and prints them to the screen to help with
* debugging of CUDA related programming
*****************************/
void check_error(cudaError e) {
if (e != cudaSuccess) {
printf("\nCUDA error: %s\n", cudaGetErrorString(e));
exit(1);
}
}
void cuda_print_float_array(float *array_GPU, size_t size) {
//allocate temporary array for printing
float* mem = (float*) malloc(sizeof (float) *size);
//transfer data from device
cudaMemcpy(mem, array_GPU, sizeof (float) *size, cudaMemcpyDeviceToHost);
printf("PRINTING ARRAY VALUES\n");
//print values in memory
for (size_t i = 0; i < size; ++i) {
printf("[%d]:%0.6f\n", i, mem[i]);
}
printf("FINISHED PRINTING ARRAY VALUES\n");
//clean up memory
free(mem);
mem = NULL;
}
/********************************
* CALC LIKELIHOOD SUM
* DETERMINES THE LIKELIHOOD SUM BASED ON THE FORMULA: SUM( ((IK[IND] - 100)^2 - (IK[IND] - 228)^2) / 50 )
* param 1 I 3D matrix
* param 2 current ind array
* param 3 length of ind array
* returns a float representing the sum
********************************/
__device__ float calcLikelihoodSum(unsigned char * I, int * ind, int numOnes, int index) {
float likelihoodSum = 0.0;
int x;
for (x = 0; x < numOnes; x++)
likelihoodSum += (pow((float) (I[ind[index * numOnes + x]] - 100), 2) - pow((float) (I[ind[index * numOnes + x]] - 228), 2)) / 50.0;
return likelihoodSum;
}
/****************************
CDF CALCULATE
CALCULATES CDF
param1 CDF
param2 weights
param3 Nparticles
*****************************/
__device__ void cdfCalc(float * CDF, float * weights, int Nparticles) {
int x;
CDF[0] = weights[0];
for (x = 1; x < Nparticles; x++) {
CDF[x] = weights[x] + CDF[x - 1];
}
}
/*****************************
* RANDU
* GENERATES A UNIFORM DISTRIBUTION
* returns a float representing a randomly generated number from a uniform distribution with range [0, 1)
******************************/
__device__ float d_randu(int * seed, int index) {
int M = INT_MAX;
int A = 1103515245;
int C = 12345;
int num = A * seed[index] + C;
seed[index] = num % M;
num = seed[index];
return fabs(num / ((float) M));
}/**
* Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
* @see http://en.wikipedia.org/wiki/Linear_congruential_generator
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a uniformly distributed number [0, 1)
*/
float randu(int * seed, int index) {
int num = A * seed[index] + C;
seed[index] = num % M;
return fabs(seed[index] / ((double) M));
}
/**
* Generates a normally distributed random number using the Box-Muller transformation
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a float representing a random number generated using the Box-Muller algorithm
* @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
*/
float randn(int * seed, int index) {
/*Box-Muller algorithm*/
double u = randu(seed, index);
double v = randu(seed, index);
double cosine = cos(2 * PI * v);
double rt = -2 * log(u);
return sqrt(rt) * cosine;
}
__device__ float d_randn(int * seed, int index) {
//Box-Muller algorithm
float pi = 3.14159265358979323846;
float u = d_randu(seed, index);
float v = d_randu(seed, index);
float cosine = cos(2 * pi * v);
float rt = -2 * log(u);
return sqrt(rt) * cosine;
}
/****************************
UPDATE WEIGHTS
UPDATES WEIGHTS
param1 weights
param2 likelihood
param3 Nparticles
****************************/
__device__ float updateWeights(float * weights, float * likelihood, int Nparticles) {
int x;
float sum = 0;
for (x = 0; x < Nparticles; x++) {
weights[x] = weights[x] * exp(likelihood[x]);
sum += weights[x];
}
return sum;
}
__device__ int findIndexBin(float * CDF, int beginIndex, int endIndex, float value) {
if (endIndex < beginIndex)
return -1;
int middleIndex;
while (endIndex > beginIndex) {
middleIndex = beginIndex + ((endIndex - beginIndex) / 2);
if (CDF[middleIndex] >= value) {
if (middleIndex == 0)
return middleIndex;
else if (CDF[middleIndex - 1] < value)
return middleIndex;
else if (CDF[middleIndex - 1] == value) {
while (CDF[middleIndex] == value && middleIndex >= 0)
middleIndex--;
middleIndex++;
return middleIndex;
}
}
if (CDF[middleIndex] > value)
endIndex = middleIndex - 1;
else
beginIndex = middleIndex + 1;
}
return -1;
}
/*****************************
* CUDA Find Index Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param3: CDF
* param4: u
* param5: xj
* param6: yj
* param7: weights
* param8: Nparticles
*****************************/
__global__ void find_index_kernel(float * arrayX, float * arrayY, float * CDF, float * u, float * xj, float * yj, float * weights, int Nparticles) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
if (i < Nparticles) {
int index = -1;
int x;
for (x = 0; x < Nparticles; x++) {
if (tex1Dfetch(tex_CDF, x) >= u[i]) {
index = x;
break;
}
}
if (index == -1) {
index = Nparticles - 1;
}
xj[i] = arrayX[index];
yj[i] = arrayY[index];
weights[i] = 1 / ((float) (Nparticles));
}
__syncthreads();
}
__global__ void normalize_weights_kernel(float * weights, int Nparticles, float * partial_sums, float * CDF, float * u, int * seed) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
__shared__ float u1, sumWeights;
sumWeights = partial_sums[0];
if (i < Nparticles) {
weights[i] = weights[i] / sumWeights;
}
if (i == 0) {
cdfCalc(CDF, weights, Nparticles);
u1 = (1 / ((float) (Nparticles))) * d_randu(seed, i);
}
if (i < Nparticles) {
__syncthreads();
u[i] = u1 + i / ((float) (Nparticles));
}
}
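/*****************************
 * CUDA Sum Kernel Function
 * thread 0 accumulates the per-block partial weight sums written by
 * likelihood_kernel (read through tex_sums) into partial_sums[0]
 * param1: partial_sums
 * param2: Nparticles
 *****************************/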
__global__ void sum_kernel(float* partial_sums, int Nparticles) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
if (i == 0) {
int x;
float sum = 0;
for (x = 0; x < Nparticles / 512; x++) {
sum += tex1Dfetch(tex_sums, x);
}
partial_sums[0] = sum;
}
}
/*****************************
* CUDA Likelihood Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param2.5: CDF
* param3: ind
* param4: objxy
* param5: likelihood
* param6: I
* param6.5: u
* param6.75: weights
* param7: Nparticles
* param8: countOnes
* param9: max_size
* param10: k
* param11: IszY
* param12: Nfr
*****************************/
__global__ void likelihood_kernel(float * arrayX, float * arrayY, float * CDF, int * ind, int * objxy, float * likelihood, unsigned char * I, float * u, float * weights, int Nparticles, int countOnes, int max_size, int k, int IszY, int Nfr, int *seed, float * partial_sums) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
int y;
float indX, indY;
__shared__ float buffer[512];
if (i < Nparticles) {
arrayX[i] = arrayX[i] + 1.0 + 5.0 * d_randn(seed, i);
arrayY[i] = arrayY[i] - 2.0 + 2.0 * d_randn(seed, i);
__syncthreads();
}
if (i < Nparticles) {
for (y = 0; y < countOnes; y++) {
indX = round(arrayX[i]) + objxy[y * 2 + 1];
indY = round(arrayY[i]) + objxy[y * 2];
ind[i * countOnes + y] = fabs(indX * IszY * Nfr + indY * Nfr + k);
if (ind[i * countOnes + y] >= max_size)
ind[i * countOnes + y] = 0;
}
likelihood[i] = calcLikelihoodSum(I, ind, countOnes, i);
likelihood[i] = likelihood[i] / countOnes;
__syncthreads();
}
if (i < Nparticles) {
weights[i] = weights[i] * likelihood[i];
__syncthreads();
buffer[threadIdx.x] = weights[i];
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
buffer[threadIdx.x] += buffer[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x == 0) {
partial_sums[blockIdx.x] = buffer[0];
}
__syncthreads();
}//*/
}
/**
 * Takes in a double and returns an integer that approximates to that double
 * @return the truncated value if the fractional part is < .5, otherwise the truncated value + 1
 */
double roundDouble(double value) {
int newValue = (int) (value);
if (value - newValue < .5)
return newValue;
else
return newValue + 1;
}
/**
* Set values of the 3D array to a newValue if that value is equal to the testValue
* @param testValue The value to be replaced
* @param newValue The value to replace testValue with
* @param array3D The image vector
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
void setIf(int testValue, int newValue, unsigned char * array3D, int * dimX, int * dimY, int * dimZ) {
int x, y, z;
for (x = 0; x < *dimX; x++) {
for (y = 0; y < *dimY; y++) {
for (z = 0; z < *dimZ; z++) {
if (array3D[x * *dimY * *dimZ + y * *dimZ + z] == testValue)
array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue;
}
}
}
}
/**
* Sets values of 3D matrix using randomly generated numbers from a normal distribution
* @param array3D The video to be modified
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param seed The seed array
*/
void addNoise(unsigned char * array3D, int * dimX, int * dimY, int * dimZ, int * seed) {
int x, y, z;
for (x = 0; x < *dimX; x++) {
for (y = 0; y < *dimY; y++) {
for (z = 0; z < *dimZ; z++) {
array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (unsigned char) (5 * randn(seed, 0));
}
}
}
}
/**
* Fills a radius x radius matrix representing the disk
* @param disk The pointer to the disk to be made
* @param radius The radius of the disk to be made
*/
void strelDisk(int * disk, int radius) {
int diameter = radius * 2 - 1;
int x, y;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
double distance = sqrt(pow((double) (x - radius + 1), 2) + pow((double) (y - radius + 1), 2));
if (distance < radius)
disk[x * diameter + y] = 1;
}
}
}
/**
* Dilates the provided video
* @param matrix The video to be dilated
* @param posX The x location of the pixel to be dilated
* @param posY The y location of the pixel to be dilated
* @param poxZ The z location of the pixel to be dilated
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param error The error radius
*/
void dilate_matrix(unsigned char * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error) {
int startX = posX - error;
while (startX < 0)
startX++;
int startY = posY - error;
while (startY < 0)
startY++;
int endX = posX + error;
while (endX > dimX)
endX--;
int endY = posY + error;
while (endY > dimY)
endY--;
int x, y;
for (x = startX; x < endX; x++) {
for (y = startY; y < endY; y++) {
double distance = sqrt(pow((double) (x - posX), 2) + pow((double) (y - posY), 2));
if (distance < error)
matrix[x * dimY * dimZ + y * dimZ + posZ] = 1;
}
}
}
/**
* Dilates the target matrix using the radius as a guide
* @param matrix The reference matrix
* @param dimX The x dimension of the video
* @param dimY The y dimension of the video
* @param dimZ The z dimension of the video
* @param error The error radius to be dilated
* @param newMatrix The target matrix
*/
void imdilate_disk(unsigned char * matrix, int dimX, int dimY, int dimZ, int error, unsigned char * newMatrix) {
int x, y, z;
for (z = 0; z < dimZ; z++) {
for (x = 0; x < dimX; x++) {
for (y = 0; y < dimY; y++) {
if (matrix[x * dimY * dimZ + y * dimZ + z] == 1) {
dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
}
}
}
}
}
/**
* Fills a 2D array describing the offsets of the disk object
* @param se The disk object
* @param numOnes The number of ones in the disk
* @param neighbors The array that will contain the offsets
* @param radius The radius used for dilation
*/
void getneighbors(int * se, int numOnes, int * neighbors, int radius) {
int x, y;
int neighY = 0;
int center = radius - 1;
int diameter = radius * 2 - 1;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
if (se[x * diameter + y]) {
neighbors[neighY * 2] = (int) (y - center);
neighbors[neighY * 2 + 1] = (int) (x - center);
neighY++;
}
}
}
}
/**
* The synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
 * the foreground intensity and the background intensity are known
* the image is corrupted with zero mean Gaussian noise
* @param I The video itself
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames of the video
* @param seed The seed array used for number generation
*/
void videoSequence(unsigned char * I, int IszX, int IszY, int Nfr, int * seed) {
int k;
int max_size = IszX * IszY*Nfr;
/*get object centers*/
int x0 = (int) roundDouble(IszY / 2.0);
int y0 = (int) roundDouble(IszX / 2.0);
I[x0 * IszY * Nfr + y0 * Nfr + 0] = 1;
/*move point*/
int xk, yk, pos;
for (k = 1; k < Nfr; k++) {
xk = abs(x0 + (k));
yk = abs(y0 - 2 * (k));
pos = yk * IszY * Nfr + xk * Nfr + k;
if (pos >= max_size)
pos = 0;
I[pos] = 1;
}
/*dilate matrix*/
unsigned char * newMatrix = (unsigned char *) malloc(sizeof (unsigned char) *IszX * IszY * Nfr);
imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
int x, y;
for (x = 0; x < IszX; x++) {
for (y = 0; y < IszY; y++) {
for (k = 0; k < Nfr; k++) {
I[x * IszY * Nfr + y * Nfr + k] = newMatrix[x * IszY * Nfr + y * Nfr + k];
}
}
}
free(newMatrix);
/*define background, add noise*/
setIf(0, 100, I, &IszX, &IszY, &Nfr);
setIf(1, 228, I, &IszX, &IszY, &Nfr);
/*add noise*/
addNoise(I, &IszX, &IszY, &Nfr, seed);
}
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
* @note This function uses sequential search
* @param CDF The CDF
* @param lengthCDF The length of CDF
* @param value The value to be found
* @return The index of value in the CDF; if value is never found, returns the last index
*/
int findIndex(float * CDF, int lengthCDF, float value) {
int index = -1;
int x;
for (x = 0; x < lengthCDF; x++) {
if (CDF[x] >= value) {
index = x;
break;
}
}
if (index == -1) {
return lengthCDF - 1;
}
return index;
}
/**
* The implementation of the particle filter using OpenMP for many frames
* @see http://openmp.org/wp/
* @note This function is designed to work with a video of several frames. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods
* @param I The video to be run
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
*/
void particleFilter(unsigned char * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles) {
int max_size = IszX * IszY*Nfr;
//original particle centroid
float xe = roundDouble(IszY / 2.0);
float ye = roundDouble(IszX / 2.0);
//expected object locations, compared to center
int radius = 5;
int diameter = radius * 2 - 1;
int * disk = (int*) calloc(diameter * diameter, sizeof (int));
strelDisk(disk, radius);
int countOnes = 0;
int x, y;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
if (disk[x * diameter + y] == 1)
countOnes++;
}
}
int * objxy = (int *) malloc(countOnes * 2 * sizeof (int));
getneighbors(disk, countOnes, objxy, radius);
//initial weights are all equal (1/Nparticles)
float * weights = (float *) malloc(sizeof (float) *Nparticles);
for (x = 0; x < Nparticles; x++) {
weights[x] = 1 / ((float) (Nparticles));
}
//initial likelihood to 0.0
float * likelihood = (float *) malloc(sizeof (float) *Nparticles);
float * arrayX = (float *) malloc(sizeof (float) *Nparticles);
float * arrayY = (float *) malloc(sizeof (float) *Nparticles);
float * xj = (float *) malloc(sizeof (float) *Nparticles);
float * yj = (float *) malloc(sizeof (float) *Nparticles);
float * CDF = (float *) malloc(sizeof (float) *Nparticles);
//GPU copies of arrays
float * arrayX_GPU;
float * arrayY_GPU;
float * xj_GPU;
float * yj_GPU;
float * CDF_GPU;
float * likelihood_GPU;
unsigned char * I_GPU;
float * weights_GPU;
int * objxy_GPU;
int * ind = (int*) malloc(sizeof (int) *countOnes);
int * ind_GPU;
float * u = (float *) malloc(sizeof (float) *Nparticles);
float * u_GPU;
int * seed_GPU;
float * partial_sums;
//CUDA memory allocation
check_error(cudaMalloc((void **) &arrayX_GPU, sizeof (float) *Nparticles));
check_error(cudaMalloc((void **) &arrayY_GPU, sizeof (float) *Nparticles));
check_error(cudaMalloc((void **) &xj_GPU, sizeof (float) *Nparticles));
check_error(cudaMalloc((void **) &yj_GPU, sizeof (float) *Nparticles));
check_error(cudaMalloc((void **) &CDF_GPU, sizeof (float) *Nparticles));
check_error(cudaMalloc((void **) &u_GPU, sizeof (float) *Nparticles));
check_error(cudaMalloc((void **) &likelihood_GPU, sizeof (float) *Nparticles));
check_error(cudaMalloc((void **) &weights_GPU, sizeof (float) *Nparticles));
check_error(cudaMalloc((void **) &I_GPU, sizeof (unsigned char) *IszX * IszY * Nfr));
check_error(cudaMalloc((void **) &objxy_GPU, sizeof (int) *countOnes));
check_error(cudaMalloc((void **) &ind_GPU, sizeof (int) *countOnes * Nparticles));
check_error(cudaMalloc((void **) &seed_GPU, sizeof (int) *Nparticles));
check_error(cudaMalloc((void **) &partial_sums, sizeof (float) *Nparticles));
for (x = 0; x < Nparticles; x++) {
arrayX[x] = xe;
arrayY[x] = ye;
}
int k;
int indX, indY;
//start send
long long send_start = get_time();
check_error(cudaMemcpy(I_GPU, I, sizeof (unsigned char) *IszX * IszY*Nfr, cudaMemcpyHostToDevice));
check_error(cudaMemcpy(objxy_GPU, objxy, sizeof (int) *countOnes, cudaMemcpyHostToDevice));
check_error(cudaMemcpy(weights_GPU, weights, sizeof (float) *Nparticles, cudaMemcpyHostToDevice));
check_error(cudaMemcpy(arrayX_GPU, arrayX, sizeof (float) *Nparticles, cudaMemcpyHostToDevice));
check_error(cudaMemcpy(arrayY_GPU, arrayY, sizeof (float) *Nparticles, cudaMemcpyHostToDevice));
check_error(cudaMemcpy(seed_GPU, seed, sizeof (int) *Nparticles, cudaMemcpyHostToDevice));
long long send_end = get_time();
printf("TIME TO SEND TO GPU: %f\n", elapsed_time(send_start, send_end));
int num_blocks = ceil((float) Nparticles / (float) threads_per_block);
for (k = 1; k < Nfr; k++) {
likelihood_kernel <<< num_blocks, threads_per_block >>> (arrayX_GPU, arrayY_GPU, CDF_GPU, ind_GPU, objxy_GPU, likelihood_GPU, I_GPU, u_GPU, weights_GPU, Nparticles, countOnes, max_size, k, IszY, Nfr, seed_GPU, partial_sums);
cudaBindTexture(0, tex_sums, partial_sums, Nparticles);
sum_kernel <<< num_blocks, threads_per_block >>> (partial_sums, Nparticles);
check_error(cudaUnbindTexture(tex_sums));
normalize_weights_kernel <<< num_blocks, threads_per_block >>> (weights_GPU, Nparticles, partial_sums, CDF_GPU, u_GPU, seed_GPU);
cudaBindTexture(0, tex_CDF, CDF_GPU, Nparticles);
find_index_kernel <<< num_blocks, threads_per_block >>> (arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, weights_GPU, Nparticles);
check_error(cudaUnbindTexture(tex_CDF));
}//end loop
//block till kernels are finished
cudaThreadSynchronize();
long long back_time = get_time();
cudaFree(xj_GPU);
cudaFree(yj_GPU);
cudaFree(CDF_GPU);
cudaFree(u_GPU);
cudaFree(likelihood_GPU);
cudaFree(I_GPU);
cudaFree(objxy_GPU);
cudaFree(ind_GPU);
cudaFree(seed_GPU);
cudaFree(partial_sums);
long long free_time = get_time();
check_error(cudaMemcpy(arrayX, arrayX_GPU, sizeof (float) *Nparticles, cudaMemcpyDeviceToHost));
long long arrayX_time = get_time();
check_error(cudaMemcpy(arrayY, arrayY_GPU, sizeof (float) *Nparticles, cudaMemcpyDeviceToHost));
long long arrayY_time = get_time();
check_error(cudaMemcpy(weights, weights_GPU, sizeof (float) *Nparticles, cudaMemcpyDeviceToHost));
long long back_end_time = get_time();
printf("GPU Execution: %lf\n", elapsed_time(send_end, back_time));
printf("FREE TIME: %lf\n", elapsed_time(back_time, free_time));
printf("TIME TO SEND BACK: %lf\n", elapsed_time(back_time, back_end_time));
printf("SEND ARRAY X BACK: %lf\n", elapsed_time(free_time, arrayX_time));
printf("SEND ARRAY Y BACK: %lf\n", elapsed_time(arrayX_time, arrayY_time));
printf("SEND WEIGHTS BACK: %lf\n", elapsed_time(arrayY_time, back_end_time));
xe = 0;
ye = 0;
// estimate the object location by expected values
for (x = 0; x < Nparticles; x++) {
//printf("AFTER: arrayX[%d]::%0.06f arrayY[%d]::%0.06f\n", x, arrayX[x], x, arrayY[x]);
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
}
printf("XE: %lf\n", xe);
printf("YE: %lf\n", ye);
float distance = sqrt(pow((float) (xe - (int) roundDouble(IszY / 2.0)), 2) + pow((float) (ye - (int) roundDouble(IszX / 2.0)), 2));
printf("%lf\n", distance);
//CUDA freeing of memory
cudaFree(weights_GPU);
cudaFree(arrayY_GPU);
cudaFree(arrayX_GPU);
//free regular memory
free(likelihood);
free(arrayX);
free(arrayY);
free(xj);
free(yj);
free(CDF);
free(ind);
free(u);
}
int main(int argc, char * argv[]) {
char* usage = "float.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>";
//check number of arguments
if (argc != 9) {
printf("%s\n", usage);
return 0;
}
//check argument delimiters
if (strcmp(argv[1], "-x") || strcmp(argv[3], "-y") || strcmp(argv[5], "-z") || strcmp(argv[7], "-np")) {
printf("%s\n", usage);
return 0;
}
int IszX, IszY, Nfr, Nparticles;
//converting a string to an integer
if (sscanf(argv[2], "%d", &IszX) == EOF) {
printf("ERROR: dimX input is incorrect");
return 0;
}
if (IszX <= 0) {
printf("dimX must be > 0\n");
return 0;
}
//converting a string to an integer
if (sscanf(argv[4], "%d", &IszY) == EOF) {
printf("ERROR: dimY input is incorrect");
return 0;
}
if (IszY <= 0) {
printf("dimY must be > 0\n");
return 0;
}
//converting a string to an integer
if (sscanf(argv[6], "%d", &Nfr) == EOF) {
printf("ERROR: Number of frames input is incorrect");
return 0;
}
if (Nfr <= 0) {
printf("number of frames must be > 0\n");
return 0;
}
//converting a string to an integer
if (sscanf(argv[8], "%d", &Nparticles) == EOF) {
printf("ERROR: Number of particles input is incorrect");
return 0;
}
if (Nparticles <= 0) {
printf("Number of particles must be > 0\n");
return 0;
}
//establish seed
int * seed = (int *) malloc(sizeof (int) *Nparticles);
int i;
for (i = 0; i < Nparticles; i++)
seed[i] = time(0) * i;
//malloc matrix
unsigned char * I = (unsigned char *) malloc(sizeof (unsigned char) *IszX * IszY * Nfr);
long long start = get_time();
//call video sequence
videoSequence(I, IszX, IszY, Nfr, seed);
long long endVideoSequence = get_time();
printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence));
//call particle filter
particleFilter(I, IszX, IszY, Nfr, seed, Nparticles);
long long endParticleFilter = get_time();
printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter));
printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter));
free(seed);
free(I);
return 0;
}
|
fee5aa023217eeecee33035d51d28f689e020330.hip | // !!! This is a file automatically generated by hipify!!!
#include "cuda_matrix.h"
#include <stdlib.h>
#include <stdio.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
/**
 * @brief Custom host-side matrix
 * Used on the host side.
 * An unknown error with return code -1073741818 came up, so an abstract base class is not used for now.
 */
class Matrix
{
protected:
Matrix() {}
int width;
int height;
float *elements;
public:
/**
 * @brief Get the matrix width
 * @return the width of the matrix
 */
int getWidth()
{
return width;
}
/**
 * @brief Get the matrix height
 * @return the height of the matrix
 */
int getHeight()
{
return height;
}
/**
 * @brief Get the matrix elements
 * @return the element array
 */
float* getElements()
{
return elements;
}
/**
 * @brief operator() overload
 * @param i row index
 * @param j column index
 * @return the element at that position
 */
float& operator()(int i, int j)
{
return elements[i * width + j];
}
/**
 * @brief Constructor
 * @param h matrix height
 * @param w matrix width
 * Allocates host-side memory.
 */
Matrix(int h, int w)
{
height = h;
width = w;
elements = (float*)malloc(h * w * sizeof(float));
}
/**
 * @brief Destructor
 * Frees host-side memory.
 */
~Matrix()
{
free(elements);
}
};
class CUDAMatrix:Matrix
{
public:
/**
 * @brief Get the matrix height
 * @return the height of the matrix
 */
__device__ int getHeight()
{
return height;
}
/**
 * @brief Get the matrix width
 * @return the width of the matrix
 */
__device__ int getWidth()
{
return width;
}
/**
 * @brief Get the matrix elements
 * @return the element array
 */
float* getElements()
{
return elements;
}
/**
 * @brief operator() overload
 * @param i row index
 * @param j column index
 * @return the element at that position
 */
__device__ float& operator()(int i, int j)
{
return elements[i * width + j];
}
/**
 * @brief Copy data from the host to the device
 * @param mat host-side matrix
 */
void memCpyFrom(Matrix mat)
{
hipMemcpy((void*)elements, (void*)mat.getElements(), mat.getHeight() * mat.getWidth() * sizeof(float), hipMemcpyHostToDevice);
}
/**
 * @brief Constructor
 * @param h matrix height
 * @param w matrix width
 * Allocates device-side memory.
 */
CUDAMatrix(int h, int w)
{
height = h;
width = w;
hipMalloc((void**)&elements, h * w * sizeof(float));
}
/**
 * @brief Destructor
 * Frees device-side memory.
 */
~CUDAMatrix()
{
hipFree(elements);
}
};
/**
 * @brief Kernel - matrix multiplication
 * @param A input matrix
 * @param B input matrix
 * @param C output matrix
 * @param Aw width of matrix A
 * @param Bw width of matrix B
 * @param Cw width of matrix C
 * Note: the width of A must equal the height of B.
 */
__global__ void cuda_matrix_mul(float *A, float *B, float *C, int Aw, int Bw, int Cw)
{
// compute the absolute position of this element
int row = threadIdx.y + blockIdx.y * blockDim.y; // row
int col = threadIdx.x + blockIdx.x * blockDim.x; // column
int indexC = row * Cw + col;
int indexA = row * Aw;
/* each thread computes a single output element */
C[indexC] = 0.0;
for (int i = 0; i < Aw; i++)
{
C[indexC] += A[indexA + i] * B[i * Bw + col];
}
}
/**
 * @brief Host-side function - matrix multiplication
 */
void matrix_mul()
{
printf("---------------- Matrix multiplication ----------------\r\n");
int Ah = 1 << 10; // height of matrix A
int Aw = 1 << 10; // width of matrix A
int Bh = Aw; // height of matrix B
int Bw = 1 << 10; // width of matrix B
int Ch = Ah; // height of matrix C
int Cw = Bw; // width of matrix C
/* allocate host-side memory */
// Matrix A(Ah, Aw), B(Bh, Bw), C(Ch, Cw);
float *A, *B, *C;
int lenA = Ah * Aw * sizeof(float);
int lenB = Bh * Bw * sizeof(float);
int lenC = Ch * Cw * sizeof(float);
A = (float*)malloc(lenA);
B = (float*)malloc(lenB);
C = (float*)malloc(lenC);
/* initialize A and B */
for (int i = 0; i < Ah; i++)
for (int j = 0; j < Aw; j++)
A[i * Aw + j] = 2.0;
for (int i = 0; i < Bh; i++)
for (int j = 0; j < Bw; j++)
B[i * Bw + j] = 3.0;
/* allocate device-side memory */
//CUDAMatrix cA(Ah, Aw), cB(Bh, Bw), cC(Ch, Cw);
float *cA, *cB, *cC;
hipMalloc((void**)&cA, lenA);
hipMalloc((void**)&cB, lenB);
hipMalloc((void**)&cC, lenC);
/* copy the data from host to device */
//cA.memCpyFrom(A);
//cB.memCpyFrom(B);
hipMemcpy((void*)cA, (void*)A, lenA, hipMemcpyHostToDevice);
hipMemcpy((void*)cB, (void*)B, lenB, hipMemcpyHostToDevice);
/* configure the kernel launch */
dim3 blockSize(32, 32);
dim3 gridSize((Ch + blockSize.x - 1) / blockSize.x, (Cw + blockSize.y - 1) / blockSize.y);
printf("blockSize.x = %d, blockSize.y = %d\r\n", blockSize.x, blockSize.y);
printf("gridSize.x = %d, gridSize.y = %d\r\n", gridSize.x, gridSize.y);
/* run the computation */
printf("Starting kernel computation...\r\n");
cuda_matrix_mul << <gridSize, blockSize >> > (cA, cB, cC, Aw, Bw, Cw);
printf("Kernel computation finished!\r\n");
/* copy the result from device back to host */
hipMemcpy((void*)C, (void*)cC, lenC, hipMemcpyDeviceToHost);
// check the result
printf("Computing the sum of squared errors...\r\n");
float maxError = 0.0;
for (int i = 0; i < Ch; i++)
for (int j = 0; j < Cw; j++)
maxError += (C[i * Cw + j] - 2.0 * 3.0 * Aw) * (C[i * Cw + j] - 2.0 * 3.0 * Aw);
printf("Sum of squared errors: %f\r\n", maxError);
} | fee5aa023217eeecee33035d51d28f689e020330.cu | #include "cuda_matrix.h"
#include <stdlib.h>
#include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
/**
 * @brief Custom host-side matrix
 * Used on the host side.
 * An unknown error with return code -1073741818 came up, so an abstract base class is not used for now.
 */
class Matrix
{
protected:
Matrix() {}
int width;
int height;
float *elements;
public:
/**
 * @brief Get the matrix width
 * @return the width of the matrix
 */
int getWidth()
{
return width;
}
/**
 * @brief Get the matrix height
 * @return the height of the matrix
 */
int getHeight()
{
return height;
}
/**
 * @brief Get the matrix elements
 * @return the element array
 */
float* getElements()
{
return elements;
}
/**
 * @brief operator() overload
 * @param i row index
 * @param j column index
 * @return the element at that position
 */
float& operator()(int i, int j)
{
return elements[i * width + j];
}
/**
 * @brief Constructor
 * @param h matrix height
 * @param w matrix width
 * Allocates host-side memory.
 */
Matrix(int h, int w)
{
height = h;
width = w;
elements = (float*)malloc(h * w * sizeof(float));
}
/**
 * @brief Destructor
 * Frees host-side memory.
 */
~Matrix()
{
free(elements);
}
};
class CUDAMatrix:Matrix
{
public:
/**
 * @brief Get the matrix height
 * @return the height of the matrix
 */
__device__ int getHeight()
{
return height;
}
/**
 * @brief Get the matrix width
 * @return the width of the matrix
 */
__device__ int getWidth()
{
return width;
}
/**
 * @brief Get the matrix elements
 * @return the element array
 */
float* getElements()
{
return elements;
}
/**
 * @brief operator() overload
 * @param i row index
 * @param j column index
 * @return the element at that position
 */
__device__ float& operator()(int i, int j)
{
return elements[i * width + j];
}
/**
 * @brief Copy data from the host to the device
 * @param mat host-side matrix
 */
void memCpyFrom(Matrix mat)
{
cudaMemcpy((void*)elements, (void*)mat.getElements(), mat.getHeight() * mat.getWidth() * sizeof(float), cudaMemcpyHostToDevice);
}
/**
 * @brief Constructor
 * @param h matrix height
 * @param w matrix width
 * Allocates device-side memory.
 */
CUDAMatrix(int h, int w)
{
height = h;
width = w;
cudaMalloc((void**)&elements, h * w * sizeof(float));
}
/**
 * @brief Destructor
 * Frees device-side memory.
 */
~CUDAMatrix()
{
cudaFree(elements);
}
};
/**
 * @brief Kernel - matrix multiplication
 * @param A input matrix
 * @param B input matrix
 * @param C output matrix
 * @param Aw width of matrix A
 * @param Bw width of matrix B
 * @param Cw width of matrix C
 * Note: the width of A must equal the height of B.
 */
__global__ void cuda_matrix_mul(float *A, float *B, float *C, int Aw, int Bw, int Cw)
{
// compute the absolute position of this element
int row = threadIdx.y + blockIdx.y * blockDim.y; // row
int col = threadIdx.x + blockIdx.x * blockDim.x; // column
int indexC = row * Cw + col;
int indexA = row * Aw;
/* each thread computes a single output element */
C[indexC] = 0.0;
for (int i = 0; i < Aw; i++)
{
C[indexC] += A[indexA + i] * B[i * Bw + col];
}
}
/**
 * @brief Host-side function - matrix multiplication
 */
void matrix_mul()
{
printf("---------------- Matrix multiplication ----------------\r\n");
int Ah = 1 << 10; // height of matrix A
int Aw = 1 << 10; // width of matrix A
int Bh = Aw; // height of matrix B
int Bw = 1 << 10; // width of matrix B
int Ch = Ah; // height of matrix C
int Cw = Bw; // width of matrix C
/* allocate host-side memory */
// Matrix A(Ah, Aw), B(Bh, Bw), C(Ch, Cw);
float *A, *B, *C;
int lenA = Ah * Aw * sizeof(float);
int lenB = Bh * Bw * sizeof(float);
int lenC = Ch * Cw * sizeof(float);
A = (float*)malloc(lenA);
B = (float*)malloc(lenB);
C = (float*)malloc(lenC);
/* initialize A and B */
for (int i = 0; i < Ah; i++)
for (int j = 0; j < Aw; j++)
A[i * Aw + j] = 2.0;
for (int i = 0; i < Bh; i++)
for (int j = 0; j < Bw; j++)
B[i * Bw + j] = 3.0;
/* allocate device-side memory */
//CUDAMatrix cA(Ah, Aw), cB(Bh, Bw), cC(Ch, Cw);
float *cA, *cB, *cC;
cudaMalloc((void**)&cA, lenA);
cudaMalloc((void**)&cB, lenB);
cudaMalloc((void**)&cC, lenC);
/* copy the data from host to device */
//cA.memCpyFrom(A);
//cB.memCpyFrom(B);
cudaMemcpy((void*)cA, (void*)A, lenA, cudaMemcpyHostToDevice);
cudaMemcpy((void*)cB, (void*)B, lenB, cudaMemcpyHostToDevice);
/* configure the kernel launch */
dim3 blockSize(32, 32);
dim3 gridSize((Ch + blockSize.x - 1) / blockSize.x, (Cw + blockSize.y - 1) / blockSize.y);
printf("blockSize.x = %d, blockSize.y = %d\r\n", blockSize.x, blockSize.y);
printf("gridSize.x = %d, gridSize.y = %d\r\n", gridSize.x, gridSize.y);
/* run the computation */
printf("Starting kernel computation...\r\n");
cuda_matrix_mul << <gridSize, blockSize >> > (cA, cB, cC, Aw, Bw, Cw);
printf("Kernel computation finished!\r\n");
/* copy the result from device back to host */
cudaMemcpy((void*)C, (void*)cC, lenC, cudaMemcpyDeviceToHost);
// check the result
printf("Computing the sum of squared errors...\r\n");
float maxError = 0.0;
for (int i = 0; i < Ch; i++)
for (int j = 0; j < Cw; j++)
maxError += (C[i * Cw + j] - 2.0 * 3.0 * Aw) * (C[i * Cw + j] - 2.0 * 3.0 * Aw);
printf("Sum of squared errors: %f\r\n", maxError);
} |
b348b4a8b8ca4acf367822c6964dd41d0a616dd2.hip | // !!! This is a file automatically generated by hipify!!!
/**
* APPROXIMATE PATTERN MATCHING
*
* INF560
*/
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#define APM_DEBUG 0
char *
read_input_file( char * filename, int * size )
{
char * buf ;
off_t fsize;
int fd = 0 ;
int n_bytes = 1 ;
/* Open the text file */
fd = open( filename, O_RDONLY ) ;
if ( fd == -1 )
{
fprintf( stderr, "Unable to open the text file <%s>\n", filename ) ;
return NULL ;
}
/* Get the number of characters in the textfile */
fsize = lseek(fd, 0, SEEK_END);
if ( fsize == -1 )
{
fprintf( stderr, "Unable to lseek to the end\n" ) ;
return NULL ;
}
#if APM_DEBUG
printf( "File length: %lld\n", fsize ) ;
#endif
/* Go back to the beginning of the input file */
if ( lseek(fd, 0, SEEK_SET) == -1 )
{
fprintf( stderr, "Unable to lseek to start\n" ) ;
return NULL ;
}
/* Allocate data to copy the target text */
buf = (char *)malloc( fsize * sizeof ( char ) ) ;
if ( buf == NULL )
{
fprintf( stderr, "Unable to allocate %lld byte(s) for main array\n",
fsize ) ;
return NULL ;
}
n_bytes = read( fd, buf, fsize ) ;
if ( n_bytes != fsize )
{
fprintf( stderr,
"Unable to copy %lld byte(s) from text file (%d byte(s) copied)\n",
fsize, n_bytes) ;
return NULL ;
}
#if APM_DEBUG
printf( "Number of read bytes: %d\n", n_bytes ) ;
#endif
*size = n_bytes ;
close( fd ) ;
return buf ;
}
#define MIN3(a, b, c) ((a) < (b) ? ((a) < (c) ? (a) : (c)) : ((b) < (c) ? (b) : (c)))
int levenshtein(char *s1, char *s2, int len, int * column) {
unsigned int x, y, lastdiag, olddiag;
for (y = 1; y <= len; y++)
{
column[y] = y;
}
for (x = 1; x <= len; x++) {
column[0] = x;
lastdiag = x-1 ;
for (y = 1; y <= len; y++) {
olddiag = column[y];
column[y] = MIN3(
column[y] + 1,
column[y-1] + 1,
lastdiag + (s1[y-1] == s2[x-1] ? 0 : 1)
);
lastdiag = olddiag;
}
}
return(column[len]);
}
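/* Kernel: each thread strides over starting positions in the text buffer,
 * computes the Levenshtein distance between the pattern and the substring
 * starting there, and counts in resultsth[i] how many of its positions
 * match within the approximation factor */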
__global__ void compMatches(char* pattern,char * buf, int n_bytes, int size_pattern, int approx_factor, int * resultsth){
int distance = 0 ;
int size ;
int i = threadIdx.x + blockIdx.x*blockDim.x;
int n_th = gridDim.x*blockDim.x;
resultsth[i] = 0;
int * column = (int *)malloc( (size_pattern+1) * sizeof( int ) ) ;
if(i < n_bytes){
for(int j = i; j < n_bytes; j += n_th){
size = size_pattern ;
if ( n_bytes - j < size_pattern )
{
size = n_bytes - j ;
}
char * s1 = pattern;
char * s2 = &buf[j];
int len = size;
unsigned int x, y, lastdiag, olddiag;
for (y = 1; y <= len; y++)
{
column[y] = y;
}
for (x = 1; x <= len; x++) {
column[0] = x;
lastdiag = x-1 ;
for (y = 1; y <= len; y++) {
olddiag = column[y];
column[y] = MIN3(
column[y] + 1,
column[y-1] + 1,
lastdiag + (s1[y-1] == s2[x-1] ? 0 : 1)
);
lastdiag = olddiag;
}
}
distance = column[len];
if ( distance <= approx_factor ) {
resultsth[i]++ ;
}
}
}
free(column);
}
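/* Prints the free, total and used GPU memory in MB */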
void checkGpuMem()
{
float free_m,total_m,used_m;
size_t free_t,total_t;
hipMemGetInfo(&free_t,&total_t);
free_m =(uint)free_t/1048576.0 ;
total_m=(uint)total_t/1048576.0;
used_m=total_m-free_m;
printf ( " mem free %d .... %f MB mem total %d....%f MB mem used %f MB\n",free_t,free_m,total_t,total_m,used_m);
}
int
main( int argc, char ** argv )
{
char ** pattern ;
char * filename ;
int approx_factor = 0 ;
int nb_patterns = 0 ;
char * buf ;
struct timeval t1, t2;
double duration ;
int n_bytes ;
int * n_matches ;
/* Check number of arguments */
if ( argc < 4 )
{
printf( "Usage: %s approximation_factor "
"dna_database pattern1 pattern2 ...\n",
argv[0] ) ;
return 1 ;
}
/* Get the distance factor */
approx_factor = atoi( argv[1] ) ;
/* Grab the filename containing the target text */
filename = argv[2] ;
/* Get the number of patterns that the user wants to search for */
nb_patterns = argc - 3 ;
/* Fill the pattern array */
pattern = (char **)malloc( nb_patterns * sizeof( char * ) ) ;
if ( pattern == NULL )
{
fprintf( stderr,
"Unable to allocate array of pattern of size %d\n",
nb_patterns ) ;
return 1 ;
}
checkGpuMem();
/* Grab the patterns */
for (int i = 0 ; i < nb_patterns ; i++ )
{
int l ;
l = strlen(argv[i+3]) ;
if ( l <= 0 )
{
fprintf( stderr, "Error while parsing argument %d\n", i+3 ) ;
return 1 ;
}
pattern[i] = (char *)malloc( (l+1) * sizeof( char ) ) ;
if ( pattern[i] == NULL )
{
fprintf( stderr, "Unable to allocate string of size %d\n", l ) ;
return 1 ;
}
strncpy( pattern[i], argv[i+3], (l+1) ) ;
}
printf( "Approximate Pattern Mathing: "
"looking for %d pattern(s) in file %s w/ distance of %d\n",
nb_patterns, filename, approx_factor ) ;
buf = read_input_file( filename, &n_bytes ) ;
if ( buf == NULL )
{
return 1 ;
}
hipError_t err;
char * cbuf;
err = hipMalloc((void **)&cbuf, n_bytes* sizeof(char));
if( err != hipSuccess) {
printf("Error !");
exit(1);
}
err = hipMemcpy(cbuf, buf,n_bytes* sizeof(char) ,hipMemcpyHostToDevice);
if( err != hipSuccess) {
printf("Error !");
exit(1);
}
int nblock = 100;
int nth_b = 1024;
int nth = nblock*nth_b;
int * results_th;
err = hipMalloc((void **)&results_th, nth* sizeof(int));
if( err != hipSuccess) {
printf("Error !");
exit(1);
}
/* Allocate the array of matches */
n_matches = (int *)malloc( nb_patterns * sizeof( int ) ) ;
if ( n_matches == NULL )
{
fprintf( stderr, "Error: unable to allocate memory for %ldB\n",
nb_patterns * sizeof( int ) ) ;
return 1 ;
}
/*****
* BEGIN MAIN LOOP
******/
/* Timer start */
gettimeofday(&t1, NULL);
/* Check each pattern one by one */
for (int i = 0 ; i < nb_patterns ; i++ )
{
int size_pattern = strlen(pattern[i]) ;
/* Initialize the number of matches to 0 */
n_matches[i] = 0 ;
char * cpattern;
err = hipMalloc((void **)&cpattern, size_pattern* sizeof(char));
if( err != hipSuccess) {
printf("Error !");
exit(1);
}
err= hipMemcpy(cpattern,pattern[i], size_pattern* sizeof(char), hipMemcpyHostToDevice);
if( err != hipSuccess) {
printf("Error !");
exit(1);
}
hipLaunchKernelGGL(( compMatches), dim3(nblock), dim3(nth_b), 0, 0, cpattern,cbuf,n_bytes,size_pattern,approx_factor,results_th);
err = hipGetLastError();
if( err != hipSuccess) {
printf("Error ! in kernel");
exit(1);
}
int * results;
results = (int *)malloc(nth* sizeof(int));
err = hipMemcpy(results,results_th, nth* sizeof(int), hipMemcpyDeviceToHost);
if( err != hipSuccess) {
printf("Error ! in copying results %s", hipGetErrorString(err));
exit(1);
}
for(int j = 0; j < nth && j < n_bytes; j++){
n_matches[i] += results[j];
}
}
/* Timer stop */
gettimeofday(&t2, NULL);
duration = (t2.tv_sec -t1.tv_sec)+((t2.tv_usec-t1.tv_usec)/1e6);
printf( "APM done in %lf s\n", duration ) ;
/*****
* END MAIN LOOP
******/
for ( int i = 0 ; i < nb_patterns ; i++ )
{
printf( "Number of matches for pattern <%s>: %d\n",
pattern[i], n_matches[i] ) ;
}
return 0 ;
}
| b348b4a8b8ca4acf367822c6964dd41d0a616dd2.cu | /**
* APPROXIMATE PATTERN MATCHING
*
* INF560
*/
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda_runtime.h>
#define APM_DEBUG 0
char *
read_input_file( char * filename, int * size )
{
char * buf ;
off_t fsize;
int fd = 0 ;
int n_bytes = 1 ;
/* Open the text file */
fd = open( filename, O_RDONLY ) ;
if ( fd == -1 )
{
fprintf( stderr, "Unable to open the text file <%s>\n", filename ) ;
return NULL ;
}
/* Get the number of characters in the textfile */
fsize = lseek(fd, 0, SEEK_END);
if ( fsize == -1 )
{
fprintf( stderr, "Unable to lseek to the end\n" ) ;
return NULL ;
}
#if APM_DEBUG
printf( "File length: %lld\n", fsize ) ;
#endif
/* Go back to the beginning of the input file */
if ( lseek(fd, 0, SEEK_SET) == -1 )
{
fprintf( stderr, "Unable to lseek to start\n" ) ;
return NULL ;
}
/* Allocate data to copy the target text */
buf = (char *)malloc( fsize * sizeof ( char ) ) ;
if ( buf == NULL )
{
fprintf( stderr, "Unable to allocate %lld byte(s) for main array\n",
fsize ) ;
return NULL ;
}
n_bytes = read( fd, buf, fsize ) ;
if ( n_bytes != fsize )
{
fprintf( stderr,
"Unable to copy %lld byte(s) from text file (%d byte(s) copied)\n",
fsize, n_bytes) ;
return NULL ;
}
#if APM_DEBUG
printf( "Number of read bytes: %d\n", n_bytes ) ;
#endif
*size = n_bytes ;
close( fd ) ;
return buf ;
}
#define MIN3(a, b, c) ((a) < (b) ? ((a) < (c) ? (a) : (c)) : ((b) < (c) ? (b) : (c)))
int levenshtein(char *s1, char *s2, int len, int * column) {
unsigned int x, y, lastdiag, olddiag;
for (y = 1; y <= len; y++)
{
column[y] = y;
}
for (x = 1; x <= len; x++) {
column[0] = x;
lastdiag = x-1 ;
for (y = 1; y <= len; y++) {
olddiag = column[y];
column[y] = MIN3(
column[y] + 1,
column[y-1] + 1,
lastdiag + (s1[y-1] == s2[x-1] ? 0 : 1)
);
lastdiag = olddiag;
}
}
return(column[len]);
}
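/* Kernel: each thread strides over starting positions in the text buffer,
 * computes the Levenshtein distance between the pattern and the substring
 * starting there, and counts in resultsth[i] how many of its positions
 * match within the approximation factor */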
__global__ void compMatches(char* pattern,char * buf, int n_bytes, int size_pattern, int approx_factor, int * resultsth){
int distance = 0 ;
int size ;
int i = threadIdx.x + blockIdx.x*blockDim.x;
int n_th = gridDim.x*blockDim.x;
resultsth[i] = 0;
int * column = (int *)malloc( (size_pattern+1) * sizeof( int ) ) ;
if(i < n_bytes){
for(int j = i; j < n_bytes; j += n_th){
size = size_pattern ;
if ( n_bytes - j < size_pattern )
{
size = n_bytes - j ;
}
char * s1 = pattern;
char * s2 = &buf[j];
int len = size;
unsigned int x, y, lastdiag, olddiag;
for (y = 1; y <= len; y++)
{
column[y] = y;
}
for (x = 1; x <= len; x++) {
column[0] = x;
lastdiag = x-1 ;
for (y = 1; y <= len; y++) {
olddiag = column[y];
column[y] = MIN3(
column[y] + 1,
column[y-1] + 1,
lastdiag + (s1[y-1] == s2[x-1] ? 0 : 1)
);
lastdiag = olddiag;
}
}
distance = column[len];
if ( distance <= approx_factor ) {
resultsth[i]++ ;
}
}
}
free(column);
}
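/* Prints the free, total and used GPU memory in MB */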
void checkGpuMem()
{
float free_m,total_m,used_m;
size_t free_t,total_t;
cudaMemGetInfo(&free_t,&total_t);
free_m =(uint)free_t/1048576.0 ;
total_m=(uint)total_t/1048576.0;
used_m=total_m-free_m;
printf ( " mem free %d .... %f MB mem total %d....%f MB mem used %f MB\n",free_t,free_m,total_t,total_m,used_m);
}
int
main( int argc, char ** argv )
{
char ** pattern ;
char * filename ;
int approx_factor = 0 ;
int nb_patterns = 0 ;
char * buf ;
struct timeval t1, t2;
double duration ;
int n_bytes ;
int * n_matches ;
/* Check number of arguments */
if ( argc < 4 )
{
printf( "Usage: %s approximation_factor "
"dna_database pattern1 pattern2 ...\n",
argv[0] ) ;
return 1 ;
}
/* Get the distance factor */
approx_factor = atoi( argv[1] ) ;
/* Grab the filename containing the target text */
filename = argv[2] ;
/* Get the number of patterns that the user wants to search for */
nb_patterns = argc - 3 ;
/* Fill the pattern array */
pattern = (char **)malloc( nb_patterns * sizeof( char * ) ) ;
if ( pattern == NULL )
{
fprintf( stderr,
"Unable to allocate array of pattern of size %d\n",
nb_patterns ) ;
return 1 ;
}
checkGpuMem();
/* Grab the patterns */
for (int i = 0 ; i < nb_patterns ; i++ )
{
int l ;
l = strlen(argv[i+3]) ;
if ( l <= 0 )
{
fprintf( stderr, "Error while parsing argument %d\n", i+3 ) ;
return 1 ;
}
pattern[i] = (char *)malloc( (l+1) * sizeof( char ) ) ;
if ( pattern[i] == NULL )
{
fprintf( stderr, "Unable to allocate string of size %d\n", l ) ;
return 1 ;
}
strncpy( pattern[i], argv[i+3], (l+1) ) ;
}
printf( "Approximate Pattern Mathing: "
"looking for %d pattern(s) in file %s w/ distance of %d\n",
nb_patterns, filename, approx_factor ) ;
buf = read_input_file( filename, &n_bytes ) ;
if ( buf == NULL )
{
return 1 ;
}
cudaError_t err;
char * cbuf;
err = cudaMalloc((void **)&cbuf, n_bytes* sizeof(char));
if( err != cudaSuccess) {
printf("Error !");
exit(1);
}
err = cudaMemcpy(cbuf, buf,n_bytes* sizeof(char) ,cudaMemcpyHostToDevice);
if( err != cudaSuccess) {
printf("Error !");
exit(1);
}
int nblock = 100;
int nth_b = 1024;
int nth = nblock*nth_b;
int * results_th;
err = cudaMalloc((void **)&results_th, nth* sizeof(int));
if( err != cudaSuccess) {
printf("Error !");
exit(1);
}
/* Allocate the array of matches */
n_matches = (int *)malloc( nb_patterns * sizeof( int ) ) ;
if ( n_matches == NULL )
{
fprintf( stderr, "Error: unable to allocate memory for %ldB\n",
nb_patterns * sizeof( int ) ) ;
return 1 ;
}
/*****
* BEGIN MAIN LOOP
******/
/* Timer start */
gettimeofday(&t1, NULL);
/* Check each pattern one by one */
for (int i = 0 ; i < nb_patterns ; i++ )
{
int size_pattern = strlen(pattern[i]) ;
/* Initialize the number of matches to 0 */
n_matches[i] = 0 ;
char * cpattern;
err = cudaMalloc((void **)&cpattern, size_pattern* sizeof(char));
if( err != cudaSuccess) {
printf("Error !");
exit(1);
}
err= cudaMemcpy(cpattern,pattern[i], size_pattern* sizeof(char), cudaMemcpyHostToDevice);
if( err != cudaSuccess) {
printf("Error !");
exit(1);
}
compMatches<<<nblock, nth_b>>>(cpattern,cbuf,n_bytes,size_pattern,approx_factor,results_th);
err = cudaGetLastError();
if( err != cudaSuccess) {
printf("Error ! in kernel");
exit(1);
}
int * results;
results = (int *)malloc(nth* sizeof(int));
err = cudaMemcpy(results,results_th, nth* sizeof(int), cudaMemcpyDeviceToHost);
if( err != cudaSuccess) {
printf("Error ! in copying results %s", cudaGetErrorString(err));
exit(1);
}
for(int j = 0; j < nth && j < n_bytes; j++){
n_matches[i] += results[j];
}
}
/* Timer stop */
gettimeofday(&t2, NULL);
duration = (t2.tv_sec -t1.tv_sec)+((t2.tv_usec-t1.tv_usec)/1e6);
printf( "APM done in %lf s\n", duration ) ;
/*****
* END MAIN LOOP
******/
for ( int i = 0 ; i < nb_patterns ; i++ )
{
printf( "Number of matches for pattern <%s>: %d\n",
pattern[i], n_matches[i] ) ;
}
return 0 ;
}
|
9fb1acfa5b066db4c1d682092b9c44a8251599a0.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernel_256_one_1024.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
hipMalloc(&A, XSIZE*YSIZE);
float *B = NULL;
hipMalloc(&B, XSIZE*YSIZE);
float *bnBias = NULL;
hipMalloc(&bnBias, XSIZE*YSIZE);
float *bnScale = NULL;
hipMalloc(&bnScale, XSIZE*YSIZE);
float *C = NULL;
hipMalloc(&C, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((kernel_256_one_1024), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,bnBias,bnScale,C);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((kernel_256_one_1024), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,bnBias,bnScale,C);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((kernel_256_one_1024), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,bnBias,bnScale,C);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 9fb1acfa5b066db4c1d682092b9c44a8251599a0.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernel_256_one_1024.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE);
float *B = NULL;
cudaMalloc(&B, XSIZE*YSIZE);
float *bnBias = NULL;
cudaMalloc(&bnBias, XSIZE*YSIZE);
float *bnScale = NULL;
cudaMalloc(&bnScale, XSIZE*YSIZE);
float *C = NULL;
cudaMalloc(&C, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernel_256_one_1024<<<gridBlock,threadBlock>>>(A,B,bnBias,bnScale,C);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernel_256_one_1024<<<gridBlock,threadBlock>>>(A,B,bnBias,bnScale,C);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernel_256_one_1024<<<gridBlock,threadBlock>>>(A,B,bnBias,bnScale,C);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
9f3ec959ce753b6c6dff8350793b78dac960b895.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <algorithm>
#include <cmath>
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/lstm_common_layers.hpp"
namespace caffe {
template <typename Dtype>
__device__ Dtype sigmoid(const Dtype x) {
return Dtype(1) / (Dtype(1) + exp(-x));
}
template <typename Dtype>
__device__ Dtype tanh(const Dtype x) {
return Dtype(2) * sigmoid(Dtype(2) * x) - Dtype(1);
}
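// Adds add_vec into data, scaled per sequence by the clip indicator
// (clip[n] if provided, otherwise t > 0), so the recurrent contribution
// is dropped at the first time step of a sequence.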
template <typename Dtype>
__global__ void ClipAdd(const int nthreads, const int dim, int t,
const Dtype* clip, const Dtype* add_vec, Dtype* data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / dim;
const Dtype clip_t = clip ? clip[n] : Dtype(t > 0);
data[index] += clip_t * add_vec[index];
}
}
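// Applies the gate nonlinearities: sigmoid for the input, forget and
// output gates (first 3*H entries per sample), tanh for the input
// modulation gate (last H entries).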
template <typename Dtype>
__global__ void ActivationForward(const int nthreads, const int H,
const Dtype* pre_gate, Dtype* gate) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int d = index % (4*H);
gate[index] = d < 3*H ? sigmoid(pre_gate[index]) : tanh(pre_gate[index]);
}
}
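// Computes the cell and hidden states for time step t:
// c_t = clip_t * f_t * c_{t-1} + i_t * g_t and h_t = o_t * tanh(c_t).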
template <typename Dtype>
__global__ void LSTMForward(const int nthreads, const int H, const int t,
const Dtype* c_prev, const Dtype* gate, const Dtype* clip,
Dtype* c_t, Dtype* h_t) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / H;
const int d = index % H;
const Dtype* offset = gate + 4*H*n;
const Dtype i_t = offset[d];
const Dtype f_t = offset[H + d];
const Dtype o_t = offset[2*H + d];
const Dtype g_t = offset[3*H + d];
const Dtype c_t_1 = c_prev[index];
const Dtype clip_t = clip ? clip[n] : Dtype(t > 0);
c_t[index] = clip_t * f_t * c_t_1 + i_t * g_t;
h_t[index] = o_t * tanh(c_t[index]);
}
}
template <typename Dtype>
__global__ void LSTMBackward(const int nthreads, const int H, const int t,
const Dtype* c_prev, const Dtype* gate, const Dtype* c_t,
const Dtype* clip, Dtype* dc_t, const Dtype* dh_t,
Dtype* dc_prev, Dtype* gate_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / H;
const int d = index % H;
const Dtype* gate_t = gate + 4*H*n;
const Dtype i_t = gate_t[d];
const Dtype f_t = gate_t[H + d];
const Dtype o_t = gate_t[2*H + d];
const Dtype g_t = gate_t[3*H + d];
const Dtype c_t_1 = c_prev[index];
const Dtype c = c_t[index];
const Dtype tanh_c = tanh(c);
const Dtype clip_t = clip ? clip[n] : Dtype(t > 0);
Dtype* dc_t_1 = dc_prev + index;
Dtype* gate_diff_t = gate_diff + 4*H*n;
Dtype* di_t = gate_diff_t + d;
Dtype* df_t = gate_diff_t + H + d;
Dtype* do_t = gate_diff_t + 2*H + d;
Dtype* dg_t = gate_diff_t + 3*H + d;
// Output gate : tanh(c(t)) * h_diff(t)
*do_t = dh_t[index] * tanh_c;
// Cell state : o(t) * tanh'(c(t)) * h_diff(t) + f(t+1) * c_diff(t+1)
dc_t[index] += dh_t[index] * o_t * (Dtype(1) - tanh_c * tanh_c);
// c_diff(t-1) += f(t) * c_diff(t)
*dc_t_1 = clip_t * dc_t[index] * f_t;
// Forget gate : c(t-1) * c_diff(t)
*df_t = clip_t * dc_t[index] * c_t_1;
// Input gate : g(t) * c_diff(t)
*di_t = dc_t[index] * g_t;
// Input modulation gate : i(t) * c_diff(t)
*dg_t = dc_t[index] * i_t;
}
}
template <typename Dtype>
__global__ void ActivationBackward(const int nthreads, const int H,
const Dtype clip_threshold, const Dtype* gate, const Dtype* gate_diff,
Dtype* pre_gate_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int d = index % (4 * H);
const Dtype gate_val = gate[index];
if (d < 3 * H) {
pre_gate_diff[index] = gate_diff[index] * gate_val * (Dtype(1) - gate_val);
} else {
pre_gate_diff[index] = gate_diff[index] * (Dtype(1) - gate_val * gate_val);
}
if (clip_threshold > Dtype(0)) {
if (pre_gate_diff[index] < -clip_threshold) {
pre_gate_diff[index] = -clip_threshold;
}
else if (pre_gate_diff[index] > clip_threshold) {
pre_gate_diff[index] = clip_threshold;
}
}
}
}
template <typename Dtype>
void LstmLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
CHECK_EQ(top[0]->gpu_data(), top_.gpu_data());
Dtype* top_data = top_.mutable_gpu_data();
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* clip = NULL;
if (bottom.size() > 1) {
clip = bottom[1]->gpu_data();
CHECK_EQ(bottom[1]->num(), bottom[1]->count());
}
const Dtype* weight_i = this->blobs_[0]->gpu_data();
const Dtype* weight_h = this->blobs_[1]->gpu_data();
const Dtype* bias = this->blobs_[2]->gpu_data();
Dtype* pre_gate_data = pre_gate_.mutable_gpu_data();
Dtype* gate_data = gate_.mutable_gpu_data();
Dtype* cell_data = cell_.mutable_gpu_data();
// Initialize previous state
if (clip) {
caffe_copy(c_0_.count(), c_T_.gpu_data(), c_0_.mutable_gpu_data());
caffe_copy(h_0_.count(), h_T_.gpu_data(), h_0_.mutable_gpu_data());
}
else {
caffe_gpu_set(c_0_.count(), Dtype(0.), c_0_.mutable_gpu_data());
caffe_gpu_set(h_0_.count(), Dtype(0.), h_0_.mutable_gpu_data());
}
// Compute input to hidden forward propagation
caffe_gpu_gemm(CblasNoTrans, CblasTrans, T_*N_, 4*H_, I_, Dtype(1.),
bottom_data, weight_i, Dtype(0.), pre_gate_data);
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, T_*N_, 4*H_, 1, Dtype(1.),
bias_multiplier_.gpu_data(), bias, Dtype(1.), pre_gate_data);
// Compute recurrent forward propagation
for (int t = 0; t < T_; ++t) {
Dtype* h_t = top_data + top_.offset(t);
Dtype* c_t = cell_data + cell_.offset(t);
Dtype* pre_gate_t = pre_gate_data + pre_gate_.offset(t);
Dtype* gate_t = gate_data + gate_.offset(t);
const Dtype* clip_t = clip ? clip + bottom[1]->offset(t) : NULL;
const Dtype* h_t_1 = t > 0 ? (h_t - top_.offset(1)) : h_0_.gpu_data();
const Dtype* c_t_1 = t > 0 ? (c_t - cell_.offset(1)) : c_0_.gpu_data();
caffe_gpu_gemm(CblasNoTrans, CblasTrans, N_, 4*H_, H_, Dtype(1.),
h_t_1, weight_h, Dtype(0.), h_to_gate_.mutable_gpu_data());
hipLaunchKernelGGL(( ClipAdd<Dtype>), dim3(CAFFE_GET_BLOCKS(4*N_*H_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
4*N_*H_, 4*H_, t, clip_t, h_to_gate_.gpu_data(), pre_gate_t);
CUDA_POST_KERNEL_CHECK;
hipLaunchKernelGGL(( ActivationForward<Dtype>), dim3(CAFFE_GET_BLOCKS(4*N_*H_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
4*N_*H_, H_, pre_gate_t, gate_t);
CUDA_POST_KERNEL_CHECK;
hipLaunchKernelGGL(( LSTMForward<Dtype>), dim3(CAFFE_GET_BLOCKS(N_*H_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N_*H_, H_, t, c_t_1, gate_t, clip_t, c_t, h_t);
CUDA_POST_KERNEL_CHECK;
}
// Preserve cell state and output value for truncated BPTT
caffe_copy(N_*H_, cell_data + cell_.offset(T_-1), c_T_.mutable_gpu_data());
caffe_copy(N_*H_, top_data + top_.offset(T_-1), h_T_.mutable_gpu_data());
}
template <typename Dtype>
void LstmLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_data = top_.gpu_data();
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* clip = NULL;
if (bottom.size() > 1) {
clip = bottom[1]->gpu_data();
CHECK_EQ(bottom[1]->num(), bottom[1]->count());
}
const Dtype* weight_i = this->blobs_[0]->gpu_data();
const Dtype* weight_h = this->blobs_[1]->gpu_data();
const Dtype* gate_data = gate_.gpu_data();
const Dtype* cell_data = cell_.gpu_data();
Dtype* top_diff = top_.mutable_gpu_diff();
Dtype* pre_gate_diff = pre_gate_.mutable_gpu_diff();
Dtype* gate_diff = gate_.mutable_gpu_diff();
Dtype* cell_diff = cell_.mutable_gpu_diff();
caffe_copy(N_*H_, c_T_.gpu_diff(), cell_diff + cell_.offset(T_-1));
for (int t = T_-1; t >= 0; --t) {
Dtype* dh_t = top_diff + top_.offset(t);
Dtype* dc_t = cell_diff + cell_.offset(t);
Dtype* gate_diff_t = gate_diff + gate_.offset(t);
Dtype* pre_gate_diff_t = pre_gate_diff + pre_gate_.offset(t);
Dtype* dh_t_1 = t > 0 ? top_diff + top_.offset(t-1) : h_0_.mutable_gpu_diff();
Dtype* dc_t_1 = t > 0 ? cell_diff + cell_.offset(t-1) : c_0_.mutable_gpu_diff();
const Dtype* clip_t = clip ? clip + bottom[1]->offset(t) : NULL;
const Dtype* c_t = cell_data + cell_.offset(t);
const Dtype* c_t_1 = t > 0 ? cell_data + cell_.offset(t-1) : c_0_.gpu_data();
const Dtype* gate_t = gate_data + gate_.offset(t);
hipLaunchKernelGGL(( LSTMBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(N_*H_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N_*H_, H_, t, c_t_1, gate_t, c_t, clip_t, dc_t, dh_t, dc_t_1, gate_diff_t);
CUDA_POST_KERNEL_CHECK;
hipLaunchKernelGGL(( ActivationBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(4*N_*H_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
4*N_*H_, H_, clipping_threshold_, gate_t, gate_diff_t, pre_gate_diff_t);
CUDA_POST_KERNEL_CHECK;
// Backprop errors to the previous time step
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, N_, H_, 4*H_,
Dtype(1.), pre_gate_diff_t, weight_h, Dtype(0.), h_to_h_.mutable_gpu_data());
hipLaunchKernelGGL(( ClipAdd<Dtype>), dim3(CAFFE_GET_BLOCKS(N_*H_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N_*H_, H_, t, clip_t, h_to_h_.gpu_data(), dh_t_1);
}
if (this->param_propagate_down_[0]) {
// Gradient w.r.t. input-to-hidden weight
caffe_gpu_gemm(CblasTrans, CblasNoTrans, 4*H_, I_, T_*N_, Dtype(1.),
pre_gate_diff, bottom_data, Dtype(1.), this->blobs_[0]->mutable_gpu_diff());
}
if (this->param_propagate_down_[1]) {
// Gradient w.r.t. hidden-to-hidden weight
caffe_gpu_gemm(CblasTrans, CblasNoTrans, 4*H_, H_, (T_-1)*N_, Dtype(1.),
pre_gate_diff + pre_gate_.offset(1), top_data,
Dtype(1.), this->blobs_[1]->mutable_gpu_diff());
// Add Gradient from previous time-step
caffe_gpu_gemm(CblasTrans, CblasNoTrans, 4*H_, H_, 1, Dtype(1.),
pre_gate_diff, h_0_.gpu_data(),
Dtype(1.), this->blobs_[1]->mutable_gpu_diff());
}
if (this->param_propagate_down_[2]) {
// Gradient w.r.t. bias
caffe_gpu_gemv(CblasTrans, T_*N_, 4*H_, Dtype(1.), pre_gate_diff,
bias_multiplier_.gpu_data(), Dtype(1.),
this->blobs_[2]->mutable_gpu_diff());
}
if (propagate_down[0]) {
// Gradient w.r.t. bottom data
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, T_*N_, I_, 4*H_, Dtype(1.),
pre_gate_diff, weight_i, Dtype(0.), bottom[0]->mutable_gpu_diff());
}
}
INSTANTIATE_LAYER_GPU_FUNCS(LstmLayer);
} // namespace caffe
| 9f3ec959ce753b6c6dff8350793b78dac960b895.cu | #include <vector>
#include <algorithm>
#include <cmath>
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/layers/lstm_common_layers.hpp"
namespace caffe {
template <typename Dtype>
__device__ Dtype sigmoid(const Dtype x) {
return Dtype(1) / (Dtype(1) + exp(-x));
}
template <typename Dtype>
__device__ Dtype tanh(const Dtype x) {
return Dtype(2) * sigmoid(Dtype(2) * x) - Dtype(1);
}
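// Adds add_vec into data, scaled per sequence by the clip indicator
// (clip[n] if provided, otherwise t > 0), so the recurrent contribution
// is dropped at the first time step of a sequence.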
template <typename Dtype>
__global__ void ClipAdd(const int nthreads, const int dim, int t,
const Dtype* clip, const Dtype* add_vec, Dtype* data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / dim;
const Dtype clip_t = clip ? clip[n] : Dtype(t > 0);
data[index] += clip_t * add_vec[index];
}
}
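// Applies the gate nonlinearities: sigmoid for the input, forget and
// output gates (first 3*H entries per sample), tanh for the input
// modulation gate (last H entries).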
template <typename Dtype>
__global__ void ActivationForward(const int nthreads, const int H,
const Dtype* pre_gate, Dtype* gate) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int d = index % (4*H);
gate[index] = d < 3*H ? sigmoid(pre_gate[index]) : tanh(pre_gate[index]);
}
}
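// Computes the cell and hidden states for time step t:
// c_t = clip_t * f_t * c_{t-1} + i_t * g_t and h_t = o_t * tanh(c_t).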
template <typename Dtype>
__global__ void LSTMForward(const int nthreads, const int H, const int t,
const Dtype* c_prev, const Dtype* gate, const Dtype* clip,
Dtype* c_t, Dtype* h_t) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / H;
const int d = index % H;
const Dtype* offset = gate + 4*H*n;
const Dtype i_t = offset[d];
const Dtype f_t = offset[H + d];
const Dtype o_t = offset[2*H + d];
const Dtype g_t = offset[3*H + d];
const Dtype c_t_1 = c_prev[index];
const Dtype clip_t = clip ? clip[n] : Dtype(t > 0);
c_t[index] = clip_t * f_t * c_t_1 + i_t * g_t;
h_t[index] = o_t * tanh(c_t[index]);
}
}
template <typename Dtype>
__global__ void LSTMBackward(const int nthreads, const int H, const int t,
const Dtype* c_prev, const Dtype* gate, const Dtype* c_t,
const Dtype* clip, Dtype* dc_t, const Dtype* dh_t,
Dtype* dc_prev, Dtype* gate_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / H;
const int d = index % H;
const Dtype* gate_t = gate + 4*H*n;
const Dtype i_t = gate_t[d];
const Dtype f_t = gate_t[H + d];
const Dtype o_t = gate_t[2*H + d];
const Dtype g_t = gate_t[3*H + d];
const Dtype c_t_1 = c_prev[index];
const Dtype c = c_t[index];
const Dtype tanh_c = tanh(c);
const Dtype clip_t = clip ? clip[n] : Dtype(t > 0);
Dtype* dc_t_1 = dc_prev + index;
Dtype* gate_diff_t = gate_diff + 4*H*n;
Dtype* di_t = gate_diff_t + d;
Dtype* df_t = gate_diff_t + H + d;
Dtype* do_t = gate_diff_t + 2*H + d;
Dtype* dg_t = gate_diff_t + 3*H + d;
// Output gate : tanh(c(t)) * h_diff(t)
*do_t = dh_t[index] * tanh_c;
// Cell state : o(t) * tanh'(c(t)) * h_diff(t) + f(t+1) * c_diff(t+1)
dc_t[index] += dh_t[index] * o_t * (Dtype(1) - tanh_c * tanh_c);
// c_diff(t-1) += f(t) * c_diff(t)
*dc_t_1 = clip_t * dc_t[index] * f_t;
// Forget gate : c(t-1) * c_diff(t)
*df_t = clip_t * dc_t[index] * c_t_1;
// Input gate : g(t) * c_diff(t)
*di_t = dc_t[index] * g_t;
// Input modulation gate : i(t) * c_diff(t)
*dg_t = dc_t[index] * i_t;
}
}
template <typename Dtype>
__global__ void ActivationBackward(const int nthreads, const int H,
const Dtype clip_threshold, const Dtype* gate, const Dtype* gate_diff,
Dtype* pre_gate_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int d = index % (4 * H);
const Dtype gate_val = gate[index];
if (d < 3 * H) {
pre_gate_diff[index] = gate_diff[index] * gate_val * (Dtype(1) - gate_val);
} else {
pre_gate_diff[index] = gate_diff[index] * (Dtype(1) - gate_val * gate_val);
}
if (clip_threshold > Dtype(0)) {
if (pre_gate_diff[index] < -clip_threshold) {
pre_gate_diff[index] = -clip_threshold;
}
else if (pre_gate_diff[index] > clip_threshold) {
pre_gate_diff[index] = clip_threshold;
}
}
}
}
template <typename Dtype>
void LstmLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
CHECK_EQ(top[0]->gpu_data(), top_.gpu_data());
Dtype* top_data = top_.mutable_gpu_data();
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* clip = NULL;
if (bottom.size() > 1) {
clip = bottom[1]->gpu_data();
CHECK_EQ(bottom[1]->num(), bottom[1]->count());
}
const Dtype* weight_i = this->blobs_[0]->gpu_data();
const Dtype* weight_h = this->blobs_[1]->gpu_data();
const Dtype* bias = this->blobs_[2]->gpu_data();
Dtype* pre_gate_data = pre_gate_.mutable_gpu_data();
Dtype* gate_data = gate_.mutable_gpu_data();
Dtype* cell_data = cell_.mutable_gpu_data();
// Initialize previous state
if (clip) {
caffe_copy(c_0_.count(), c_T_.gpu_data(), c_0_.mutable_gpu_data());
caffe_copy(h_0_.count(), h_T_.gpu_data(), h_0_.mutable_gpu_data());
}
else {
caffe_gpu_set(c_0_.count(), Dtype(0.), c_0_.mutable_gpu_data());
caffe_gpu_set(h_0_.count(), Dtype(0.), h_0_.mutable_gpu_data());
}
// Compute input to hidden forward propagation
caffe_gpu_gemm(CblasNoTrans, CblasTrans, T_*N_, 4*H_, I_, Dtype(1.),
bottom_data, weight_i, Dtype(0.), pre_gate_data);
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, T_*N_, 4*H_, 1, Dtype(1.),
bias_multiplier_.gpu_data(), bias, Dtype(1.), pre_gate_data);
// Compute recurrent forward propagation
for (int t = 0; t < T_; ++t) {
Dtype* h_t = top_data + top_.offset(t);
Dtype* c_t = cell_data + cell_.offset(t);
Dtype* pre_gate_t = pre_gate_data + pre_gate_.offset(t);
Dtype* gate_t = gate_data + gate_.offset(t);
const Dtype* clip_t = clip ? clip + bottom[1]->offset(t) : NULL;
const Dtype* h_t_1 = t > 0 ? (h_t - top_.offset(1)) : h_0_.gpu_data();
const Dtype* c_t_1 = t > 0 ? (c_t - cell_.offset(1)) : c_0_.gpu_data();
caffe_gpu_gemm(CblasNoTrans, CblasTrans, N_, 4*H_, H_, Dtype(1.),
h_t_1, weight_h, Dtype(0.), h_to_gate_.mutable_gpu_data());
ClipAdd<Dtype><<<CAFFE_GET_BLOCKS(4*N_*H_), CAFFE_CUDA_NUM_THREADS>>>(
4*N_*H_, 4*H_, t, clip_t, h_to_gate_.gpu_data(), pre_gate_t);
CUDA_POST_KERNEL_CHECK;
ActivationForward<Dtype><<<CAFFE_GET_BLOCKS(4*N_*H_), CAFFE_CUDA_NUM_THREADS>>>(
4*N_*H_, H_, pre_gate_t, gate_t);
CUDA_POST_KERNEL_CHECK;
LSTMForward<Dtype><<<CAFFE_GET_BLOCKS(N_*H_), CAFFE_CUDA_NUM_THREADS>>>(
N_*H_, H_, t, c_t_1, gate_t, clip_t, c_t, h_t);
CUDA_POST_KERNEL_CHECK;
}
// Preserve cell state and output value for truncated BPTT
caffe_copy(N_*H_, cell_data + cell_.offset(T_-1), c_T_.mutable_gpu_data());
caffe_copy(N_*H_, top_data + top_.offset(T_-1), h_T_.mutable_gpu_data());
}
template <typename Dtype>
void LstmLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* top_data = top_.gpu_data();
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* clip = NULL;
if (bottom.size() > 1) {
clip = bottom[1]->gpu_data();
CHECK_EQ(bottom[1]->num(), bottom[1]->count());
}
const Dtype* weight_i = this->blobs_[0]->gpu_data();
const Dtype* weight_h = this->blobs_[1]->gpu_data();
const Dtype* gate_data = gate_.gpu_data();
const Dtype* cell_data = cell_.gpu_data();
Dtype* top_diff = top_.mutable_gpu_diff();
Dtype* pre_gate_diff = pre_gate_.mutable_gpu_diff();
Dtype* gate_diff = gate_.mutable_gpu_diff();
Dtype* cell_diff = cell_.mutable_gpu_diff();
caffe_copy(N_*H_, c_T_.gpu_diff(), cell_diff + cell_.offset(T_-1));
for (int t = T_-1; t >= 0; --t) {
Dtype* dh_t = top_diff + top_.offset(t);
Dtype* dc_t = cell_diff + cell_.offset(t);
Dtype* gate_diff_t = gate_diff + gate_.offset(t);
Dtype* pre_gate_diff_t = pre_gate_diff + pre_gate_.offset(t);
Dtype* dh_t_1 = t > 0 ? top_diff + top_.offset(t-1) : h_0_.mutable_gpu_diff();
Dtype* dc_t_1 = t > 0 ? cell_diff + cell_.offset(t-1) : c_0_.mutable_gpu_diff();
const Dtype* clip_t = clip ? clip + bottom[1]->offset(t) : NULL;
const Dtype* c_t = cell_data + cell_.offset(t);
const Dtype* c_t_1 = t > 0 ? cell_data + cell_.offset(t-1) : c_0_.gpu_data();
const Dtype* gate_t = gate_data + gate_.offset(t);
LSTMBackward<Dtype><<<CAFFE_GET_BLOCKS(N_*H_), CAFFE_CUDA_NUM_THREADS>>>(
N_*H_, H_, t, c_t_1, gate_t, c_t, clip_t, dc_t, dh_t, dc_t_1, gate_diff_t);
CUDA_POST_KERNEL_CHECK;
ActivationBackward<Dtype><<<CAFFE_GET_BLOCKS(4*N_*H_), CAFFE_CUDA_NUM_THREADS>>>(
4*N_*H_, H_, clipping_threshold_, gate_t, gate_diff_t, pre_gate_diff_t);
CUDA_POST_KERNEL_CHECK;
// Backprop errors to the previous time step
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, N_, H_, 4*H_,
Dtype(1.), pre_gate_diff_t, weight_h, Dtype(0.), h_to_h_.mutable_gpu_data());
ClipAdd<Dtype><<<CAFFE_GET_BLOCKS(N_*H_), CAFFE_CUDA_NUM_THREADS>>>(
N_*H_, H_, t, clip_t, h_to_h_.gpu_data(), dh_t_1);
}
if (this->param_propagate_down_[0]) {
// Gradient w.r.t. input-to-hidden weight
caffe_gpu_gemm(CblasTrans, CblasNoTrans, 4*H_, I_, T_*N_, Dtype(1.),
pre_gate_diff, bottom_data, Dtype(1.), this->blobs_[0]->mutable_gpu_diff());
}
if (this->param_propagate_down_[1]) {
// Gradient w.r.t. hidden-to-hidden weight
caffe_gpu_gemm(CblasTrans, CblasNoTrans, 4*H_, H_, (T_-1)*N_, Dtype(1.),
pre_gate_diff + pre_gate_.offset(1), top_data,
Dtype(1.), this->blobs_[1]->mutable_gpu_diff());
// Add Gradient from previous time-step
caffe_gpu_gemm(CblasTrans, CblasNoTrans, 4*H_, H_, 1, Dtype(1.),
pre_gate_diff, h_0_.gpu_data(),
Dtype(1.), this->blobs_[1]->mutable_gpu_diff());
}
if (this->param_propagate_down_[2]) {
// Gradient w.r.t. bias
caffe_gpu_gemv(CblasTrans, T_*N_, 4*H_, Dtype(1.), pre_gate_diff,
bias_multiplier_.gpu_data(), Dtype(1.),
this->blobs_[2]->mutable_gpu_diff());
}
if (propagate_down[0]) {
// Gradient w.r.t. bottom data
caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, T_*N_, I_, 4*H_, Dtype(1.),
pre_gate_diff, weight_i, Dtype(0.), bottom[0]->mutable_gpu_diff());
}
}
INSTANTIATE_LAYER_GPU_FUNCS(LstmLayer);
} // namespace caffe
|
2323fe4cb84e6cd4fba4821533e1af3ee4d600b1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
 * Parallel computation
 */
#include <stdio.h>
#include <iostream>
#include<sys/time.h>
using namespace std;
#define N (200000)
void add_cpu(int *a, int *b, int *c)
{
int tid = 0;
while(tid < N)
{
c[tid] = a[tid] + b[tid];
tid += 1;
/* code */
}
}
__global__ void add(int *a, int *b, int *c)
{
int tid = blockIdx.x;
while(tid < N)
{
c[tid] = a[tid] + b[tid];
tid += 1;
/* code */
}
}
// CPU sum
int main_cpu()
{
int a[N], b[N], c[N];
struct timeval tv1, tv2;
for (int i = 0; i < N; i++)
{
a[i] = -i;
b[i] = i*i;
}
gettimeofday(&tv1, NULL);
add_cpu(a, b, c);
gettimeofday(&tv2, NULL);
float time = (1000000 * (tv2.tv_sec - tv1.tv_sec) + tv2.tv_usec- tv1.tv_usec)/1000.0;
cout << "time cpu " << time << "ms, num : " << c[N-1] << endl;
return 0;
}
// GPU sum
int main(int argc, char const *argv[])
{
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
struct timeval tv1, tv2;
hipMalloc((void**)&dev_a, N * sizeof(int));
hipMalloc((void**)&dev_b, N * sizeof(int));
hipMalloc((void**)&dev_c, N * sizeof(int));
    // Assign values to arrays a/b on the CPU
    // The data is initialized here on the CPU for no particular reason; in fact, this step would run faster if the arrays were filled on the GPU.
    // But since this code only demonstrates how to add two vectors on the GPU, only the computation part is placed on the device,
    // while the input is prepared on the CPU.
for(unsigned i = 0; i < N; ++i)
{
a[i] = -i;
b[i] = i*i;
}
hipMemcpy(dev_a, a, N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_c, c, N * sizeof(int), hipMemcpyHostToDevice);
gettimeofday(&tv1, NULL);
    // Launch the kernel. <<<1,1>>> tells the GPU to start 1 thread block with 1 thread per block.
    // <<<256,1>>> would start 256 thread blocks with 1 thread each; in that case a question arises:
    // since the GPU runs N copies of the kernel, how does the code know which thread block is currently running?
    // The answer can be found in the code:
    // int tid = blockIdx.x
    // At first glance an undefined variable is assigned to tid, but blockIdx is a built-in variable already defined by the CUDA runtime.
    // The value it contains is the index of the thread block currently executing the device code.
    //
    // Why not write int tid = blockIdx instead? Because CUDA supports two-dimensional arrays of thread blocks; for two-dimensional problems,
    // such as matrix math or image processing, a 2D index is often far more convenient since it avoids converting a linear index into a rectangular one.
hipLaunchKernelGGL(( add), dim3(32), dim3(512), 0, 0, dev_a, dev_b, dev_c);
// add<<<(N+255)/256, 256>>>(dev_a, dev_b, dev_c);
gettimeofday(&tv2, NULL);
float time = (1000000 * (tv2.tv_sec - tv1.tv_sec) + tv2.tv_usec- tv1.tv_usec)/1000.0;
cout << "time gpu " << time << "ms\n";
hipMemcpy(c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost);
for(unsigned i = 0; i < 10; ++i)
{
printf("%2d + %2d = %2d\n", a[i], b[i], c[i]);
}
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
main_cpu();
/* code */
return 0;
}
// time gpu 0.048ms
// time cpu 1.248 | 2323fe4cb84e6cd4fba4821533e1af3ee4d600b1.cu | /**
 * Parallel computation
*/
#include <stdio.h>
#include <iostream>
#include<sys/time.h>
using namespace std;
#define N (200000)
void add_cpu(int *a, int *b, int *c)
{
int tid = 0;
while(tid < N)
{
c[tid] = a[tid] + b[tid];
tid += 1;
/* code */
}
}
__global__ void add(int *a, int *b, int *c)
{
int tid = blockIdx.x;
while(tid < N)
{
c[tid] = a[tid] + b[tid];
tid += 1;
/* code */
}
}
// CPU sum
int main_cpu()
{
int a[N], b[N], c[N];
struct timeval tv1, tv2;
for (int i = 0; i < N; i++)
{
a[i] = -i;
b[i] = i*i;
}
gettimeofday(&tv1, NULL);
add_cpu(a, b, c);
gettimeofday(&tv2, NULL);
float time = (1000000 * (tv2.tv_sec - tv1.tv_sec) + tv2.tv_usec- tv1.tv_usec)/1000.0;
cout << "time cpu: " << time << "ms, num : " << c[N-1] << endl;
return 0;
}
// GPU sum
int main(int argc, char const *argv[])
{
int a[N], b[N], c[N];
int *dev_a, *dev_b, *dev_c;
struct timeval tv1, tv2;
cudaMalloc((void**)&dev_a, N * sizeof(int));
cudaMalloc((void**)&dev_b, N * sizeof(int));
cudaMalloc((void**)&dev_c, N * sizeof(int));
    // Assign values to arrays a/b on the CPU
    // The data is initialized here on the CPU for no particular reason; in fact, this step would run faster if the arrays were filled on the GPU.
    // But since this code only demonstrates how to add two vectors on the GPU, only the computation part is placed on the device,
    // while the input is prepared on the CPU.
for(unsigned i = 0; i < N; ++i)
{
a[i] = -i;
b[i] = i*i;
}
cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_c, c, N * sizeof(int), cudaMemcpyHostToDevice);
gettimeofday(&tv1, NULL);
    // Launch the kernel. <<<1,1>>> tells the GPU to start 1 thread block with 1 thread per block.
    // <<<256,1>>> would start 256 thread blocks with 1 thread each; in that case a question arises:
    // since the GPU runs N copies of the kernel, how does the code know which thread block is currently running?
    // The answer can be found in the code:
    // int tid = blockIdx.x
    // At first glance an undefined variable is assigned to tid, but blockIdx is a built-in variable already defined by the CUDA runtime.
    // The value it contains is the index of the thread block currently executing the device code.
    //
    // Why not write int tid = blockIdx instead? Because CUDA supports two-dimensional arrays of thread blocks; for two-dimensional problems,
    // such as matrix math or image processing, a 2D index is often far more convenient since it avoids converting a linear index into a rectangular one.
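    // For reference only: a more conventional indexing scheme for this launch shape would be
    //   int tid = blockIdx.x * blockDim.x + threadIdx.x;
    //   for (; tid < N; tid += blockDim.x * gridDim.x) c[tid] = a[tid] + b[tid];
    // so that each element is handled by exactly one thread. The add() kernel above instead
    // starts every thread at blockIdx.x and steps by 1, which matches the explanation above but
    // makes the threads within a block repeat the same work.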
add<<<32, 512>>>(dev_a, dev_b, dev_c);
// add<<<(N+255)/256, 256>>>(dev_a, dev_b, dev_c);
gettimeofday(&tv2, NULL);
float time = (1000000 * (tv2.tv_sec - tv1.tv_sec) + tv2.tv_usec- tv1.tv_usec)/1000.0;
cout << "time gpu: " << time << "ms\n";
cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);
for(unsigned i = 0; i < 10; ++i)
{
printf("%2d + %2d = %2d\n", a[i], b[i], c[i]);
}
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
main_cpu();
/* code */
return 0;
}
// time gpu: 0.048ms
// time cpu: 1.248 |
1e1d1b8ff4c32a416b326829a917049137f9aa39.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***********************************************************
tissueGPU3.cu
GPU kernel to accumulate contributions of vessel source
strengths qv to tissue solute levels pt.
Each tissue point is assigned one thread.
TWS January 2012
************************************************************/
#include <stdio.h>
#include <cutil_inline.h>
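// Summary of the kernel below: each thread owns one tissue point, loops over all nnv vessel
// points, weights each source strength qv by a distance factor gtv (logarithmic in 2D, 1/r in
// 3D, with a regularized form inside the radius req), and accumulates the weighted sum into
// d_pt000.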
__global__ void tissueGPU3Kernel(float *d_tissxyz, float *d_vessxyz, float *d_pt000, float *d_qv000,
int nnt, int nnv, int is2d, float req, float r2d)
{
int itp = blockDim.x * blockIdx.x + threadIdx.x;
int jvp,nnv2=2*nnv;
float p = 0., xt,yt,zt,x,y,z,dist2,gtv,req2=req*req,r2d2=r2d*r2d;
if(itp < nnt){
xt = d_tissxyz[itp];
yt = d_tissxyz[itp+nnt];
zt = d_tissxyz[itp+nnt*2];
for(jvp=0; jvp<nnv; jvp++){
x = d_vessxyz[jvp] - xt;
y = d_vessxyz[jvp+nnv] - yt;
z = d_vessxyz[jvp+nnv2] - zt;
dist2 = x*x + y*y + z*z;
if(dist2 < req2){
if(is2d) gtv = log(r2d2/req2) + 1. - dist2/req2;
else gtv = (1.5 - 0.5*dist2/req2)/req;
}
else{
if(is2d) gtv = log(r2d2/dist2);
else gtv = 1./sqrt(dist2);
}
p += d_qv000[jvp]*gtv;
}
d_pt000[itp] = p;
}
}
extern "C" void tissueGPU3(float *d_tissxyz, float *d_vessxyz, float *d_pt000, float *d_qv000,
int nnt, int nnv, int is2d, float req, float r2d)
{
int threadsPerBlock = 256;
int blocksPerGrid = (nnt + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( tissueGPU3Kernel), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_tissxyz, d_vessxyz, d_pt000, d_qv000,
nnt, nnv, is2d, req, r2d);
} | 1e1d1b8ff4c32a416b326829a917049137f9aa39.cu | /***********************************************************
tissueGPU3.cu
GPU kernel to accumulate contributions of vessel source
strengths qv to tissue solute levels pt.
Each tissue point is assigned one thread.
TWS January 2012
************************************************************/
#include <stdio.h>
#include <cutil_inline.h>
__global__ void tissueGPU3Kernel(float *d_tissxyz, float *d_vessxyz, float *d_pt000, float *d_qv000,
int nnt, int nnv, int is2d, float req, float r2d)
{
int itp = blockDim.x * blockIdx.x + threadIdx.x;
int jvp,nnv2=2*nnv;
float p = 0., xt,yt,zt,x,y,z,dist2,gtv,req2=req*req,r2d2=r2d*r2d;
if(itp < nnt){
xt = d_tissxyz[itp];
yt = d_tissxyz[itp+nnt];
zt = d_tissxyz[itp+nnt*2];
for(jvp=0; jvp<nnv; jvp++){
x = d_vessxyz[jvp] - xt;
y = d_vessxyz[jvp+nnv] - yt;
z = d_vessxyz[jvp+nnv2] - zt;
dist2 = x*x + y*y + z*z;
if(dist2 < req2){
if(is2d) gtv = log(r2d2/req2) + 1. - dist2/req2;
else gtv = (1.5 - 0.5*dist2/req2)/req;
}
else{
if(is2d) gtv = log(r2d2/dist2);
else gtv = 1./sqrt(dist2);
}
p += d_qv000[jvp]*gtv;
}
d_pt000[itp] = p;
}
}
extern "C" void tissueGPU3(float *d_tissxyz, float *d_vessxyz, float *d_pt000, float *d_qv000,
int nnt, int nnv, int is2d, float req, float r2d)
{
int threadsPerBlock = 256;
int blocksPerGrid = (nnt + threadsPerBlock - 1) / threadsPerBlock;
tissueGPU3Kernel<<<blocksPerGrid, threadsPerBlock>>>(d_tissxyz, d_vessxyz, d_pt000, d_qv000,
nnt, nnv, is2d, req, r2d);
} |
101b11bbba72da4effb686b2726cc5e4901608e1.hip | // !!! This is a file automatically generated by hipify!!!
#include "GpuFocalProcessing.cuh"
#include "GpuTimer_hip.cuh"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cmath>
using namespace winGpu;
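// Summary of applyFocalOpGpu: each thread convolves one pixel (h, w) with the
// sideSize x sideSize kernel. Pixels equal to rasterInput.defaultValue (treated as nodata) are
// copied through unchanged, and a window that touches any nodata pixel also leaves its centre
// pixel unchanged; negative sums are clamped to zero before being written back.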
__global__ void applyFocalOpGpu(FocalRasterGpu rasterInput, FocalRasterGpu rasterOutput, FocalKernelGpu kernel, int rowIter)
{
int h = blockDim.x * blockIdx.x + threadIdx.x + rowIter;
int w = blockDim.y * blockIdx.y + threadIdx.y;
if (rasterInput.height <= h || rasterInput.width <= w)
{
return;
}
if (rasterInput(h, w) == rasterInput.defaultValue)
{
rasterOutput(h, w) = rasterInput(h, w);
return;
}
double sum = 0.0;
for (int i = 0; i < kernel.sideSize; ++i)
{
for (int j = 0; j < kernel.sideSize; ++j)
{
pixel value = rasterInput(h + (i - kernel.midSize), w + (j - kernel.midSize));
if (value == rasterInput.defaultValue)
{
rasterOutput(h, w) = rasterInput(h, w);
return;
}
sum += kernel[i][j] * value;
}
}
if (sum <= 0)
{
sum = 0.0;
}
rasterOutput(h, w) = (pixel)sum;
}
double winGpu::doFocalOpGpu(pixel* input, int height, int width, pixel* output, std::vector<double> matrix)
{
    // Create a Raster for the input data
FocalRasterGpu rasterInput;
rasterInput.height = height;
rasterInput.width = width;
rasterInput.data = 0;
    // Create a Raster for the output data
FocalRasterGpu rasterOutput;
rasterOutput.height = height;
rasterOutput.width = width;
rasterOutput.data = 0;
    // Create a Kernel for applying the convolution matrix
FocalKernelGpu kernelTemp;
kernelTemp.sideSize = (int)std::sqrt(matrix.size());
kernelTemp.ker = matrix.data();
kernelTemp.midSize = kernelTemp.sideSize / 2;
FocalKernelGpu kernel;
kernel.sideSize = kernelTemp.sideSize;
kernel.midSize = kernelTemp.midSize;
kernel.ker = 0;
hipSetDevice(0);
hipMalloc((void**)&rasterInput.data, rasterInput.size());
hipMalloc((void**)&rasterOutput.data, rasterOutput.size());
hipMalloc((void**)&kernel.ker, kernel.size());
hipMemcpy(rasterInput.data, input, rasterInput.size(), hipMemcpyHostToDevice);
hipMemcpy(kernel.ker, kernelTemp.ker, kernel.size(), hipMemcpyHostToDevice);
const size_t maxAvaliableCoords = 8000000;
int countRowsPerIter = maxAvaliableCoords / width;
int countIter = height / countRowsPerIter + 1;
const size_t size = width * countRowsPerIter;
dim3 threadsPerBlock(16, 16);
dim3 numBlocks(countRowsPerIter / threadsPerBlock.x + 1, width / threadsPerBlock.y + 1);
float time;
GPU_TIMER_START;
for (int i = 0; i < countIter; i++)
{
int rowIter = i * countRowsPerIter;
applyFocalOpGpu << <numBlocks, threadsPerBlock >> > (rasterInput, rasterOutput, kernel, rowIter);
hipDeviceSynchronize();
int k = 5;
}
GPU_TIMER_STOP(time);
hipMemcpy(output, rasterOutput.data, rasterOutput.size(), hipMemcpyDeviceToHost);
hipFree(rasterInput.data);
hipFree(rasterOutput.data);
hipFree(kernel.ker);
return (double)time;
}
| 101b11bbba72da4effb686b2726cc5e4901608e1.cu | #include "GpuFocalProcessing.cuh"
#include "GpuTimer.cuh"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cmath>
using namespace winGpu;
__global__ void applyFocalOpGpu(FocalRasterGpu rasterInput, FocalRasterGpu rasterOutput, FocalKernelGpu kernel, int rowIter)
{
int h = blockDim.x * blockIdx.x + threadIdx.x + rowIter;
int w = blockDim.y * blockIdx.y + threadIdx.y;
if (rasterInput.height <= h || rasterInput.width <= w)
{
return;
}
if (rasterInput(h, w) == rasterInput.defaultValue)
{
rasterOutput(h, w) = rasterInput(h, w);
return;
}
double sum = 0.0;
for (int i = 0; i < kernel.sideSize; ++i)
{
for (int j = 0; j < kernel.sideSize; ++j)
{
pixel value = rasterInput(h + (i - kernel.midSize), w + (j - kernel.midSize));
if (value == rasterInput.defaultValue)
{
rasterOutput(h, w) = rasterInput(h, w);
return;
}
sum += kernel[i][j] * value;
}
}
if (sum <= 0)
{
sum = 0.0;
}
rasterOutput(h, w) = (pixel)sum;
}
double winGpu::doFocalOpGpu(pixel* input, int height, int width, pixel* output, std::vector<double> matrix)
{
    // Create a Raster for the input data
FocalRasterGpu rasterInput;
rasterInput.height = height;
rasterInput.width = width;
rasterInput.data = 0;
    // Create a Raster for the output data
FocalRasterGpu rasterOutput;
rasterOutput.height = height;
rasterOutput.width = width;
rasterOutput.data = 0;
    // Create a Kernel for applying the convolution matrix
FocalKernelGpu kernelTemp;
kernelTemp.sideSize = (int)std::sqrt(matrix.size());
kernelTemp.ker = matrix.data();
kernelTemp.midSize = kernelTemp.sideSize / 2;
FocalKernelGpu kernel;
kernel.sideSize = kernelTemp.sideSize;
kernel.midSize = kernelTemp.midSize;
kernel.ker = 0;
cudaSetDevice(0);
cudaMalloc((void**)&rasterInput.data, rasterInput.size());
cudaMalloc((void**)&rasterOutput.data, rasterOutput.size());
cudaMalloc((void**)&kernel.ker, kernel.size());
cudaMemcpy(rasterInput.data, input, rasterInput.size(), cudaMemcpyHostToDevice);
cudaMemcpy(kernel.ker, kernelTemp.ker, kernel.size(), cudaMemcpyHostToDevice);
const size_t maxAvaliableCoords = 8000000;
int countRowsPerIter = maxAvaliableCoords / width;
int countIter = height / countRowsPerIter + 1;
const size_t size = width * countRowsPerIter;
dim3 threadsPerBlock(16, 16);
dim3 numBlocks(countRowsPerIter / threadsPerBlock.x + 1, width / threadsPerBlock.y + 1);
float time;
GPU_TIMER_START;
for (int i = 0; i < countIter; i++)
{
int rowIter = i * countRowsPerIter;
applyFocalOpGpu << <numBlocks, threadsPerBlock >> > (rasterInput, rasterOutput, kernel, rowIter);
cudaDeviceSynchronize();
int k = 5;
}
GPU_TIMER_STOP(time);
cudaMemcpy(output, rasterOutput.data, rasterOutput.size(), cudaMemcpyDeviceToHost);
cudaFree(rasterInput.data);
cudaFree(rasterOutput.data);
cudaFree(kernel.ker);
return (double)time;
}
|
27acfc8c000901822f3591d0a5c2efdac638e80a.hip | // !!! This is a file automatically generated by hipify!!!
//=================================================================//
// CUDA SSSP kernel
// Data-Driven: one node per thread, thread_centric,
// use atomicMin & atomicAdd
// Reference:
// Sungpack Hong, et al. Accelerating CUDA graph algorithms
// at maximum warp
//=================================================================//
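// Sketch of the data-driven relaxation step implemented below (descriptive comment only):
// for each vertex v taken from the in-worklist and each outgoing edge (v -> u) with weight w,
// if (atomicMin(&update[u], vplist[v] + w) > vplist[v] + w) then u is pushed onto the
// out-worklist; kernel2 then commits update[] into vplist[] for the gathered vertices, and the
// two worklists are swapped every iteration until the in-worklist is empty.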
#include <hip/hip_runtime.h>
#include <stdint.h>
#include <stdio.h>
#include "cudaGraph.h"
#define WORKLIST_SIZE 8777216
#define LOCAL_SIZE 128
// a dummy worklist that you can only push or clear
typedef struct my_worklist
{
void init(void)
{
cudaErrCheck( hipMalloc((void**)&item_array, WORKLIST_SIZE*sizeof(uint64_t)) );
cudaErrCheck( hipMalloc((void**)&end, sizeof(uint32_t)) );
clear();
}
void clear(void)
{
uint32_t zeronum=0;
cudaErrCheck( hipMemcpy(end, &zeronum, sizeof(uint32_t),
hipMemcpyHostToDevice) );
}
void free(void)
{
cudaErrCheck( hipFree(item_array) );
cudaErrCheck( hipFree(end) );
}
__device__ void pushRange(uint64_t * from_array, uint32_t num)
{
uint32_t old_end = atomicAdd(end, num);
for (uint32_t i=0;i<num;i++)
{
item_array[i+old_end] = from_array[i];
}
}
__device__ inline uint64_t get_item(unsigned index)
{
return item_array[index];
}
__device__ inline uint32_t get_item_num(void)
{
return (*end);
}
void host_initPush(uint64_t * from_array, uint32_t num)
{
cudaErrCheck( hipMemcpy(end, &num, sizeof(uint32_t),
hipMemcpyHostToDevice) );
cudaErrCheck( hipMemcpy(item_array, from_array, num*sizeof(uint64_t),
hipMemcpyHostToDevice) );
}
uint64_t *item_array;
uint32_t *end;
}my_worklist;
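// Usage pattern in this file: the host seeds one worklist with the source vertex via
// host_initPush(); device code appends vertices with pushRange(), which reserves space with a
// single atomicAdd on 'end'; clear() only resets 'end', so old items are simply overwritten on
// the next iteration.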
__global__ void initialize(uint32_t * d_vpl, uint32_t * d_update, bool * d_mask, uint64_t num_vertex)
{
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if ( tid < num_vertex )
{
d_vpl[tid] = MY_INFINITY;
d_update[tid] = MY_INFINITY;
}
}
__global__
void kernel(uint32_t * vplist,
uint32_t * eplist,
uint32_t * update,
cudaGraph graph,
my_worklist inworklist,
my_worklist outworklist)
{
uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= inworklist.get_item_num()) return;
uint64_t v = inworklist.get_item(tid);
uint64_t e_begin = graph.get_firstedge_index(v);
uint64_t e_end = graph.get_edge_index_end(v);
uint32_t cost = vplist[v];
uint64_t local_worklist[LOCAL_SIZE];
uint32_t work_size=0;
for (unsigned i=e_begin;i<e_end;i++)
{
uint64_t vid = graph.get_edge_dest(i);
uint32_t new_dist = cost + eplist[i];
if ( update[vid] > new_dist)
{
if (atomicMin(&(update[vid]), new_dist)>new_dist)
local_worklist[work_size++]=vid;
if (work_size==LOCAL_SIZE)
{
outworklist.pushRange(local_worklist, work_size);
work_size = 0;
}
}
}
// push local worklist to shared worklist
outworklist.pushRange(local_worklist, work_size);
}
__global__
void kernel2(uint32_t * vplist,
uint32_t * update,
cudaGraph graph,
my_worklist inworklist)
{
uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= inworklist.get_item_num()) return;
uint64_t v = inworklist.get_item(tid);
if (vplist[v] > update[v])
{
vplist[v] = update[v];
}
}
void cuda_SSSP(uint64_t * vertexlist,
uint64_t * edgelist,
uint32_t * vproplist,
uint32_t * eproplist,
uint64_t vertex_cnt,
uint64_t edge_cnt,
uint64_t root)
{
uint32_t * device_vpl = 0;
uint32_t * device_epl = 0;
uint32_t * device_update = 0;
bool * device_mask = 0;
bool * device_over = 0;
float h2d_copy_time = 0; // host to device data transfer time
float d2h_copy_time = 0; // device to host data transfer time
float kernel_time = 0; // kernel execution time
int device;
hipGetDevice(&device);
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp,device);
// Try to use as many threads as possible so that each thread
// is processing one vertex. If max thread is reached,
// split them into multiple blocks.
unsigned int num_thread_per_block = (unsigned int) vertex_cnt;
if (num_thread_per_block > devProp.maxThreadsPerBlock)
num_thread_per_block = devProp.maxThreadsPerBlock;
unsigned int num_block = (unsigned int)ceil( vertex_cnt/(double)num_thread_per_block );
// malloc of gpu side
cudaErrCheck( hipMalloc((void**)&device_vpl, vertex_cnt*sizeof(uint32_t)) );
cudaErrCheck( hipMalloc((void**)&device_update, vertex_cnt*sizeof(uint32_t)) );
cudaErrCheck( hipMalloc((void**)&device_epl, edge_cnt*sizeof(uint32_t)) );
cudaErrCheck( hipMalloc((void**)&device_over, sizeof(bool)) );
hipEvent_t start_event, stop_event;
cudaErrCheck( hipEventCreate(&start_event) );
cudaErrCheck( hipEventCreate(&stop_event) );
// initialization
hipLaunchKernelGGL(( initialize), dim3(num_block), dim3(num_thread_per_block), 0, 0, device_vpl, device_update, device_mask, vertex_cnt);
// prepare graph struct
// one for host side, one for device side
cudaGraph h_graph, d_graph;
// here copy only the pointers
h_graph.read(vertexlist, edgelist, vertex_cnt, edge_cnt);
// initialize the worklists for in & out
my_worklist worklist1, worklist2;
worklist1.init();
worklist2.init();
my_worklist * in_worklist = &worklist1;
my_worklist * out_worklist = &worklist2;
in_worklist->host_initPush(&root, 1);
uint32_t zeronum=0;
// memcpy from host to device
hipEventRecord(start_event, 0);
// copy graph data to device
h_graph.cudaGraphCopy(&d_graph);
// set root vprop
cudaErrCheck( hipMemcpy(&(device_vpl[root]), &zeronum, sizeof(uint32_t),
hipMemcpyHostToDevice) );
// copy edge prop to device
cudaErrCheck( hipMemcpy(device_epl, eproplist, edge_cnt*sizeof(uint32_t),
hipMemcpyHostToDevice) );
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&h2d_copy_time, start_event, stop_event);
// traversal
hipEventRecord(start_event, 0);
int curr=0;
unsigned wl_size=1;
num_block = 1;
num_thread_per_block = 1;
while(wl_size!=0)
{
hipLaunchKernelGGL(( kernel), dim3(num_block), dim3(num_thread_per_block), 0, 0, device_vpl, device_epl,
device_update, d_graph, *in_worklist, *out_worklist);
my_worklist * temp=in_worklist;
in_worklist = out_worklist;
out_worklist = temp;
cudaErrCheck( hipMemcpy(&wl_size, in_worklist->end, sizeof(uint32_t), hipMemcpyDeviceToHost) );
out_worklist->clear();
num_thread_per_block = (unsigned int) wl_size;
if (num_thread_per_block > devProp.maxThreadsPerBlock)
num_thread_per_block = devProp.maxThreadsPerBlock;
num_block = (unsigned int)ceil( wl_size/(double)num_thread_per_block );
hipLaunchKernelGGL(( kernel2), dim3(num_block), dim3(num_thread_per_block), 0, 0, device_vpl, device_update,
d_graph, *in_worklist);
curr++;
}
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&kernel_time, start_event, stop_event);
hipEventRecord(start_event, 0);
cudaErrCheck( hipMemcpy(vproplist, device_vpl, vertex_cnt*sizeof(uint32_t),
hipMemcpyDeviceToHost) );
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&d2h_copy_time, start_event, stop_event);
printf("== iteration #: %d\n", curr);
#ifndef ENABLE_VERIFY
printf("== host->device copy time: %f ms\n", h2d_copy_time);
printf("== device->host copy time: %f ms\n", d2h_copy_time);
printf("== kernel time: %f ms\n", kernel_time);
#endif
hipEventDestroy(start_event);
hipEventDestroy(stop_event);
// free graph struct on device side
d_graph.cudaGraphFree();
in_worklist->free();
out_worklist->free();
cudaErrCheck( hipFree(device_vpl) );
cudaErrCheck( hipFree(device_epl) );
cudaErrCheck( hipFree(device_update) );
cudaErrCheck( hipFree(device_mask) );
}
| 27acfc8c000901822f3591d0a5c2efdac638e80a.cu | //=================================================================//
// CUDA SSSP kernel
// Data-Driven: one node per thread, thread_centric,
// use atomicMin & atomicAdd
// Reference:
// Sungpack Hong, et al. Accelerating CUDA graph algorithms
// at maximum warp
//=================================================================//
#include <cuda.h>
#include <stdint.h>
#include <stdio.h>
#include "cudaGraph.h"
#define WORKLIST_SIZE 8777216
#define LOCAL_SIZE 128
// a dummy worklist that you can only push or clear
typedef struct my_worklist
{
void init(void)
{
cudaErrCheck( cudaMalloc((void**)&item_array, WORKLIST_SIZE*sizeof(uint64_t)) );
cudaErrCheck( cudaMalloc((void**)&end, sizeof(uint32_t)) );
clear();
}
void clear(void)
{
uint32_t zeronum=0;
cudaErrCheck( cudaMemcpy(end, &zeronum, sizeof(uint32_t),
cudaMemcpyHostToDevice) );
}
void free(void)
{
cudaErrCheck( cudaFree(item_array) );
cudaErrCheck( cudaFree(end) );
}
__device__ void pushRange(uint64_t * from_array, uint32_t num)
{
uint32_t old_end = atomicAdd(end, num);
for (uint32_t i=0;i<num;i++)
{
item_array[i+old_end] = from_array[i];
}
}
__device__ inline uint64_t get_item(unsigned index)
{
return item_array[index];
}
__device__ inline uint32_t get_item_num(void)
{
return (*end);
}
void host_initPush(uint64_t * from_array, uint32_t num)
{
cudaErrCheck( cudaMemcpy(end, &num, sizeof(uint32_t),
cudaMemcpyHostToDevice) );
cudaErrCheck( cudaMemcpy(item_array, from_array, num*sizeof(uint64_t),
cudaMemcpyHostToDevice) );
}
uint64_t *item_array;
uint32_t *end;
}my_worklist;
__global__ void initialize(uint32_t * d_vpl, uint32_t * d_update, bool * d_mask, uint64_t num_vertex)
{
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if ( tid < num_vertex )
{
d_vpl[tid] = MY_INFINITY;
d_update[tid] = MY_INFINITY;
}
}
__global__
void kernel(uint32_t * vplist,
uint32_t * eplist,
uint32_t * update,
cudaGraph graph,
my_worklist inworklist,
my_worklist outworklist)
{
uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= inworklist.get_item_num()) return;
uint64_t v = inworklist.get_item(tid);
uint64_t e_begin = graph.get_firstedge_index(v);
uint64_t e_end = graph.get_edge_index_end(v);
uint32_t cost = vplist[v];
uint64_t local_worklist[LOCAL_SIZE];
uint32_t work_size=0;
for (unsigned i=e_begin;i<e_end;i++)
{
uint64_t vid = graph.get_edge_dest(i);
uint32_t new_dist = cost + eplist[i];
if ( update[vid] > new_dist)
{
if (atomicMin(&(update[vid]), new_dist)>new_dist)
local_worklist[work_size++]=vid;
if (work_size==LOCAL_SIZE)
{
outworklist.pushRange(local_worklist, work_size);
work_size = 0;
}
}
}
// push local worklist to shared worklist
outworklist.pushRange(local_worklist, work_size);
}
__global__
void kernel2(uint32_t * vplist,
uint32_t * update,
cudaGraph graph,
my_worklist inworklist)
{
uint64_t tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= inworklist.get_item_num()) return;
uint64_t v = inworklist.get_item(tid);
if (vplist[v] > update[v])
{
vplist[v] = update[v];
}
}
void cuda_SSSP(uint64_t * vertexlist,
uint64_t * edgelist,
uint32_t * vproplist,
uint32_t * eproplist,
uint64_t vertex_cnt,
uint64_t edge_cnt,
uint64_t root)
{
uint32_t * device_vpl = 0;
uint32_t * device_epl = 0;
uint32_t * device_update = 0;
bool * device_mask = 0;
bool * device_over = 0;
float h2d_copy_time = 0; // host to device data transfer time
float d2h_copy_time = 0; // device to host data transfer time
float kernel_time = 0; // kernel execution time
int device;
cudaGetDevice(&device);
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp,device);
// Try to use as many threads as possible so that each thread
// is processing one vertex. If max thread is reached,
// split them into multiple blocks.
unsigned int num_thread_per_block = (unsigned int) vertex_cnt;
if (num_thread_per_block > devProp.maxThreadsPerBlock)
num_thread_per_block = devProp.maxThreadsPerBlock;
unsigned int num_block = (unsigned int)ceil( vertex_cnt/(double)num_thread_per_block );
// malloc of gpu side
cudaErrCheck( cudaMalloc((void**)&device_vpl, vertex_cnt*sizeof(uint32_t)) );
cudaErrCheck( cudaMalloc((void**)&device_update, vertex_cnt*sizeof(uint32_t)) );
cudaErrCheck( cudaMalloc((void**)&device_epl, edge_cnt*sizeof(uint32_t)) );
cudaErrCheck( cudaMalloc((void**)&device_over, sizeof(bool)) );
cudaEvent_t start_event, stop_event;
cudaErrCheck( cudaEventCreate(&start_event) );
cudaErrCheck( cudaEventCreate(&stop_event) );
// initialization
initialize<<<num_block, num_thread_per_block>>>(device_vpl, device_update, device_mask, vertex_cnt);
// prepare graph struct
// one for host side, one for device side
cudaGraph h_graph, d_graph;
// here copy only the pointers
h_graph.read(vertexlist, edgelist, vertex_cnt, edge_cnt);
// initialize the worklists for in & out
my_worklist worklist1, worklist2;
worklist1.init();
worklist2.init();
my_worklist * in_worklist = &worklist1;
my_worklist * out_worklist = &worklist2;
in_worklist->host_initPush(&root, 1);
uint32_t zeronum=0;
// memcpy from host to device
cudaEventRecord(start_event, 0);
// copy graph data to device
h_graph.cudaGraphCopy(&d_graph);
// set root vprop
cudaErrCheck( cudaMemcpy(&(device_vpl[root]), &zeronum, sizeof(uint32_t),
cudaMemcpyHostToDevice) );
// copy edge prop to device
cudaErrCheck( cudaMemcpy(device_epl, eproplist, edge_cnt*sizeof(uint32_t),
cudaMemcpyHostToDevice) );
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&h2d_copy_time, start_event, stop_event);
// traversal
cudaEventRecord(start_event, 0);
int curr=0;
unsigned wl_size=1;
num_block = 1;
num_thread_per_block = 1;
while(wl_size!=0)
{
kernel<<<num_block, num_thread_per_block>>>(device_vpl, device_epl,
device_update, d_graph, *in_worklist, *out_worklist);
my_worklist * temp=in_worklist;
in_worklist = out_worklist;
out_worklist = temp;
cudaErrCheck( cudaMemcpy(&wl_size, in_worklist->end, sizeof(uint32_t), cudaMemcpyDeviceToHost) );
out_worklist->clear();
num_thread_per_block = (unsigned int) wl_size;
if (num_thread_per_block > devProp.maxThreadsPerBlock)
num_thread_per_block = devProp.maxThreadsPerBlock;
num_block = (unsigned int)ceil( wl_size/(double)num_thread_per_block );
kernel2<<<num_block, num_thread_per_block>>>(device_vpl, device_update,
d_graph, *in_worklist);
curr++;
}
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&kernel_time, start_event, stop_event);
cudaEventRecord(start_event, 0);
cudaErrCheck( cudaMemcpy(vproplist, device_vpl, vertex_cnt*sizeof(uint32_t),
cudaMemcpyDeviceToHost) );
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&d2h_copy_time, start_event, stop_event);
printf("== iteration #: %d\n", curr);
#ifndef ENABLE_VERIFY
printf("== host->device copy time: %f ms\n", h2d_copy_time);
printf("== device->host copy time: %f ms\n", d2h_copy_time);
printf("== kernel time: %f ms\n", kernel_time);
#endif
cudaEventDestroy(start_event);
cudaEventDestroy(stop_event);
// free graph struct on device side
d_graph.cudaGraphFree();
in_worklist->free();
out_worklist->free();
cudaErrCheck( cudaFree(device_vpl) );
cudaErrCheck( cudaFree(device_epl) );
cudaErrCheck( cudaFree(device_update) );
cudaErrCheck( cudaFree(device_mask) );
}
|
509f15f783e78522521e0622a59b31f59d884ae7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include "utils.h"
#include "cuda_utils.h"
#include <vector>
#include <random>
#include <iostream>
#include <chrono>
#include <string>
#include <algorithm>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
using namespace std;
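// Summary of the kernel below: the induced-voltage kick is linearly interpolated between slice
// bin centers. Each block first caches, in shared memory, a per-bin slope
// (voltage difference * inv_bin_width) and intercept (voltage - center * slope + acc_kick), so
// the per-particle update reduces to one multiply-add once the particle's bin index is known;
// particles falling outside the profile only receive acc_kick.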
__global__ void linear_interp_kick(const double * input,
double * output,
const double * voltage_array,
const double * bin_centers,
const int bins,
const int n,
const double acc_kick)
{
const double center0 = bin_centers[0];
const double inv_bin_width = (bins - 1) /
(bin_centers[bins - 1] - center0);
extern __shared__ double sh_mem[];
double *sh_volt_kick = sh_mem;
double *sh_factor = &sh_mem[bins - 1];
for (size_t i = threadIdx.x;
i < bins - 1;
i += blockDim.x)
{
sh_volt_kick[i] = (voltage_array[i + 1] - voltage_array[i]) * inv_bin_width;
sh_factor[i] = voltage_array[i] - bin_centers[i] * sh_volt_kick[i] + acc_kick;
}
__syncthreads();
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x;
i < n;
i += blockDim.x * gridDim.x)
{
unsigned bin = (unsigned) floor((input[i] - center0) * inv_bin_width);
if (bin < bins - 1)
output[i] += input[i] * sh_volt_kick[bin] + sh_factor[bin];
else
output[i] += acc_kick;
}
}
int main(int argc, char const *argv[])
{
int n_turns = 50000;
int n_particles = 1000000;
int n_slices = 1000;
int blocks = 512;
int threads = 1024;
if (argc > 1) n_turns = atoi(argv[1]);
if (argc > 2) n_particles = atoi(argv[2]);
if (argc > 3) n_slices = atoi(argv[3]);
if (argc > 4) blocks = atoi(argv[4]);
if (argc > 5) threads = atoi(argv[5]);
// setup random engine
default_random_engine gen;
uniform_real_distribution<double> d(0.0, 1.0);
// initialize variables
vector<double> dE, dt;
vector<double> voltage, edges, bin_centers;
double cut_left, cut_right, acc_kick;
string input = HOME "/input_files/distribution_10M_particles.txt";
read_distribution(input, n_particles, dt, dE);
voltage.resize(n_slices);
for (int i = 0; i < n_slices; ++i) {
voltage[i] = d(gen);
}
cut_left = 1.05 * (*min_element(dt.begin(), dt.end()));
cut_right = 0.95 * (*max_element(dt.begin(), dt.end()));
// cut_left = dt[rand() % n_slices];
// cut_right = dt[rand() % n_slices];
acc_kick = 10e6 * d(gen);
if (cut_left > cut_right) swap(cut_left, cut_right);
edges.resize(n_slices);
linspace(cut_left, cut_right, n_slices + 1, edges.data());
bin_centers.resize(n_slices);
for (int i = 0; i < n_slices; ++i) {
bin_centers[i] = (edges[i] + edges[i + 1]) / 2.;
}
thrust::device_vector<double> dev_dE = dE;
thrust::device_vector<double> dev_dt = dt;
thrust::device_vector<double> dev_voltage = voltage;
thrust::device_vector<double> dev_bin_centers = bin_centers;
auto start = chrono::high_resolution_clock::now();
// main loop
for (int i = 0; i < n_turns; ++i) {
hipLaunchKernelGGL(( linear_interp_kick) , dim3(blocks), dim3(threads), 2 * (n_slices - 1)*sizeof(double) , 0,
thrust::raw_pointer_cast(dev_dt.data()),
thrust::raw_pointer_cast(dev_dE.data()),
thrust::raw_pointer_cast(dev_voltage.data()),
thrust::raw_pointer_cast(dev_bin_centers.data()),
n_slices, n_particles, acc_kick);
hipDeviceSynchronize();
}
auto end = chrono::high_resolution_clock::now();
thrust::copy(dev_dE.begin(), dev_dE.end(), dE.begin());
auto duration = chrono::duration_cast<chrono::milliseconds>(end - start).count();
printf("function\tcounter\taverage_value\tstd(%%)\tcalls\n");
printf("interp_kick_gpu_v8\ttime(ms)\t%d\t0\t1\n", duration);
printf("dE: %lf\n", accumulate(dE.begin(), dE.end(), 0.0) / n_particles);
// papiprof->stop_counters();
// papiprof->report_timing();
// report results
return 0;
} | 509f15f783e78522521e0622a59b31f59d884ae7.cu | #include <stdlib.h>
#include <stdio.h>
#include "utils.h"
#include "cuda_utils.h"
#include <vector>
#include <random>
#include <iostream>
#include <chrono>
#include <string>
#include <algorithm>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
using namespace std;
__global__ void linear_interp_kick(const double * input,
double * output,
const double * voltage_array,
const double * bin_centers,
const int bins,
const int n,
const double acc_kick)
{
const double center0 = bin_centers[0];
const double inv_bin_width = (bins - 1) /
(bin_centers[bins - 1] - center0);
extern __shared__ double sh_mem[];
double *sh_volt_kick = sh_mem;
double *sh_factor = &sh_mem[bins - 1];
for (size_t i = threadIdx.x;
i < bins - 1;
i += blockDim.x)
{
sh_volt_kick[i] = (voltage_array[i + 1] - voltage_array[i]) * inv_bin_width;
sh_factor[i] = voltage_array[i] - bin_centers[i] * sh_volt_kick[i] + acc_kick;
}
__syncthreads();
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x;
i < n;
i += blockDim.x * gridDim.x)
{
unsigned bin = (unsigned) floor((input[i] - center0) * inv_bin_width);
if (bin < bins - 1)
output[i] += input[i] * sh_volt_kick[bin] + sh_factor[bin];
else
output[i] += acc_kick;
}
}
int main(int argc, char const *argv[])
{
int n_turns = 50000;
int n_particles = 1000000;
int n_slices = 1000;
int blocks = 512;
int threads = 1024;
if (argc > 1) n_turns = atoi(argv[1]);
if (argc > 2) n_particles = atoi(argv[2]);
if (argc > 3) n_slices = atoi(argv[3]);
if (argc > 4) blocks = atoi(argv[4]);
if (argc > 5) threads = atoi(argv[5]);
// setup random engine
default_random_engine gen;
uniform_real_distribution<double> d(0.0, 1.0);
// initialize variables
vector<double> dE, dt;
vector<double> voltage, edges, bin_centers;
double cut_left, cut_right, acc_kick;
string input = HOME "/input_files/distribution_10M_particles.txt";
read_distribution(input, n_particles, dt, dE);
voltage.resize(n_slices);
for (int i = 0; i < n_slices; ++i) {
voltage[i] = d(gen);
}
cut_left = 1.05 * (*min_element(dt.begin(), dt.end()));
cut_right = 0.95 * (*max_element(dt.begin(), dt.end()));
// cut_left = dt[rand() % n_slices];
// cut_right = dt[rand() % n_slices];
acc_kick = 10e6 * d(gen);
if (cut_left > cut_right) swap(cut_left, cut_right);
edges.resize(n_slices);
linspace(cut_left, cut_right, n_slices + 1, edges.data());
bin_centers.resize(n_slices);
for (int i = 0; i < n_slices; ++i) {
bin_centers[i] = (edges[i] + edges[i + 1]) / 2.;
}
thrust::device_vector<double> dev_dE = dE;
thrust::device_vector<double> dev_dt = dt;
thrust::device_vector<double> dev_voltage = voltage;
thrust::device_vector<double> dev_bin_centers = bin_centers;
auto start = chrono::high_resolution_clock::now();
// main loop
for (int i = 0; i < n_turns; ++i) {
linear_interp_kick <<< blocks, threads, 2 * (n_slices - 1)*sizeof(double) >>> (
thrust::raw_pointer_cast(dev_dt.data()),
thrust::raw_pointer_cast(dev_dE.data()),
thrust::raw_pointer_cast(dev_voltage.data()),
thrust::raw_pointer_cast(dev_bin_centers.data()),
n_slices, n_particles, acc_kick);
cudaThreadSynchronize();
}
auto end = chrono::high_resolution_clock::now();
thrust::copy(dev_dE.begin(), dev_dE.end(), dE.begin());
auto duration = chrono::duration_cast<chrono::milliseconds>(end - start).count();
printf("function\tcounter\taverage_value\tstd(%%)\tcalls\n");
printf("interp_kick_gpu_v8\ttime(ms)\t%d\t0\t1\n", duration);
printf("dE: %lf\n", accumulate(dE.begin(), dE.end(), 0.0) / n_particles);
// papiprof->stop_counters();
// papiprof->report_timing();
// report results
return 0;
} |
fd03813d1074a9039a0aefc9a977ca2b539f7859.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright (c) 2021-2022, The Neko Authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the authors nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "dirichlet_kernel.h"
#include <device/device_config.h>
#include <device/cuda/check.h>
extern "C" {
/**
* Fortran wrapper for device dirichlet apply scalar
*/
void cuda_dirichlet_apply_scalar(void *msk, void *x,
real *g, int *m) {
const dim3 nthrds(1024, 1, 1);
const dim3 nblcks(((*m)+1024 - 1)/ 1024, 1, 1);
hipLaunchKernelGGL(( dirichlet_apply_scalar_kernel<real>)
, dim3(nblcks), dim3(nthrds), 0, (hipStream_t) glb_cmd_queue, (int *) msk,
(real *) x,
*g, *m);
CUDA_CHECK(hipGetLastError());
}
/**
* Fortran wrapper for device dirichlet apply vector
*/
void cuda_dirichlet_apply_vector(void *msk, void *x, void *y,
void *z, real *g, int *m) {
const dim3 nthrds(1024, 1, 1);
const dim3 nblcks(((*m)+1024 - 1)/ 1024, 1, 1);
hipLaunchKernelGGL(( dirichlet_apply_vector_kernel<real>)
, dim3(nblcks), dim3(nthrds), 0, (hipStream_t) glb_cmd_queue, (int *) msk,
(real *) x,
(real *) y,
(real *) z,
*g, *m);
CUDA_CHECK(hipGetLastError());
}
}
| fd03813d1074a9039a0aefc9a977ca2b539f7859.cu | /*
Copyright (c) 2021-2022, The Neko Authors
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the authors nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "dirichlet_kernel.h"
#include <device/device_config.h>
#include <device/cuda/check.h>
extern "C" {
/**
* Fortran wrapper for device dirichlet apply scalar
*/
void cuda_dirichlet_apply_scalar(void *msk, void *x,
real *g, int *m) {
const dim3 nthrds(1024, 1, 1);
const dim3 nblcks(((*m)+1024 - 1)/ 1024, 1, 1);
dirichlet_apply_scalar_kernel<real>
<<<nblcks, nthrds, 0, (cudaStream_t) glb_cmd_queue>>>((int *) msk,
(real *) x,
*g, *m);
CUDA_CHECK(cudaGetLastError());
}
/**
* Fortran wrapper for device dirichlet apply vector
*/
void cuda_dirichlet_apply_vector(void *msk, void *x, void *y,
void *z, real *g, int *m) {
const dim3 nthrds(1024, 1, 1);
const dim3 nblcks(((*m)+1024 - 1)/ 1024, 1, 1);
dirichlet_apply_vector_kernel<real>
<<<nblcks, nthrds, 0, (cudaStream_t) glb_cmd_queue>>>((int *) msk,
(real *) x,
(real *) y,
(real *) z,
*g, *m);
CUDA_CHECK(cudaGetLastError());
}
}
|
c0c1643e10c00aa18272d7ccef398745f4460f61.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "hiprand/hiprand.h"
#include "hiprand/hiprand_kernel.h"
#include "math.h"
#include <thrust/device_vector.h>
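// Monte Carlo estimate of pi: each thread draws 4192 uniform (x, y) samples in the unit square
// and counts those with x*x + y*y <= r*r. The per-thread counts are summed on the device with
// thrust::reduce in main(), and pi is then computed as 4.0 * Ncirc / num_trials.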
__global__ void calc_pi(int *dev, long num_trials, double r) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= num_trials) return;
double x, y, test;
int Ncirc = 0;
hiprandState_t st;
hiprand_init(0, idx, 0, &st);
for (int i = 0; i < 4192; i++)
{
x = hiprand_uniform(&st);
y = hiprand_uniform(&st);
test = x * x + y * y;
if (test <= r * r) {
Ncirc++;
}
}
dev[idx] = Ncirc;
}
int main()
{
static long num_trials = 1000000000;
static int gpu_threads = 1024;
static long nblocks = ceil(num_trials / (gpu_threads * 4192) );
double r = 1.0; // radius of circle. Side of squrare is 2*r
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, NULL);
thrust::device_vector<int> dev(nblocks*gpu_threads);
hipLaunchKernelGGL(( calc_pi), dim3(nblocks), dim3(gpu_threads), 0, 0, thrust::raw_pointer_cast(dev.data()), num_trials, r);
double Ncirc = thrust::reduce(dev.begin(), dev.end(), 0.0, thrust::plus<double>());
double pi = 4.0 * ((double) Ncirc / (double) num_trials);
hipEventRecord(stop, NULL);
hipEventSynchronize(stop);
float msecTotal = 0.0f;
hipEventElapsedTime(&msecTotal, start, stop);
printf("\n%ld trials, pi is %lf \n", num_trials, pi);
printf("%.2f milisegundo(s). \n", msecTotal);
return 0;
}
| c0c1643e10c00aa18272d7ccef398745f4460f61.cu | #include <stdio.h>
#include "curand.h"
#include "curand_kernel.h"
#include "math.h"
#include <thrust/device_vector.h>
__global__ void calc_pi(int *dev, long num_trials, double r) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= num_trials) return;
double x, y, test;
int Ncirc = 0;
curandState st;
curand_init(0, idx, 0, &st);
for (int i = 0; i < 4192; i++)
{
x = curand_uniform(&st);
y = curand_uniform(&st);
test = x * x + y * y;
if (test <= r * r) {
Ncirc++;
}
}
dev[idx] = Ncirc;
}
int main()
{
static long num_trials = 1000000000;
static int gpu_threads = 1024;
static long nblocks = ceil(num_trials / (gpu_threads * 4192) );
double r = 1.0; // radius of circle. Side of squrare is 2*r
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, NULL);
thrust::device_vector<int> dev(nblocks*gpu_threads);
calc_pi<<<nblocks, gpu_threads>>>(thrust::raw_pointer_cast(dev.data()), num_trials, r);
double Ncirc = thrust::reduce(dev.begin(), dev.end(), 0.0, thrust::plus<double>());
double pi = 4.0 * ((double) Ncirc / (double) num_trials);
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
float msecTotal = 0.0f;
cudaEventElapsedTime(&msecTotal, start, stop);
printf("\n%ld trials, pi is %lf \n", num_trials, pi);
printf("%.2f milisegundo(s). \n", msecTotal);
return 0;
}
|
70de913c4c29d1c6a653d67b14c61fd3a7ad9854.hip | // !!! This is a file automatically generated by hipify!!!
#include <kernels/gpu/inner_prod.h>
#include <core/tensor_builder.h>
#include <kernels/cpu/math_cpu.h>
#include <global/operator_factory.h>
#include <global/fp16_operator_factory.h>
#include <backend/name.h>
#include <core/device.h>
#include <utils/assert.h>
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include "kernels/gpu/cuda_context.h"
#include "core/device_context.h"
#include "utils/ctxmgr_lite.h"
#include "kernels/gpu/math_cublas.h"
#include "kernels/gpu/math_gpu.h"
namespace ts {
namespace gpu {
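        // Note on the kernel below: this is a reference tiled GEMM (its launch is commented out
        // further down in favour of the cuBLAS / gpu::math gemm paths). Each
        // TRANS_BLOCK_DIM x TRANS_BLOCK_DIM block stages tiles of A and B in shared memory and
        // accumulates C with a Kahan-style compensated sum (the 'comp' variable) to reduce
        // floating-point rounding error.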
template<typename T>
static __global__ void gpu_inner_prod_compute_run_kernel(int m, int n, int k, const T *A, const T *B, T *C) {
__shared__ T ds_A[TRANS_BLOCK_DIM][TRANS_BLOCK_DIM];
__shared__ T ds_B[TRANS_BLOCK_DIM][TRANS_BLOCK_DIM];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int Row = by * blockDim.y + ty;
int Col = bx * blockDim.x + tx;
T comp = T(0.f);
T Cvalue = T(0.f);
for (int t=0; t<(n - 1) / TRANS_BLOCK_DIM + 1; ++t) {
if (Row < m && t * blockDim.x + tx < n)
ds_A[ty][tx] = A[Row*n+t*blockDim.x+tx];
else
ds_A[ty][tx] = T(0.f);
if (t * blockDim.y + ty < n && Col < k)
ds_B[ty][tx] = B[(t*blockDim.y + ty)*k+Col];
else
ds_B[ty][tx] = T(0.f);
__syncthreads();
for (int i = 0; i < blockDim.x; ++i) {
//Cvalue += ds_A[ty][i] * ds_B[i][tx];
T t;
comp -= ds_A[ty][i] * ds_B[i][tx];
t = Cvalue - comp;
comp = (t - Cvalue) + comp;
Cvalue = t;
}
__syncthreads();
if(Row < m && Col < k) {
C[Row*k+Col]=Cvalue;
}
}//end for
}
template<typename T>
static void gpu_inner_prod_compute_run(const Tensor &lhs, const Tensor &rhs, bool transpose, Tensor &out) {
const Shape &lhs_shape = lhs.sizes();
const Shape &rhs_shape = rhs.sizes();
const T *psrc = lhs.data<T>();
const T *pdot = rhs.data<T>();
T *pdst = out.data<T>();
#ifdef TS_USE_CUBLAS
auto &context = ctx::ref<DeviceContext>();
CUDAContextHandle* handle = reinterpret_cast<CUDAContextHandle*>(context.handle);
auto cublas_handle = handle->cublas_handle();
auto rhs_tranpose = transpose ? cublas::Trans : cublas::NoTrans;
auto N = transpose ? rhs_shape[0] : rhs_shape[1];
cublas::math<T>::gemm(cublas_handle, cublas::NoTrans, rhs_tranpose,
lhs_shape[0], N, lhs_shape[1], T(1.f), psrc, pdot, T(0.f), pdst);
/*cublas::math<T>::gemm(cublas_handle,cublas::RowMajor,cublas::NoTrans, cublas::NoTrans,
lhs_shape[0], rhs_shape[1], lhs_shape[1], 1,psrc, lhs_shape[1], pdot, rhs_shape[1], 0,pdst, rhs_shape[1]);*/
#else
auto rhs_tranpose = transpose ? cublas::Trans : cublas::NoTrans;
auto N = transpose ? rhs_shape[0] : rhs_shape[1];
gpu::math<T>::gemm(
cublas::NoTrans, rhs_tranpose,
lhs_shape[0], N, lhs_shape[1], T(1.f), psrc, pdot, T(0.f), pdst);
/*
dim3 blocksize(CUDA_BLOCK(rhs_shape[1], TRANS_BLOCK_DIM), CUDA_BLOCK(lhs_shape[0], TRANS_BLOCK_DIM),1);
dim3 threadsize(TRANS_BLOCK_DIM, TRANS_BLOCK_DIM,1);
RUN_KERNEL(gpu_inner_prod_compute_run_kernel<T>, blocksize, threadsize, lhs_shape[0], lhs_shape[1], rhs_shape[1], psrc, pdot, pdst);
*/
#endif
}
void InnerProd::inner_prod(const Tensor &lhs, const Tensor &rhs, bool transpose, Tensor &out) {
// Notice: the all tensor' memory device are CPU, as given in running_memory_device
DTYPE dtype = out.dtype();
switch (dtype) {
#define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \
case DTYPE: { gpu_inner_prod_compute_run<TYPE>(lhs, rhs, transpose, out); break; }
#ifdef TS_USE_CUDA_FP16
DECLARE_COMPUTE_RUN(FLOAT16, half);
#endif
DECLARE_COMPUTE_RUN(FLOAT32, float);
DECLARE_COMPUTE_RUN(FLOAT64, double);
#undef DECLARE_COMPUTE_RUN
default: {
TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject;
break;
}
}
}
}
}
using namespace ts;
using namespace gpu;
TS_REGISTER_OPERATOR(InnerProd, GPU, name::layer::inner_prod())
#ifdef TS_USE_CUDA_FP16
TS_REGISTER_FP16_OPERATOR(InnerProd, GPU, name::layer::inner_prod())
#endif
| 70de913c4c29d1c6a653d67b14c61fd3a7ad9854.cu | #include <kernels/gpu/inner_prod.h>
#include <core/tensor_builder.h>
#include <kernels/cpu/math_cpu.h>
#include <global/operator_factory.h>
#include <global/fp16_operator_factory.h>
#include <backend/name.h>
#include <core/device.h>
#include <utils/assert.h>
#include "device_launch_parameters.h"
#include <cuda_runtime.h>
#include "kernels/gpu/cuda_context.h"
#include "core/device_context.h"
#include "utils/ctxmgr_lite.h"
#include "kernels/gpu/math_cublas.h"
#include "kernels/gpu/math_gpu.h"
namespace ts {
namespace gpu {
template<typename T>
static __global__ void gpu_inner_prod_compute_run_kernel(int m, int n, int k, const T *A, const T *B, T *C) {
__shared__ T ds_A[TRANS_BLOCK_DIM][TRANS_BLOCK_DIM];
__shared__ T ds_B[TRANS_BLOCK_DIM][TRANS_BLOCK_DIM];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int Row = by * blockDim.y + ty;
int Col = bx * blockDim.x + tx;
T comp = T(0.f);
T Cvalue = T(0.f);
for (int t=0; t<(n - 1) / TRANS_BLOCK_DIM + 1; ++t) {
if (Row < m && t * blockDim.x + tx < n)
ds_A[ty][tx] = A[Row*n+t*blockDim.x+tx];
else
ds_A[ty][tx] = T(0.f);
if (t * blockDim.y + ty < n && Col < k)
ds_B[ty][tx] = B[(t*blockDim.y + ty)*k+Col];
else
ds_B[ty][tx] = T(0.f);
__syncthreads();
for (int i = 0; i < blockDim.x; ++i) {
//Cvalue += ds_A[ty][i] * ds_B[i][tx];
T t;
comp -= ds_A[ty][i] * ds_B[i][tx];
t = Cvalue - comp;
comp = (t - Cvalue) + comp;
Cvalue = t;
}
__syncthreads();
if(Row < m && Col < k) {
C[Row*k+Col]=Cvalue;
}
}//end for
}
template<typename T>
static void gpu_inner_prod_compute_run(const Tensor &lhs, const Tensor &rhs, bool transpose, Tensor &out) {
const Shape &lhs_shape = lhs.sizes();
const Shape &rhs_shape = rhs.sizes();
const T *psrc = lhs.data<T>();
const T *pdot = rhs.data<T>();
T *pdst = out.data<T>();
#ifdef TS_USE_CUBLAS
auto &context = ctx::ref<DeviceContext>();
CUDAContextHandle* handle = reinterpret_cast<CUDAContextHandle*>(context.handle);
auto cublas_handle = handle->cublas_handle();
auto rhs_tranpose = transpose ? cublas::Trans : cublas::NoTrans;
auto N = transpose ? rhs_shape[0] : rhs_shape[1];
cublas::math<T>::gemm(cublas_handle, cublas::NoTrans, rhs_tranpose,
lhs_shape[0], N, lhs_shape[1], T(1.f), psrc, pdot, T(0.f), pdst);
/*cublas::math<T>::gemm(cublas_handle,cublas::RowMajor,cublas::NoTrans, cublas::NoTrans,
lhs_shape[0], rhs_shape[1], lhs_shape[1], 1,psrc, lhs_shape[1], pdot, rhs_shape[1], 0,pdst, rhs_shape[1]);*/
#else
auto rhs_tranpose = transpose ? cublas::Trans : cublas::NoTrans;
auto N = transpose ? rhs_shape[0] : rhs_shape[1];
gpu::math<T>::gemm(
cublas::NoTrans, rhs_tranpose,
lhs_shape[0], N, lhs_shape[1], T(1.f), psrc, pdot, T(0.f), pdst);
/*
dim3 blocksize(CUDA_BLOCK(rhs_shape[1], TRANS_BLOCK_DIM), CUDA_BLOCK(lhs_shape[0], TRANS_BLOCK_DIM),1);
dim3 threadsize(TRANS_BLOCK_DIM, TRANS_BLOCK_DIM,1);
RUN_KERNEL(gpu_inner_prod_compute_run_kernel<T>, blocksize, threadsize, lhs_shape[0], lhs_shape[1], rhs_shape[1], psrc, pdot, pdst);
*/
#endif
}
void InnerProd::inner_prod(const Tensor &lhs, const Tensor &rhs, bool transpose, Tensor &out) {
// Notice: the all tensor' memory device are CPU, as given in running_memory_device
DTYPE dtype = out.dtype();
switch (dtype) {
#define DECLARE_COMPUTE_RUN(DTYPE, TYPE) \
case DTYPE: { gpu_inner_prod_compute_run<TYPE>(lhs, rhs, transpose, out); break; }
#ifdef TS_USE_CUDA_FP16
DECLARE_COMPUTE_RUN(FLOAT16, half);
#endif
DECLARE_COMPUTE_RUN(FLOAT32, float);
DECLARE_COMPUTE_RUN(FLOAT64, double);
#undef DECLARE_COMPUTE_RUN
default: {
TS_LOG_ERROR << this->op() << " not support data type(" << dtype << "): " << type_str(dtype) << eject;
break;
}
}
}
}
}
using namespace ts;
using namespace gpu;
TS_REGISTER_OPERATOR(InnerProd, GPU, name::layer::inner_prod())
#ifdef TS_USE_CUDA_FP16
TS_REGISTER_FP16_OPERATOR(InnerProd, GPU, name::layer::inner_prod())
#endif
|
f82c7a1eb8a7579314fc0c1c91d0e89b8a13a532.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "lab1.h"
static const unsigned W = 1366;
static const unsigned H = 1024;
static const unsigned NFRAME = 480;
#define N_PARTICLES 10000
#define SCALE (1.0 / 20)
#define INIT_DISTANCE 80
#define GRAVITY 10.0
#define G_REPULSION 1.5
#define G_COHESION 1.0
#define LOSS 1.05
// typedef float2 Coord;
// typedef float2 Velocity;
// typedef short3* Velocity;
inline __host__ __device__ void operator*=(float2&a, float b) {
a.x *= b;
a.y *= b;
}
inline __host__ __device__ void operator/=(float2&a, float b) {
a.x /= b;
a.y /= b;
}
inline __host__ __device__ void operator+=(float2&a, float2 b) {
a.x += b.x;
a.y += b.y;
}
inline __host__ __device__ float2 operator-(float2 a, float2 b) {
return make_float2(a.x - b.x, a.y - b.y);
}
inline __host__ __device__ float2 operator+(float2 a, float2 b) {
return make_float2(a.x + b.x, a.y + b.y);
}
inline __host__ __device__ float2 operator*(float2 a, float b) {
return make_float2(a.x * b, a.y * b);
}
inline __host__ __device__ float2 operator/(float2 a, float b) {
return make_float2(a.x / b, a.y / b);
}
inline __host__ __device__ short3 operator*(float a, short3 b) {
return make_short3(a * b.x, a * b.y, a * b.z);
}
inline __host__ __device__ short3 operator+(short3 a, short3 b) {
return make_short3(a.x + b.x, a.y + b.y, a.z + b.z);
}
__device__ float norm(float2 v) {
return sqrt(v.x * v.x + v.y * v.y);
}
struct Lab1VideoGenerator::Impl {
int t = 0;
float2* coordinate;
float2* velocity;
float2* prev_coordinate;
float2* prev_velocity;
short3* canvas;
};
__global__ void fillKernel(short3* canvas, float alpha) {
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < W * H) {
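// blend the pixel toward black: alpha = 1 clears the canvas, a small alpha leaves a fading motion trail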
canvas[i] = (1 - alpha) * canvas[i] + alpha * make_short3(0, 0, 0);
}
}
void fill(short3* canvas, float alpha=1.0){
hipLaunchKernelGGL(( fillKernel), dim3(((W * H + 255)/ 256)),dim3(256), 0, 0, canvas, alpha);
}
__global__ void rgb2yuvKernel(short3* canvas, uint8_t* yuv) {
auto x = blockDim.x * blockIdx.x + threadIdx.x;
auto y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < W && y < H) {
auto c = canvas[y * W + x];
yuv[y * W + x] = 0.299*c.x + 0.587*c.y + 0.114*c.z;
if (x % 2 == 0 && y % 2 == 0) {
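// 4:2:0 chroma subsampling: average this 2x2 block and emit one U and one V sample into the planar output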
auto c2 = canvas[(y + 0) * W + x + 1];
auto c3 = canvas[(y + 1) * W + x + 0];
auto c4 = canvas[(y + 1) * W + x + 1];
c.x = (c.x + c2.x + c3.x + c4.x) / 4;
c.y = (c.y + c2.y + c3.y + c4.y) / 4;
c.z = (c.z + c2.z + c3.z + c4.z) / 4;
auto indU = W*H + y/2 * W/2 + x/2;
auto indV = W*H + W*H/4 + y/2 * W/2 + x/2;
yuv[indU] = -0.169*c.x - 0.331*c.y + 0.500*c.z + 128;
yuv[indV] = 0.500*c.x - 0.419*c.y - 0.081*c.z + 128;
}
}
}
void rgb2yuv(short3* canvas, uint8_t* yuv) {
dim3 dimBlock(16, 16);
dim3 dimGrid((W + 15)/16, (H + 15)/16);
hipLaunchKernelGGL(( rgb2yuvKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, canvas, yuv);
}
__device__ void drawDot(float2 coord, short3 color, short3* canvas) {
int x = coord.x * SCALE + 20;
int y = coord.y * SCALE;
if ( x >= 0 && x < W - 1 && y >= 0 && y < H - 1) {
canvas[(y + 0)*W + x + 0] = color;
canvas[(y + 0)*W + x + 1] = color;
canvas[(y + 1)*W + x + 0] = color;
canvas[(y + 1)*W + x + 1] = color;
}
}
__global__ void initParticlesKernel(float2* coord, float2* velocity, short3* canvas) {
auto i = blockDim.x * blockIdx.x + threadIdx.x;
int xStart = (W/SCALE - INIT_DISTANCE*100) / 2;
if (i < N_PARTICLES) {
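// arrange the particles on a 100-wide grid, horizontally centred, with zero initial velocity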
int x = i % 100;
int y = i / 100;
coord[i] = make_float2(xStart + x * INIT_DISTANCE, y * INIT_DISTANCE);
velocity[i] = make_float2(0, 0);
}
}
void initParticles(float2* coord, float2* velocity, short3* canvas) {
hipLaunchKernelGGL(( initParticlesKernel), dim3((N_PARTICLES + 255) / 256), dim3(256), 0, 0, coord, velocity, canvas);
}
__device__ short3 acce2color(float2 acce) {
// float acceNorm = acce.x * acce.x + acce.y * acce.y;
float acceNorm = norm(acce);
short3 color;
float threshold = GRAVITY;
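// map acceleration magnitude through a sigmoid centred on GRAVITY onto a cold-to-hot (blue/green -> yellow -> red) colour ramp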
float sig = (1 / (1 + expf(threshold - acceNorm)) - 0.5) * 255 * 4;
if (sig > 0) {
if (sig > 255) {
color.x = 255;
color.y = 255 - (sig - 255);
} else {
color.x = sig;
color.y = 255;
}
color.z = 0;
} else {
if (sig > -255) {
color.z = -sig;
color.y = 255;
} else {
color.y = 255 + (sig + 255);
color.z = 255;
}
color.x = 0;
}
return color;
}
__global__ void updateParticlesKernel(float2* prev_coord, float2* prev_velocity,
float2* coord, float2* velocity, short3* canvas) {
const short3 WHITE = make_short3(255, 255, 255);
auto i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N_PARTICLES) {
velocity[i] = prev_velocity[i];
coord[i] = prev_coord[i];
float2 acce = make_float2(0, 0);
// reflection
if (coord[i].y >= (H - 15)/SCALE) {
acce.y -= 2 * prev_velocity[i].y;
}
if (coord[i].x >= (W - 15)/SCALE) {
acce.x -= 2 * prev_velocity[i].x;
}
if (coord[i].x <= 15/SCALE) {
acce.x -= 2 * prev_velocity[i].x;
}
// calculate acceleration
acce.y += GRAVITY;
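// O(N^2) pairwise interaction: short-range repulsion (~1/d^2) plus weaker long-range cohesion (~1/d)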
for (int j = 0; j < N_PARTICLES; ++j) {
float2 d = prev_coord[i] - prev_coord[j];
double d2 = (double)(d.x * d.x + d.y * d.y);
if (d2 < 1e-5) {
d2 = 1e-5;
}
float2 a = d / sqrt(d2) * (G_REPULSION / d2 - G_COHESION / sqrt(d2));
acce += a;
}
acce.x -= G_COHESION / (prev_coord[i].x - 15);
acce.x += G_COHESION / (W - 15 - prev_coord[i].x);
acce.y += G_COHESION / (prev_coord[i].y - 15);
// update velocity
velocity[i] += acce / LOSS;
// update coordinate
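// integrate position with velocity/8 per sub-step; Generate() runs 8 of these sub-steps per output frame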
coord[i] += velocity[i] / 8;
// boundary
coord[i].x = coord[i].x < 10/SCALE ? 10/SCALE : coord[i].x;
coord[i].x = coord[i].x > (W - 10)/SCALE ? (W - 10)/SCALE : coord[i].x;
coord[i].y = coord[i].y > (H - 10)/SCALE ? (H - 10)/SCALE : coord[i].y;
short3 color = acce2color(acce);
drawDot(coord[i], color, canvas);
}
}
void updateParticles(float2* prev_coord, float2* prev_velocity,
float2* coord, float2* velocity, short3* canvas) {
hipLaunchKernelGGL(( updateParticlesKernel), dim3((N_PARTICLES + 63) / 64), dim3(64), 0, 0, prev_coord, prev_velocity,
coord, velocity, canvas);
}
Lab1VideoGenerator::Lab1VideoGenerator(): impl(new Impl) {
hipMalloc(&(impl->velocity), sizeof(float2) * N_PARTICLES);
hipMalloc(&(impl->coordinate), sizeof(float2) * N_PARTICLES);
hipMalloc(&(impl->prev_velocity), sizeof(float2) * N_PARTICLES);
hipMalloc(&(impl->prev_coordinate), sizeof(float2) * N_PARTICLES);
hipMalloc(&(impl->canvas), sizeof(short3) * W * H * 3 / 2);
fill(impl->canvas);
initParticles(impl->coordinate, impl->velocity, impl->canvas);
}
Lab1VideoGenerator::~Lab1VideoGenerator() {}
void Lab1VideoGenerator::get_info(Lab1VideoInfo &info) {
info.w = W;
info.h = H;
info.n_frame = NFRAME;
// fps = 24/1 = 24
info.fps_n = 24;
info.fps_d = 1;
};
void Lab1VideoGenerator::Generate(uint8_t *yuv) {
// hipMemset(yuv, (impl->t)*255/NFRAME, W*H);
// hipMemset(yuv+W*H, 128, W*H/2);
for (int i = 0; i < 8; ++i) {
fill(impl->canvas, 0.15);
hipMemcpy(impl->prev_coordinate, impl->coordinate,
sizeof(float2) * N_PARTICLES, hipMemcpyDeviceToDevice);
hipMemcpy(impl->prev_velocity, impl->velocity,
sizeof(float2) * N_PARTICLES, hipMemcpyDeviceToDevice);
updateParticles(impl->prev_coordinate, impl->prev_velocity,
impl->coordinate, impl->velocity, impl->canvas);
rgb2yuv(impl->canvas, yuv);
}
++(impl->t);
}
| f82c7a1eb8a7579314fc0c1c91d0e89b8a13a532.cu | #include "lab1.h"
static const unsigned W = 1366;
static const unsigned H = 1024;
static const unsigned NFRAME = 480;
#define N_PARTICLES 10000
#define SCALE (1.0 / 20)
#define INIT_DISTANCE 80
#define GRAVITY 10.0
#define G_REPULSION 1.5
#define G_COHESION 1.0
#define LOSS 1.05
// typedef float2 Coord;
// typedef float2 Velocity;
// typedef short3* Velocity;
inline __host__ __device__ void operator*=(float2&a, float b) {
a.x *= b;
a.y *= b;
}
inline __host__ __device__ void operator/=(float2&a, float b) {
a.x /= b;
a.y /= b;
}
inline __host__ __device__ void operator+=(float2&a, float2 b) {
a.x += b.x;
a.y += b.y;
}
inline __host__ __device__ float2 operator-(float2 a, float2 b) {
return make_float2(a.x - b.x, a.y - b.y);
}
inline __host__ __device__ float2 operator+(float2 a, float2 b) {
return make_float2(a.x + b.x, a.y + b.y);
}
inline __host__ __device__ float2 operator*(float2 a, float b) {
return make_float2(a.x * b, a.y * b);
}
inline __host__ __device__ float2 operator/(float2 a, float b) {
return make_float2(a.x / b, a.y / b);
}
inline __host__ __device__ short3 operator*(float a, short3 b) {
return make_short3(a * b.x, a * b.y, a * b.z);
}
inline __host__ __device__ short3 operator+(short3 a, short3 b) {
return make_short3(a.x + b.x, a.y + b.y, a.z + b.z);
}
__device__ float norm(float2 v) {
return sqrt(v.x * v.x + v.y * v.y);
}
struct Lab1VideoGenerator::Impl {
int t = 0;
float2* coordinate;
float2* velocity;
float2* prev_coordinate;
float2* prev_velocity;
short3* canvas;
};
__global__ void fillKernel(short3* canvas, float alpha) {
unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < W * H) {
canvas[i] = (1 - alpha) * canvas[i] + alpha * make_short3(0, 0, 0);
}
}
void fill(short3* canvas, float alpha=1.0){
fillKernel<<<((W * H + 255)/ 256),256>>>(canvas, alpha);
}
__global__ void rgb2yuvKernel(short3* canvas, uint8_t* yuv) {
auto x = blockDim.x * blockIdx.x + threadIdx.x;
auto y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < W && y < H) {
auto c = canvas[y * W + x];
yuv[y * W + x] = 0.299*c.x + 0.587*c.y + 0.114*c.z;
if (x % 2 == 0 && y % 2 == 0) {
auto c2 = canvas[(y + 0) * W + x + 1];
auto c3 = canvas[(y + 1) * W + x + 0];
auto c4 = canvas[(y + 1) * W + x + 1];
c.x = (c.x + c2.x + c3.x + c4.x) / 4;
c.y = (c.y + c2.y + c3.y + c4.y) / 4;
c.z = (c.z + c2.z + c3.z + c4.z) / 4;
auto indU = W*H + y/2 * W/2 + x/2;
auto indV = W*H + W*H/4 + y/2 * W/2 + x/2;
yuv[indU] = -0.169*c.x - 0.331*c.y + 0.500*c.z + 128;
yuv[indV] = 0.500*c.x - 0.419*c.y - 0.081*c.z + 128;
}
}
}
void rgb2yuv(short3* canvas, uint8_t* yuv) {
dim3 dimBlock(16, 16);
dim3 dimGrid((W + 15)/16, (H + 15)/16);
rgb2yuvKernel<<<dimGrid, dimBlock>>>(canvas, yuv);
}
__device__ void drawDot(float2 coord, short3 color, short3* canvas) {
int x = coord.x * SCALE + 20;
int y = coord.y * SCALE;
if ( x >= 0 && x < W - 1 && y >= 0 && y < H - 1) {
canvas[(y + 0)*W + x + 0] = color;
canvas[(y + 0)*W + x + 1] = color;
canvas[(y + 1)*W + x + 0] = color;
canvas[(y + 1)*W + x + 1] = color;
}
}
__global__ void initParticlesKernel(float2* coord, float2* velocity, short3* canvas) {
auto i = blockDim.x * blockIdx.x + threadIdx.x;
int xStart = (W/SCALE - INIT_DISTANCE*100) / 2;
if (i < N_PARTICLES) {
int x = i % 100;
int y = i / 100;
coord[i] = make_float2(xStart + x * INIT_DISTANCE, y * INIT_DISTANCE);
velocity[i] = make_float2(0, 0);
}
}
void initParticles(float2* coord, float2* velocity, short3* canvas) {
initParticlesKernel<<<(N_PARTICLES + 255) / 256, 256>>>(coord, velocity, canvas);
}
__device__ short3 acce2color(float2 acce) {
// float acceNorm = acce.x * acce.x + acce.y * acce.y;
float acceNorm = norm(acce);
short3 color;
float threshold = GRAVITY;
float sig = (1 / (1 + expf(threshold - acceNorm)) - 0.5) * 255 * 4;
if (sig > 0) {
if (sig > 255) {
color.x = 255;
color.y = 255 - (sig - 255);
} else {
color.x = sig;
color.y = 255;
}
color.z = 0;
} else {
if (sig > -255) {
color.z = -sig;
color.y = 255;
} else {
color.y = 255 + (sig + 255);
color.z = 255;
}
color.x = 0;
}
return color;
}
__global__ void updateParticlesKernel(float2* prev_coord, float2* prev_velocity,
float2* coord, float2* velocity, short3* canvas) {
const short3 WHITE = make_short3(255, 255, 255);
auto i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N_PARTICLES) {
velocity[i] = prev_velocity[i];
coord[i] = prev_coord[i];
float2 acce = make_float2(0, 0);
// reflection
if (coord[i].y >= (H - 15)/SCALE) {
acce.y -= 2 * prev_velocity[i].y;
}
if (coord[i].x >= (W - 15)/SCALE) {
acce.x -= 2 * prev_velocity[i].x;
}
if (coord[i].x <= 15/SCALE) {
acce.x -= 2 * prev_velocity[i].x;
}
// calculate acceleration
acce.y += GRAVITY;
for (int j = 0; j < N_PARTICLES; ++j) {
float2 d = prev_coord[i] - prev_coord[j];
double d2 = (double)(d.x * d.x + d.y * d.y);
if (d2 < 1e-5) {
d2 = 1e-5;
}
float2 a = d / sqrt(d2) * (G_REPULSION / d2 - G_COHESION / sqrt(d2));
acce += a;
}
acce.x -= G_COHESION / (prev_coord[i].x - 15);
acce.x += G_COHESION / (W - 15 - prev_coord[i].x);
acce.y += G_COHESION / (prev_coord[i].y - 15);
// update velocity
velocity[i] += acce / LOSS;
// update coordinate
coord[i] += velocity[i] / 8;
// boundary
coord[i].x = coord[i].x < 10/SCALE ? 10/SCALE : coord[i].x;
coord[i].x = coord[i].x > (W - 10)/SCALE ? (W - 10)/SCALE : coord[i].x;
coord[i].y = coord[i].y > (H - 10)/SCALE ? (H - 10)/SCALE : coord[i].y;
short3 color = acce2color(acce);
drawDot(coord[i], color, canvas);
}
}
void updateParticles(float2* prev_coord, float2* prev_velocity,
float2* coord, float2* velocity, short3* canvas) {
updateParticlesKernel<<<(N_PARTICLES + 63) / 64, 64>>>(prev_coord, prev_velocity,
coord, velocity, canvas);
}
Lab1VideoGenerator::Lab1VideoGenerator(): impl(new Impl) {
cudaMalloc(&(impl->velocity), sizeof(float2) * N_PARTICLES);
cudaMalloc(&(impl->coordinate), sizeof(float2) * N_PARTICLES);
cudaMalloc(&(impl->prev_velocity), sizeof(float2) * N_PARTICLES);
cudaMalloc(&(impl->prev_coordinate), sizeof(float2) * N_PARTICLES);
cudaMalloc(&(impl->canvas), sizeof(short3) * W * H * 3 / 2);
fill(impl->canvas);
initParticles(impl->coordinate, impl->velocity, impl->canvas);
}
Lab1VideoGenerator::~Lab1VideoGenerator() {}
void Lab1VideoGenerator::get_info(Lab1VideoInfo &info) {
info.w = W;
info.h = H;
info.n_frame = NFRAME;
// fps = 24/1 = 24
info.fps_n = 24;
info.fps_d = 1;
};
void Lab1VideoGenerator::Generate(uint8_t *yuv) {
// cudaMemset(yuv, (impl->t)*255/NFRAME, W*H);
// cudaMemset(yuv+W*H, 128, W*H/2);
for (int i = 0; i < 8; ++i) {
fill(impl->canvas, 0.15);
cudaMemcpy(impl->prev_coordinate, impl->coordinate,
sizeof(float2) * N_PARTICLES, cudaMemcpyDeviceToDevice);
cudaMemcpy(impl->prev_velocity, impl->velocity,
sizeof(float2) * N_PARTICLES, cudaMemcpyDeviceToDevice);
updateParticles(impl->prev_coordinate, impl->prev_velocity,
impl->coordinate, impl->velocity, impl->canvas);
rgb2yuv(impl->canvas, yuv);
}
++(impl->t);
}
|
1ba10884df39c88622d7b1f660fea3a3a3015313.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "math.h"
#include "time.h"
#include <iostream>
#include <fstream>
#include <iomanip>
#define BLOCK_SIZE 16
/*
 * prints a matrix to a file
 * Because the matrices are padded with dummy 0s, the function takes 3 dim arguments:
 * the actual x and y dimensions, plus dim, the padded square matrix's dimension
 */
void print_matrices(float* matrix, char* file_Name, int x_dim, int y_dim, int dim)
{
std::ofstream outFile;
outFile.open(file_Name);
outFile << std::fixed;
outFile << std::setprecision(2);
for (int i = 0; i < x_dim; i++) {
for (int j = 0; j < y_dim; j++) {
outFile << matrix[i * dim + j] << " ";
}
outFile << std::endl;
}
}
//naive CPU matrix multiplication code
//because of its simplicity directly taken from web
//it multiplies square matrices
__host__ void cpu_matrix_mult(float* h_a, float* h_b, float* h_result, int m) {
for (int i = 0; i < m; ++i)
{
for (int j = 0; j < m; ++j)
{
float tmp = 0.0;
for (int h = 0; h < m; ++h)
{
tmp += h_a[i * m + h] * h_b[h * m + j];
}
h_result[i * m + j] = tmp;
}
}
}
//this function fills the matrices with sin and cos values
//the matrices are padded to a common square size (a multiple of BLOCK_SIZE) so the tiled multiplication can be performed
__host__ int fill(float** Lmatrix, float** Rmatrix, int LdimX, int LdimY, int RdimX, int RdimY) {
int sqr_dim_X, sqr_dim_Y, size;
sqr_dim_X = RdimX;
if (LdimX > RdimX) {
sqr_dim_X = LdimX;
}
sqr_dim_Y = RdimY;
if (LdimY > RdimY) {
sqr_dim_Y = LdimY;
}
size = sqr_dim_Y;
if (sqr_dim_X > sqr_dim_Y) {
size = sqr_dim_X;
}
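//round the working size up to a multiple of BLOCK_SIZE so every tile is full and the kernel needs no bounds checks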
int temp = size / BLOCK_SIZE + (size % BLOCK_SIZE == 0 ? 0 : 1);
size = temp * BLOCK_SIZE;
size_t pt_size = size * size * sizeof(float);
*Lmatrix = (float*)malloc(pt_size);
*Rmatrix = (float*)malloc(pt_size);
memset(*Lmatrix, 0, pt_size);
memset(*Rmatrix, 0, pt_size);
for (int i = 0; i < LdimX; i++) {
for (int j = 0; j < LdimY; j++) {
int dummy = size * i + j;
(*Lmatrix)[dummy] = sinf(dummy);
}
}
for (int i = 0; i < RdimX; i++) {
for (int j = 0; j < RdimY; j++) {
int dummy = size * i + j;
(*Rmatrix)[dummy] = cosf(dummy);
}
}
return size;
}
// Kernel that executes on the CUDA device
/* left: left operand
* right: right operand
* res : result array
* dim: M dimension of MxM matrix
* Blok_size: defines block size
*
* this function divides the matrices to tiles and load those tiles to shared memory
* After loading to shared memory it function multiplies with the corresponding tile of other matrix
* After finishing multiplication of 1 row and 1 column by collecting results of different tiles
* it stores the result in global memory
* Function has coalesced access to the global memory and prevent bank conflict
*/
__global__ void multiply(float* left, float* right, float* res, int dim) {
int i, j;
float temp = 0;
__shared__ float Left_shared_t[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Right_shared_t[BLOCK_SIZE][BLOCK_SIZE];
// Row i of matrix left
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
for (int tileNUM = 0; tileNUM < gridDim.x; tileNUM++) {
// Column j of matrix left
j = tileNUM * BLOCK_SIZE + threadIdx.x;
i = tileNUM * BLOCK_SIZE + threadIdx.y;
// Load left[i][j] to shared mem
Left_shared_t[threadIdx.y][threadIdx.x] = left[row * dim + j];// Coalesced access
// Load right[i][j] to shared mem
Right_shared_t[threadIdx.y][threadIdx.x] = right[i * dim + col]; // Coalesced access
// Synchronize before computation
__syncthreads();
// Accumulate one tile of res from tiles of left and right in shared mem
for (int k = 0; k < BLOCK_SIZE; k++) {
temp += Left_shared_t[threadIdx.y][k] * Right_shared_t[k][threadIdx.x]; //no shared memory bank conflict
}
// Synchronize
__syncthreads();
}
// Store accumulated value to res
res[row * dim + col] = temp;
}
// main routine that executes on the host
int main(void)
{
//size of the vectors to be processed and matrix dimensions
int Left_matrix_x, Left_matrix_y, Right_matrix_x, Right_matrix_y, Left_vector_size, Right_vector_size;
float* Left_Vector_h, * Right_Vector_h, * Left_Vector_d, * Right_Vector_d, * Res_h, * Res_d, * CPU; // Pointer to host & device arrays
printf("Enter m n n k :\n");
scanf("%d %d %d %d", &Left_matrix_x, &Left_matrix_y, &Right_matrix_x, &Right_matrix_y); // input matrix dimensions are taken
int dim = fill(&Left_Vector_h, &Right_Vector_h, Left_matrix_x, Left_matrix_y, Right_matrix_x, Right_matrix_y); //fills the matrices with random values
print_matrices(Left_Vector_h, "Input_LHS", Left_matrix_x, Left_matrix_y, dim);
print_matrices(Right_Vector_h, "Input_RHS", Right_matrix_x, Right_matrix_y, dim);
size_t vector_size;
vector_size = dim * dim * sizeof(float);
Res_h = (float*)malloc(vector_size); // Allocate array on host for result
CPU = (float*)malloc(vector_size);// Allocate array on host for CPU_matrix_multiplication result
//@@ Allocate GPU memory here for Left Vector, Right Vector and Result
hipMalloc((void**)&Left_Vector_d, vector_size);
hipMalloc((void**)&Right_Vector_d, vector_size);
hipMalloc((void**)&Res_d, vector_size);
//@@ Copy memory to the GPU here
hipMemcpy(Left_Vector_d, Left_Vector_h, vector_size, hipMemcpyHostToDevice);
hipMemcpy(Right_Vector_d, Right_Vector_h, vector_size, hipMemcpyHostToDevice);
hipMemcpy(Res_d, Res_h, vector_size, hipMemcpyHostToDevice);
//Block dimension is directly from block_size
dim3 Block_dim(BLOCK_SIZE, BLOCK_SIZE);
//Grid dimension is found by dividing matrix dimension to block_size
dim3 Grid_dim(dim / BLOCK_SIZE, dim / BLOCK_SIZE);
//commented out the functions which helps to calculate time
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
//@@ kernel call
multiply << <Grid_dim, Block_dim >> > (Left_Vector_d, Right_Vector_d, Res_d, dim);
//commented out the functions which helps to calculate time
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float et;
hipEventElapsedTime(&et, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
// Retrieve result from device and store it in host array
hipMemcpy(Res_h, Res_d, vector_size, hipMemcpyDeviceToHost);
clock_t begin = clock();
cpu_matrix_mult(Left_Vector_h, Right_Vector_h, CPU, dim); //matrix multiplication on cpu
clock_t end = clock();
double time_spent = (double)1000 * (end - begin) / CLOCKS_PER_SEC;
//commented out the functions which helps to calculate time
printf("GPU time= %f ms\n", et);
printf("CPU time= %lf ms\n", time_spent);
//Prints the results
print_matrices(Res_h, "GPU_out", Left_matrix_x, Right_matrix_y, dim);
print_matrices(CPU, "CPU_out", Left_matrix_x, Right_matrix_y, dim);
bool eqaul = true;
for (int i = 0; i < Left_matrix_x && eqaul; i++) {
for (int j = 0; j < Right_matrix_y && eqaul; j++) {
if (abs(Res_h[i * dim + j] - CPU[i * dim + j]) > 0.001)
{
eqaul = false;
printf("NOT EQUAL\n");
}
}
}
if (eqaul)
{
std::cout << "Results are equal!" << std::endl;
}
else
{
std::cout << "Results are NOT equal!" << std::endl;
}
//@@ Cleanup
hipFree(Left_Vector_d);
hipFree(Right_Vector_d);
hipFree(Res_d);
} | 1ba10884df39c88622d7b1f660fea3a3a3015313.cu | #include <stdio.h>
#include <string.h>
#include <cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "math.h"
#include "time.h"
#include <iostream>
#include <fstream>
#include <iomanip>
#define BLOCK_SIZE 16
/*
 * prints a matrix to a file
 * Because the matrices are padded with dummy 0s, the function takes 3 dim arguments:
 * the actual x and y dimensions, plus dim, the padded square matrix's dimension
 */
void print_matrices(float* matrix, char* file_Name, int x_dim, int y_dim, int dim)
{
std::ofstream outFile;
outFile.open(file_Name);
outFile << std::fixed;
outFile << std::setprecision(2);
for (int i = 0; i < x_dim; i++) {
for (int j = 0; j < y_dim; j++) {
outFile << matrix[i * dim + j] << " ";
}
outFile << std::endl;
}
}
//naive CPU matrix multiplication code
//because of its simplicity directly taken from web
//it multiplies square matrices
__host__ void cpu_matrix_mult(float* h_a, float* h_b, float* h_result, int m) {
for (int i = 0; i < m; ++i)
{
for (int j = 0; j < m; ++j)
{
float tmp = 0.0;
for (int h = 0; h < m; ++h)
{
tmp += h_a[i * m + h] * h_b[h * m + j];
}
h_result[i * m + j] = tmp;
}
}
}
//this function fills the matrices with sin and cos values
//the matrices are padded to a common square size (a multiple of BLOCK_SIZE) so the tiled multiplication can be performed
__host__ int fill(float** Lmatrix, float** Rmatrix, int LdimX, int LdimY, int RdimX, int RdimY) {
int sqr_dim_X, sqr_dim_Y, size;
sqr_dim_X = RdimX;
if (LdimX > RdimX) {
sqr_dim_X = LdimX;
}
sqr_dim_Y = RdimY;
if (LdimY > RdimY) {
sqr_dim_Y = LdimY;
}
size = sqr_dim_Y;
if (sqr_dim_X > sqr_dim_Y) {
size = sqr_dim_X;
}
int temp = size / BLOCK_SIZE + (size % BLOCK_SIZE == 0 ? 0 : 1);
size = temp * BLOCK_SIZE;
size_t pt_size = size * size * sizeof(float);
*Lmatrix = (float*)malloc(pt_size);
*Rmatrix = (float*)malloc(pt_size);
memset(*Lmatrix, 0, pt_size);
memset(*Rmatrix, 0, pt_size);
for (int i = 0; i < LdimX; i++) {
for (int j = 0; j < LdimY; j++) {
int dummy = size * i + j;
(*Lmatrix)[dummy] = sinf(dummy);
}
}
for (int i = 0; i < RdimX; i++) {
for (int j = 0; j < RdimY; j++) {
int dummy = size * i + j;
(*Rmatrix)[dummy] = cosf(dummy);
}
}
return size;
}
// Kernel that executes on the CUDA device
/* left: left operand
* right: right operand
* res : result array
* dim: M dimension of MxM matrix
* Blok_size: defines block size
*
* this function divides the matrices to tiles and load those tiles to shared memory
* After loading to shared memory it function multiplies with the corresponding tile of other matrix
* After finishing multiplication of 1 row and 1 column by collecting results of different tiles
* it stores the result in global memory
* Function has coalesced access to the global memory and prevent bank conflict
*/
__global__ void multiply(float* left, float* right, float* res, int dim) {
int i, j;
float temp = 0;
__shared__ float Left_shared_t[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Right_shared_t[BLOCK_SIZE][BLOCK_SIZE];
// Row i of matrix left
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
for (int tileNUM = 0; tileNUM < gridDim.x; tileNUM++) {
// Column j of matrix left
j = tileNUM * BLOCK_SIZE + threadIdx.x;
i = tileNUM * BLOCK_SIZE + threadIdx.y;
// Load left[i][j] to shared mem
Left_shared_t[threadIdx.y][threadIdx.x] = left[row * dim + j];// Coalesced access
// Load right[i][j] to shared mem
Right_shared_t[threadIdx.y][threadIdx.x] = right[i * dim + col]; // Coalesced access
// Synchronize before computation
__syncthreads();
// Accumulate one tile of res from tiles of left and right in shared mem
for (int k = 0; k < BLOCK_SIZE; k++) {
temp += Left_shared_t[threadIdx.y][k] * Right_shared_t[k][threadIdx.x]; //no shared memory bank conflict
}
// Synchronize
__syncthreads();
}
// Store accumulated value to res
res[row * dim + col] = temp;
}
// main routine that executes on the host
int main(void)
{
//size of the vectors to be processed and matrix dimensions
int Left_matrix_x, Left_matrix_y, Right_matrix_x, Right_matrix_y, Left_vector_size, Right_vector_size;
float* Left_Vector_h, * Right_Vector_h, * Left_Vector_d, * Right_Vector_d, * Res_h, * Res_d, * CPU; // Pointer to host & device arrays
printf("Enter m n n k :\n");
scanf("%d %d %d %d", &Left_matrix_x, &Left_matrix_y, &Right_matrix_x, &Right_matrix_y); // input matrix dimensions are taken
int dim = fill(&Left_Vector_h, &Right_Vector_h, Left_matrix_x, Left_matrix_y, Right_matrix_x, Right_matrix_y); //fills the matrices with random values
print_matrices(Left_Vector_h, "Input_LHS", Left_matrix_x, Left_matrix_y, dim);
print_matrices(Right_Vector_h, "Input_RHS", Right_matrix_x, Right_matrix_y, dim);
size_t vector_size;
vector_size = dim * dim * sizeof(float);
Res_h = (float*)malloc(vector_size); // Allocate array on host for result
CPU = (float*)malloc(vector_size);// Allocate array on host for CPU_matrix_multiplication result
//@@ Allocate GPU memory here for Left Vector, Right Vector and Result
cudaMalloc((void**)&Left_Vector_d, vector_size);
cudaMalloc((void**)&Right_Vector_d, vector_size);
cudaMalloc((void**)&Res_d, vector_size);
//@@ Copy memory to the GPU here
cudaMemcpy(Left_Vector_d, Left_Vector_h, vector_size, cudaMemcpyHostToDevice);
cudaMemcpy(Right_Vector_d, Right_Vector_h, vector_size, cudaMemcpyHostToDevice);
cudaMemcpy(Res_d, Res_h, vector_size, cudaMemcpyHostToDevice);
//Block dimension is directly from block_size
dim3 Block_dim(BLOCK_SIZE, BLOCK_SIZE);
//Grid dimension is found by dividing matrix dimension to block_size
dim3 Grid_dim(dim / BLOCK_SIZE, dim / BLOCK_SIZE);
//commented out the functions which helps to calculate time
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
//@@ kernel call
multiply << <Grid_dim, Block_dim >> > (Left_Vector_d, Right_Vector_d, Res_d, dim);
//commented out the functions which helps to calculate time
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float et;
cudaEventElapsedTime(&et, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Retrieve result from device and store it in host array
cudaMemcpy(Res_h, Res_d, vector_size, cudaMemcpyDeviceToHost);
clock_t begin = clock();
cpu_matrix_mult(Left_Vector_h, Right_Vector_h, CPU, dim); //matrix multiplication on cpu
clock_t end = clock();
double time_spent = (double)1000 * (end - begin) / CLOCKS_PER_SEC;
//commented out the functions which helps to calculate time
printf("GPU time= %f ms\n", et);
printf("CPU time= %lf ms\n", time_spent);
//Prints the results
print_matrices(Res_h, "GPU_out", Left_matrix_x, Right_matrix_y, dim);
print_matrices(CPU, "CPU_out", Left_matrix_x, Right_matrix_y, dim);
bool eqaul = true;
for (int i = 0; i < Left_matrix_x && eqaul; i++) {
for (int j = 0; j < Right_matrix_y && eqaul; j++) {
if (abs(Res_h[i * dim + j] - CPU[i * dim + j]) > 0.001)
{
eqaul = false;
printf("NOT EQUAL\n");
}
}
}
if (eqaul)
{
std::cout << "Results are equal!" << std::endl;
}
else
{
std::cout << "Results are NOT equal!" << std::endl;
}
//@@ Cleanup
cudaFree(Left_Vector_d);
cudaFree(Right_Vector_d);
cudaFree(Res_d);
} |
52f2597283dcadd49074293c10ef9ba7f955fefd.hip | // !!! This is a file automatically generated by hipify!!!
#include "..\Prerequisites.h"
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput";
char const * const errMsg = "Invalid input to MEX file.";
mxInitGPU();
if (nrhs != 1 || !mxIsComplex(prhs[0]))
mexErrMsgIdAndTxt(errId, errMsg);
mxArrayAdapter A(prhs[0]);
int ndims = mxGetNumberOfDimensions(A.underlyingarray);
if (ndims < 1 || ndims > 3)
mexErrMsgIdAndTxt(errId, errMsg);
int3 dimensions = MWDimsToInt3(ndims, mxGetDimensions(A.underlyingarray));
tcomplex* d_result;
hipMalloc((void**)&d_result, dimensions.x * dimensions.y * dimensions.z * sizeof(tcomplex));
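// inverse complex-to-complex FFT on the GPU, writing into d_result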
d_IFFTC2C(A.GetAsManagedDeviceTComplex(), d_result, ndims, dimensions);
mwSize realdims[3] = { dimensions.x, dimensions.y, dimensions.z };
mxArrayAdapter B(mxCreateNumericArray(mxGetNumberOfDimensions(A.underlyingarray),
realdims,
mxGetClassID(A.underlyingarray),
mxCOMPLEX));
B.SetFromDeviceTComplex(d_result);
plhs[0] = B.underlyingarray;
hipFree(d_result);
} | 52f2597283dcadd49074293c10ef9ba7f955fefd.cu | #include "..\Prerequisites.h"
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
char const * const errId = "parallel:gpu:mexGPUExample:InvalidInput";
char const * const errMsg = "Invalid input to MEX file.";
mxInitGPU();
if (nrhs != 1 || !mxIsComplex(prhs[0]))
mexErrMsgIdAndTxt(errId, errMsg);
mxArrayAdapter A(prhs[0]);
int ndims = mxGetNumberOfDimensions(A.underlyingarray);
if (ndims < 1 || ndims > 3)
mexErrMsgIdAndTxt(errId, errMsg);
int3 dimensions = MWDimsToInt3(ndims, mxGetDimensions(A.underlyingarray));
tcomplex* d_result;
cudaMalloc((void**)&d_result, dimensions.x * dimensions.y * dimensions.z * sizeof(tcomplex));
d_IFFTC2C(A.GetAsManagedDeviceTComplex(), d_result, ndims, dimensions);
mwSize realdims[3] = { dimensions.x, dimensions.y, dimensions.z };
mxArrayAdapter B(mxCreateNumericArray(mxGetNumberOfDimensions(A.underlyingarray),
realdims,
mxGetClassID(A.underlyingarray),
mxCOMPLEX));
B.SetFromDeviceTComplex(d_result);
plhs[0] = B.underlyingarray;
cudaFree(d_result);
} |
95665e50f9dd604e69901a992c1d0745f5df4054.hip | // !!! This is a file automatically generated by hipify!!!
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <iostream>
#include <fstream>
#include <math.h>
#include <time.h>
#include "fixedPointCompress.h"
#include <hip/hip_runtime_api.h>
//#define M_SIZE 1048576
#define RANDO_MIN -10
#define RANDO_MAX 10
#define BATCH_SIZE 64 //Compressed elements per batch
#define BATCHES_PER_BLOCK 8
#define NOT_COMP 0
#define COMP 1
#define GPU_COMP 2
#define TEST_MAT 1
//each thread decompresses one element; the threads of a batch share that batch's start offset
__global__ void decompress_fixed24_8_gpu(uint8_t* in, unsigned* pointers, unsigned len, fixed_point24_8* out, uint32_t batchSize, int numBatches) {
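// per-batch layout: 1 scheme byte followed by 2 bytes per element; the two bytes are combined big-endian below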
__shared__ uint8_t schemes[BATCHES_PER_BLOCK];
__shared__ unsigned startPos[BATCHES_PER_BLOCK];
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if (idx < len){
int batchIdx = idx % batchSize;
int myBatch = ((float)idx/(float)len)* numBatches;
int localBatchNum = myBatch%BATCHES_PER_BLOCK; //perBlock
//rep thread gets compression scheme
if(batchIdx == 0){
startPos[localBatchNum] = pointers[myBatch];
//schemes[localBatchNum] = in[startPos[localBatchNum]]; //TODO BREAKS
}
__syncthreads();
//copying results
unsigned myStart = startPos[localBatchNum];
out[idx].data = (int) (int16_t)(in[myStart + 1 + 2*batchIdx] << 8 | in[myStart + 1 + 2*batchIdx + 1]);
}
}
__global__ void decompress_fixed24_8_gpu_dummy(uint8_t* in, unsigned len, fixed_point24_8* out, uint32_t batchSize){
return;
}
//determining size of each batch and writing into pointers
__global__ void compress_fixed24_8_gpu_histo(fixed_point24_8* in, unsigned len, uint32_t batchSize,uint32_t numBatches, unsigned* pointers){
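// count, per batch, the elements whose integer part does not fit in 8 signed bits (note: this increment is not atomic)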
int elemIdx = threadIdx.x + blockIdx.x*blockDim.x;
int batchIdx = ((float)elemIdx /(float)len)* numBatches;
int mydata = in[elemIdx].data >> 8;
if( fabsf(mydata) >= powf (2,7) -1){
pointers[batchIdx]++;
}
}
//each batch then finds its starting point
//TODO make sure to zero pointers
__global__ void compress_fixed24_8_gpu_scan(fixed_point24_8* in, unsigned len, uint8_t* out, uint32_t batchSize,unsigned* pointers){
int batchIdx = threadIdx.x + blockIdx.x*blockDim.x;
//possible optimizations: collaborative loading into shared memory, thread coarsening
uint8_t myScheme = pointers[batchIdx] >0 ? 3 : 0;
int sum = 0;
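// serial exclusive scan over the preceding batches: a 2-byte-scheme batch occupies 1 + 2*batchSize bytes, a 4-byte one 1 + 4*batchSize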
for (int i = 0; i < batchIdx; i++){
if (pointers[i] == 0){
sum += (1 + batchSize*2);
}
else{
sum += (1 + batchSize*4);
}
}
out[sum] = myScheme;
pointers[batchIdx] = sum;
}
//compress the data in parallel
__global__ void compress_fixed24_8_gpu_compress(fixed_point24_8* in, unsigned len, uint8_t* out, uint32_t batchSize, uint32_t numBatches,unsigned* pointers){
__shared__ uint8_t schemes[BATCHES_PER_BLOCK];
__shared__ unsigned startPos[BATCHES_PER_BLOCK];
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if (idx < len){
int batchIdx = idx % batchSize;
int myBatch = ((float)idx/(float)len)* numBatches;
int localBatchNum = myBatch%BATCHES_PER_BLOCK; //perBlock
//rep thread gets compression scheme
if(batchIdx == 0){
startPos[localBatchNum] = pointers[myBatch];
//schemes[localBatchNum] = in[startPos[localBatchNum]]; //TODO BREAKS
}
__syncthreads();
//compressing values
unsigned myStart = startPos[localBatchNum];
uint16_t myValue = mask16_16 & (in[idx].data);
memcpy(&out[myStart+ 1 + batchIdx * 2] , &myValue, 2);
}
}
void gpuCompression(fixed_point24_8* in, unsigned len, uint32_t batchSize,uint32_t numBatches, unsigned* pointers, uint8_t* out){
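// three-pass GPU compression: histogram the batches, scan to find each batch's output offset, then pack the values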
int xWidth = BATCH_SIZE * BATCHES_PER_BLOCK; int yWidth =1;
int numXBlocks = ceil((float)len/(float)xWidth); int numYBlocks = 1;
dim3 dimGrid(numXBlocks, numYBlocks,1);
dim3 dimBlock(xWidth, yWidth,1);
//(fixed_point24_8* in, unsigned len, uint32_t batchSize,uint32_t numBatches, unsigned* pointers){
//histo
hipLaunchKernelGGL(( compress_fixed24_8_gpu_histo), dim3(dimGrid),dim3(dimBlock), 0, 0, in, len, batchSize, numBatches , pointers);
hipDeviceSynchronize();
//scan
hipLaunchKernelGGL(( compress_fixed24_8_gpu_scan), dim3(dimGrid),dim3(dimBlock), 0, 0, in, len, out, batchSize , pointers);
hipDeviceSynchronize();
//compress
hipLaunchKernelGGL(( compress_fixed24_8_gpu_compress), dim3(dimGrid),dim3(dimBlock), 0, 0, in, len, out, batchSize , numBatches, pointers);
hipDeviceSynchronize();
//copying back
}
void writeFloats(int num){
std::ofstream f("out_floats.raw");
f << num << "\n";
srand( time(NULL) );
for (int idx = 0; idx < num; idx++){
float currRand = RANDO_MIN + static_cast <float> (rand()) /( static_cast <float> (RAND_MAX/(RANDO_MAX-RANDO_MIN)));
f << currRand << "\n";
}
f.close();
}
int run(int size, int moder) {
//Defines
int M_SIZE = pow(2, size);
int mode = moder;
/*Allocations and declarations*/
//Host
bool match;
int numBytes;
int numXBlocks,numYBlocks,xWidth,yWidth;
double timeSpent;
clock_t begin, end;
fixed_point24_8* in;
fixed_point24_8* in_decompressed;
uint8_t* in_compressed;
unsigned* pointers;
int bytes;
//Device
uint8_t* in_compressed_D;
fixed_point24_8* in_uncompressed_D;
unsigned* pointers_D;
fixed_point24_8* out_decompressed_D;
uint8_t* out_compressed_D;
//static vars
int numBatches = ceil((float)M_SIZE / (float)BATCH_SIZE);
int worstCaseBytes = M_SIZE*(sizeof(fixed_point24_8) + 1) + numBatches*sizeof(unsigned);
hipProfilerStart();
/*Allocating host space for data */
in = (fixed_point24_8*) malloc(sizeof(fixed_point24_8)*M_SIZE);
in_decompressed = (fixed_point24_8*) malloc(sizeof(fixed_point24_8)*M_SIZE);
in_compressed = (uint8_t*) malloc(worstCaseBytes);
pointers = (unsigned*) malloc((numBatches) * sizeof(unsigned));
//creating random values
srand( time(NULL) );
for (int idx = 0; idx < M_SIZE; idx++){
float currRand = RANDO_MIN + static_cast <float> (rand()) /( static_cast <float> (RAND_MAX/(RANDO_MAX-RANDO_MIN)));
in[idx].insert(currRand);
//printf("Val :%f\n", currRand);
}
/*Allocating GPU data arrays*/
hipMalloc((void **)&in_compressed_D, worstCaseBytes);
hipMalloc((void **)&pointers_D, numBatches*sizeof(unsigned));
hipMalloc((void **)&out_decompressed_D, sizeof(fixed_point24_8)*M_SIZE);
hipMalloc((void **)&in_uncompressed_D, sizeof(fixed_point24_8)*M_SIZE);
hipMalloc((void **)&out_compressed_D, worstCaseBytes);
if(mode == COMP){
//printf("Beginning Compression Mode\n");
/*Beginning cpu timer */
begin = clock();
numBytes = compressFixed24_8(in,M_SIZE,0,BATCH_SIZE,in_compressed,pointers);
end = clock();
timeSpent = ((double)(end - begin)* 1000.0 )/ (CLOCKS_PER_SEC);
//printf("Compression Time: %f ms\n", timeSpent);
//begin = clock();
//looking at pointers
/*
for( int i = 0; i< numBatches; i++){
printf("BatchNumber = %d | pointer = %d\n",i,pointers[i]);
}
return 0;
*/
//comparing matricies
/*
int retVal = decompressFixed24_8(in_compressed, pointers, M_SIZE, in_decompressed, BATCH_SIZE);
for(int i = 0; i < M_SIZE; i++){
int sub = in[i].data - in_decompressed[i].data;
//printf("i=%d| %d - %d = %d\n", i, in[i].data, in_decompressed[i].data, sub);
if(sub != 0){
printf("ERROROROROROR\n");
return -1;
}
}
printf("Matricies match for cpu!!!\n");
return;
//*/
/*Copying host to device*/
//printf("Number of bytes to copy = %d\n",numBytes);
//printf("Number of batches = %d\n",numBatches);
hipMemcpy(in_compressed_D, in_compressed, numBytes, hipMemcpyHostToDevice);
hipMemcpy(pointers_D, pointers, numBatches*sizeof(unsigned), hipMemcpyHostToDevice);
hipMemset(out_decompressed_D,0, M_SIZE*sizeof(fixed_point24_8));
/*Launching kernel*/
xWidth = BATCH_SIZE * BATCHES_PER_BLOCK; yWidth =1;
numXBlocks = ceil((float)M_SIZE/(float)xWidth); numYBlocks = 1;
//printf("xWidth = %d\n",xWidth);
//printf("numXBlocks = %d\n",numXBlocks);
hipLaunchKernelGGL(( decompress_fixed24_8_gpu), dim3(numXBlocks),dim3(xWidth), 0, 0, in_compressed_D, pointers_D, M_SIZE, out_decompressed_D, BATCH_SIZE, numBatches);
hipDeviceSynchronize();
/*Ending Timer*/
end = clock();
timeSpent = ((double)(end - begin)* 1000.0 )/ (CLOCKS_PER_SEC);
bytes = numBytes + sizeof(unsigned)*numBatches;
printf("Compressed Kernel: %f ms| and occupied %d bytes\n", timeSpent, bytes);
/*Copying memory back*/
hipMemcpy(in_decompressed, out_decompressed_D , M_SIZE*sizeof(fixed_point24_8), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
}
else if(mode == NOT_COMP){
//printf("Beginning UnCompressed Mode\n");
/*Beginning cpu timer */
begin = clock();
/*Copying host to device*/
hipMemcpy(in_compressed_D, in, M_SIZE*sizeof(fixed_point24_8), hipMemcpyHostToDevice); // remember this is the uncompressed array
hipMemset(out_decompressed_D,0, M_SIZE*sizeof(fixed_point24_8)); //TODO Check if writing output array is necessary
/*Launching kernel*/
numXBlocks = 1;numYBlocks = 1;
xWidth = 1;yWidth =1;
dim3 dimGrid(numXBlocks, numYBlocks,1);
dim3 dimBlock(xWidth, yWidth,1);
hipLaunchKernelGGL(( decompress_fixed24_8_gpu_dummy), dim3(dimGrid),dim3(dimBlock), 0, 0, in_compressed_D, numBytes, out_decompressed_D, BATCH_SIZE);
hipDeviceSynchronize();
/*Ending Timer*/
end = clock();
timeSpent = ((double)(end - begin)* 1000.0 )/ (CLOCKS_PER_SEC);
bytes = M_SIZE*sizeof(fixed_point24_8);
printf("Total Time(No compression): %f ms | and occupied %d bytes\n", timeSpent, bytes);
/*Copying memory back*/
hipMemcpy(in_decompressed, out_decompressed_D , M_SIZE*sizeof(fixed_point24_8), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
}
else if(mode == GPU_COMP){
printf("Beginning GPU UnCompressed Mode\n");
//unsigned * compressedSize;
//hipMalloc(&compressedSize, sizeof(unsigned*));
/*Copying host to device*/
hipMemcpy(in_compressed_D, in, M_SIZE*sizeof(fixed_point24_8), hipMemcpyHostToDevice); // remember this is the uncompressed array
//hipMemset(out_decompressed_D,0, M_SIZE*sizeof(fixed_point24_8)); //TODO Check if writing output array is necessary
/*Beginning cpu timer */
begin = clock();
/*Launching kernel*/
gpuCompression(in_uncompressed_D, M_SIZE, BATCH_SIZE, numBatches, pointers_D, out_compressed_D);
/*Copying memory back*/
hipMemcpy(in_compressed, out_compressed_D , numBatches*(BATCH_SIZE*2 + 1), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
/*Ending Timer*/
end = clock();
timeSpent = ((double)(end - begin)* 1000.0 )/ (CLOCKS_PER_SEC);
bytes = M_SIZE*sizeof(fixed_point24_8);
printf("Total Time(No compression): %f ms | and occupied %d bytes\n", timeSpent, bytes);
}
/*Checking valid decompressed data*/
match = 1;
for(int i =0; i < M_SIZE && TEST_MAT == 1; i++){
if(in_decompressed[i].data != in[i].data){
//printf("MSIZE is %d\n", M_SIZE);
//printf("i=%d|Difference with %x and %x\n",i,in_decompressed[i].data, in[i].data);
//printf("i=%d|Value %d\n",i,in_decompressed[i].data);
match = 0;
}
}
if(match){
printf("Matricies match\n");
}
else{
printf("Dont Match\n");
}
//Writing output to a file
if (mode == COMP){
//FILE* f = fopen("out_comp.csv", "a+");
//printf(f,"%f,",timeSpent);
//fclose(f);
std::ofstream f("out_comp.csv", std::ofstream::out | std::ofstream::app);
f << timeSpent << ",";
f.close();
}
else if (mode == NOT_COMP){
std::ofstream f("out_nocomp.csv", std::ofstream::out | std::ofstream::app);
f << timeSpent << ",";
f.close();
}
else{
std::ofstream f("out_gpucomp.csv", std::ofstream::out | std::ofstream::app);
f << timeSpent << ",";
f.close();
}
hipProfilerStop();
/*Freeing memory*/
//Host
free(in);
free(in_decompressed);
free(in_compressed);
free(pointers);
//Device
hipFree(in_compressed_D);
hipFree(pointers_D);
hipFree(out_decompressed_D);
//}
//printf("Finished\n");
return 0;
}
//0 no compress | 1 compress
int main(){
std::ofstream f("out_comp.csv");
f << " Compressed Kernel,";
f.close();
std::ofstream f2("out_nocomp.csv");
f2 << " Uncompressed Kernel,";
f2.close();
std::ofstream f3("out_gpucomp.csv");
f3 << " GPU compressed Kernel,";
f3.close();
for(int i = 1; i< 25; i++){
printf("----------------( %d )-------------\n",i);
run(i,0);
run(i,1);
run(i,2);
}
}
| 95665e50f9dd604e69901a992c1d0745f5df4054.cu | #include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <iostream>
#include <fstream>
#include <math.h>
#include <time.h>
#include "fixedPointCompress.h"
#include <cuda_profiler_api.h>
//#define M_SIZE 1048576
#define RANDO_MIN -10
#define RANDO_MAX 10
#define BATCH_SIZE 64 //Compressed elements per batch
#define BATCHES_PER_BLOCK 8
#define NOT_COMP 0
#define COMP 1
#define GPU_COMP 2
#define TEST_MAT 1
//each thread decompresses one element; the threads of a batch share that batch's start offset
__global__ void decompress_fixed24_8_gpu(uint8_t* in, unsigned* pointers, unsigned len, fixed_point24_8* out, uint32_t batchSize, int numBatches) {
__shared__ uint8_t schemes[BATCHES_PER_BLOCK];
__shared__ unsigned startPos[BATCHES_PER_BLOCK];
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if (idx < len){
int batchIdx = idx % batchSize;
int myBatch = ((float)idx/(float)len)* numBatches;
int localBatchNum = myBatch%BATCHES_PER_BLOCK; //perBlock
//rep thread gets compression scheme
if(batchIdx == 0){
startPos[localBatchNum] = pointers[myBatch];
//schemes[localBatchNum] = in[startPos[localBatchNum]]; //TODO BREAKS
}
__syncthreads();
//copying results
unsigned myStart = startPos[localBatchNum];
out[idx].data = (int) (int16_t)(in[myStart + 1 + 2*batchIdx] << 8 | in[myStart + 1 + 2*batchIdx + 1]);
}
}
__global__ void decompress_fixed24_8_gpu_dummy(uint8_t* in, unsigned len, fixed_point24_8* out, uint32_t batchSize){
return;
}
//determining size of each batch and writing into pointers
__global__ void compress_fixed24_8_gpu_histo(fixed_point24_8* in, unsigned len, uint32_t batchSize,uint32_t numBatches, unsigned* pointers){
int elemIdx = threadIdx.x + blockIdx.x*blockDim.x;
int batchIdx = ((float)elemIdx /(float)len)* numBatches;
int mydata = in[elemIdx].data >> 8;
if( fabsf(mydata) >= powf (2,7) -1){
pointers[batchIdx]++;
}
}
//each batch then finds its starting point
//TODO make sure to zero pointers
__global__ void compress_fixed24_8_gpu_scan(fixed_point24_8* in, unsigned len, uint8_t* out, uint32_t batchSize,unsigned* pointers){
int batchIdx = threadIdx.x + blockIdx.x*blockDim.x;
//possible optimizations: collaborative loading into shared memory, thread coarsening
uint8_t myScheme = pointers[batchIdx] >0 ? 3 : 0;
int sum = 0;
for (int i = 0; i < batchIdx; i++){
if (pointers[i] == 0){
sum += (1 + batchSize*2);
}
else{
sum += (1 + batchSize*4);
}
}
out[sum] = myScheme;
pointers[batchIdx] = sum;
}
//compress the data in parallel
__global__ void compress_fixed24_8_gpu_compress(fixed_point24_8* in, unsigned len, uint8_t* out, uint32_t batchSize, uint32_t numBatches,unsigned* pointers){
__shared__ uint8_t schemes[BATCHES_PER_BLOCK];
__shared__ unsigned startPos[BATCHES_PER_BLOCK];
int idx = threadIdx.x + blockIdx.x*blockDim.x;
if (idx < len){
int batchIdx = idx % batchSize;
int myBatch = ((float)idx/(float)len)* numBatches;
int localBatchNum = myBatch%BATCHES_PER_BLOCK; //perBlock
//rep thread gets compression scheme
if(batchIdx == 0){
startPos[localBatchNum] = pointers[myBatch];
//schemes[localBatchNum] = in[startPos[localBatchNum]]; //TODO BREAKS
}
__syncthreads();
//compressing values
unsigned myStart = startPos[localBatchNum];
uint16_t myValue = mask16_16 & (in[idx].data);
memcpy(&out[myStart+ 1 + batchIdx * 2] , &myValue, 2);
}
}
void gpuCompression(fixed_point24_8* in, unsigned len, uint32_t batchSize,uint32_t numBatches, unsigned* pointers, uint8_t* out){
int xWidth = BATCH_SIZE * BATCHES_PER_BLOCK; int yWidth =1;
int numXBlocks = ceil((float)len/(float)xWidth); int numYBlocks = 1;
dim3 dimGrid(numXBlocks, numYBlocks,1);
dim3 dimBlock(xWidth, yWidth,1);
//(fixed_point24_8* in, unsigned len, uint32_t batchSize,uint32_t numBatches, unsigned* pointers){
//histo
compress_fixed24_8_gpu_histo<<<dimGrid,dimBlock>>>(in, len, batchSize, numBatches , pointers);
cudaDeviceSynchronize();
//scan
compress_fixed24_8_gpu_scan<<<dimGrid,dimBlock>>>(in, len, out, batchSize , pointers);
cudaDeviceSynchronize();
//compress
compress_fixed24_8_gpu_compress<<<dimGrid,dimBlock>>>(in, len, out, batchSize , numBatches, pointers);
cudaDeviceSynchronize();
//copying back
}
void writeFloats(int num){
std::ofstream f("out_floats.raw");
f << num << "\n";
srand( time(NULL) );
for (int idx = 0; idx < num; idx++){
float currRand = RANDO_MIN + static_cast <float> (rand()) /( static_cast <float> (RAND_MAX/(RANDO_MAX-RANDO_MIN)));
f << currRand << "\n";
}
f.close();
}
int run(int size, int moder) {
//Defines
int M_SIZE = pow(2, size);
int mode = moder;
/*Allocations and declarations*/
//Host
bool match;
int numBytes;
int numXBlocks,numYBlocks,xWidth,yWidth;
double timeSpent;
clock_t begin, end;
fixed_point24_8* in;
fixed_point24_8* in_decompressed;
uint8_t* in_compressed;
unsigned* pointers;
int bytes;
//Device
uint8_t* in_compressed_D;
fixed_point24_8* in_uncompressed_D;
unsigned* pointers_D;
fixed_point24_8* out_decompressed_D;
uint8_t* out_compressed_D;
//static vars
int numBatches = ceil((float)M_SIZE / (float)BATCH_SIZE);
int worstCaseBytes = M_SIZE*(sizeof(fixed_point24_8) + 1) + numBatches*sizeof(unsigned);
cudaProfilerStart();
/*Allocating host space for data */
in = (fixed_point24_8*) malloc(sizeof(fixed_point24_8)*M_SIZE);
in_decompressed = (fixed_point24_8*) malloc(sizeof(fixed_point24_8)*M_SIZE);
in_compressed = (uint8_t*) malloc(worstCaseBytes);
pointers = (unsigned*) malloc((numBatches) * sizeof(unsigned));
//creating random values
srand( time(NULL) );
for (int idx = 0; idx < M_SIZE; idx++){
float currRand = RANDO_MIN + static_cast <float> (rand()) /( static_cast <float> (RAND_MAX/(RANDO_MAX-RANDO_MIN)));
in[idx].insert(currRand);
//printf("Val :%f\n", currRand);
}
/*Allocating GPU data arrays*/
cudaMalloc((void **)&in_compressed_D, worstCaseBytes);
cudaMalloc((void **)&pointers_D, numBatches*sizeof(unsigned));
cudaMalloc((void **)&out_decompressed_D, sizeof(fixed_point24_8)*M_SIZE);
cudaMalloc((void **)&in_uncompressed_D, sizeof(fixed_point24_8)*M_SIZE);
cudaMalloc((void **)&out_compressed_D, worstCaseBytes);
if(mode == COMP){
//printf("Beginning Compression Mode\n");
/*Beginning cpu timer */
begin = clock();
numBytes = compressFixed24_8(in,M_SIZE,0,BATCH_SIZE,in_compressed,pointers);
end = clock();
timeSpent = ((double)(end - begin)* 1000.0 )/ (CLOCKS_PER_SEC);
//printf("Compression Time: %f ms\n", timeSpent);
//begin = clock();
//looking at pointers
/*
for( int i = 0; i< numBatches; i++){
printf("BatchNumber = %d | pointer = %d\n",i,pointers[i]);
}
return 0;
*/
//comparing matricies
/*
int retVal = decompressFixed24_8(in_compressed, pointers, M_SIZE, in_decompressed, BATCH_SIZE);
for(int i = 0; i < M_SIZE; i++){
int sub = in[i].data - in_decompressed[i].data;
//printf("i=%d| %d - %d = %d\n", i, in[i].data, in_decompressed[i].data, sub);
if(sub != 0){
printf("ERROROROROROR\n");
return -1;
}
}
printf("Matricies match for cpu!!!\n");
return;
//*/
/*Copying host to device*/
//printf("Number of bytes to copy = %d\n",numBytes);
//printf("Number of batches = %d\n",numBatches);
cudaMemcpy(in_compressed_D, in_compressed, numBytes, cudaMemcpyHostToDevice);
cudaMemcpy(pointers_D, pointers, numBatches*sizeof(unsigned), cudaMemcpyHostToDevice);
cudaMemset(out_decompressed_D,0, M_SIZE*sizeof(fixed_point24_8));
/*Launching kernel*/
xWidth = BATCH_SIZE * BATCHES_PER_BLOCK; yWidth =1;
numXBlocks = ceil((float)M_SIZE/(float)xWidth); numYBlocks = 1;
//printf("xWidth = %d\n",xWidth);
//printf("numXBlocks = %d\n",numXBlocks);
decompress_fixed24_8_gpu<<<numXBlocks,xWidth>>>(in_compressed_D, pointers_D, M_SIZE, out_decompressed_D, BATCH_SIZE, numBatches);
cudaDeviceSynchronize();
/*Ending Timer*/
end = clock();
timeSpent = ((double)(end - begin)* 1000.0 )/ (CLOCKS_PER_SEC);
bytes = numBytes + sizeof(unsigned)*numBatches;
printf("Compressed Kernel: %f ms| and occupied %d bytes\n", timeSpent, bytes);
/*Copying memory back*/
cudaMemcpy(in_decompressed, out_decompressed_D , M_SIZE*sizeof(fixed_point24_8), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
}
else if(mode == NOT_COMP){
//printf("Beginning UnCompressed Mode\n");
/*Beginning cpu timer */
begin = clock();
/*Copying host to device*/
cudaMemcpy(in_compressed_D, in, M_SIZE*sizeof(fixed_point24_8), cudaMemcpyHostToDevice); // remember this is the uncompressed array
cudaMemset(out_decompressed_D,0, M_SIZE*sizeof(fixed_point24_8)); //TODO Check if writing output array is necessary
/*Launching kernel*/
numXBlocks = 1;numYBlocks = 1;
xWidth = 1;yWidth =1;
dim3 dimGrid(numXBlocks, numYBlocks,1);
dim3 dimBlock(xWidth, yWidth,1);
decompress_fixed24_8_gpu_dummy<<<dimGrid,dimBlock>>>(in_compressed_D, numBytes, out_decompressed_D, BATCH_SIZE);
cudaDeviceSynchronize();
/*Ending Timer*/
end = clock();
timeSpent = ((double)(end - begin)* 1000.0 )/ (CLOCKS_PER_SEC);
bytes = M_SIZE*sizeof(fixed_point24_8);
printf("Total Time(No compression): %f ms | and occupied %d bytes\n", timeSpent, bytes);
/*Copying memory back*/
cudaMemcpy(in_decompressed, out_decompressed_D , M_SIZE*sizeof(fixed_point24_8), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
}
else if(mode == GPU_COMP){
printf("Beginning GPU UnCompressed Mode\n");
//unsigned * compressedSize;
//cudaMalloc(&compressedSize, sizeof(unsigned*));
/*Copying host to device*/
cudaMemcpy(in_compressed_D, in, M_SIZE*sizeof(fixed_point24_8), cudaMemcpyHostToDevice); // remember this is the uncompressed array
//cudaMemset(out_decompressed_D,0, M_SIZE*sizeof(fixed_point24_8)); //TODO Check if writing output array is necessary
/*Beginning cpu timer */
begin = clock();
/*Launching kernel*/
gpuCompression(in_uncompressed_D, M_SIZE, BATCH_SIZE, numBatches, pointers_D, out_compressed_D);
/*Copying memory back*/
cudaMemcpy(in_compressed, out_compressed_D , numBatches*(BATCH_SIZE*2 + 1), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
/*Ending Timer*/
end = clock();
timeSpent = ((double)(end - begin)* 1000.0 )/ (CLOCKS_PER_SEC);
bytes = M_SIZE*sizeof(fixed_point24_8);
printf("Total Time(No compression): %f ms | and occupied %d bytes\n", timeSpent, bytes);
}
/*Checking valid decompressed data*/
match = 1;
for(int i =0; i < M_SIZE && TEST_MAT == 1; i++){
if(in_decompressed[i].data != in[i].data){
//printf("MSIZE is %d\n", M_SIZE);
//printf("i=%d|Difference with %x and %x\n",i,in_decompressed[i].data, in[i].data);
//printf("i=%d|Value %d\n",i,in_decompressed[i].data);
match = 0;
}
}
if(match){
printf("Matricies match\n");
}
else{
printf("Dont Match\n");
}
//Writing output to a file
if (mode == COMP){
//FILE* f = fopen("out_comp.csv", "a+");
//printf(f,"%f,",timeSpent);
//fclose(f);
std::ofstream f("out_comp.csv", std::ofstream::out | std::ofstream::app);
f << timeSpent << ",";
f.close();
}
else if (mode == NOT_COMP){
std::ofstream f("out_nocomp.csv", std::ofstream::out | std::ofstream::app);
f << timeSpent << ",";
f.close();
}
else{
std::ofstream f("out_gpucomp.csv", std::ofstream::out | std::ofstream::app);
f << timeSpent << ",";
f.close();
}
cudaProfilerStop();
/*Freeing memory*/
//Host
free(in);
free(in_decompressed);
free(in_compressed);
free(pointers);
//Device
cudaFree(in_compressed_D);
cudaFree(pointers_D);
cudaFree(out_decompressed_D);
//}
//printf("Finished\n");
return 0;
}
//0 no compress | 1 compress
int main(){
std::ofstream f("out_comp.csv");
f << " Compressed Kernel,";
f.close();
std::ofstream f2("out_nocomp.csv");
f2 << " Uncompressed Kernel,";
f2.close();
std::ofstream f3("out_gpucomp.csv");
f3 << " GPU compressed Kernel,";
f3.close();
for(int i = 1; i< 25; i++){
printf("----------------( %d )-------------\n",i);
run(i,0);
run(i,1);
run(i,2);
}
}
|
0c1a23180143ad8a752d25e49cc492c5ecc09bc2.hip | // !!! This is a file automatically generated by hipify!!!
/*
============================================================================
Filename : assignment4.c
Author : Arash Pourhabibi
============================================================================
*/
#include <iostream>
#include <iomanip>
#include <fstream>
#include <sys/time.h>
#include <hip/hip_runtime.h>
using namespace std;
#include "utility.h"
void array_process(double *input, double *output, int length, int iterations);
void GPU_array_process(double *input, double *output, int length, int iterations);
int main (int argc, const char *argv[]) {
int length, iterations;
double time;
if (argc != 3) {
cout<<"Invalid input!"<<endl<<"Usage: ./assignment4 <length> <iterations>"<<endl;
return 1;
} else {
length = atoi(argv[1]);
iterations = atoi(argv[2]);
if(length%2!=0)
{
cout<<"Invalid input!"<<endl<<"Array length must be even"<<endl;
return 1;
}
}
//Allocate arrays
double *input = new double[length*length];
double *output = new double[length*length];
//Reset Device
hipDeviceReset();
//Initialize the arrays
init(input, length);
init(output, length);
//Start timer
set_clock();
/*Use either the CPU or the GPU functions*/
//CPU Baseline
//Uncomment the block to use the baseline
/*array_process(input, output, length, iterations);
if(iterations%2==0)
{
double *temp;
temp = input;
input = output;
output = temp;
}*/
//GPU function
GPU_array_process(input, output, length, iterations);
//Stop timer
time = elapsed_time();
//Report time required for n iterations
cout<<"Running the algorithm on "<<length<<" by "<<length<<" array for "<<iterations<<" iteration takes "<<setprecision(4)<<time<<"s"<<endl;
//Save array in filelength
save(output, length);
//Free allocated memory
delete[] input;
delete[] output;
return 0;
}
| 0c1a23180143ad8a752d25e49cc492c5ecc09bc2.cu | /*
============================================================================
Filename : assignment4.c
Author : Arash Pourhabibi
============================================================================
*/
#include <iostream>
#include <iomanip>
#include <fstream>
#include <sys/time.h>
#include <cuda_runtime.h>
using namespace std;
#include "utility.h"
void array_process(double *input, double *output, int length, int iterations);
void GPU_array_process(double *input, double *output, int length, int iterations);
int main (int argc, const char *argv[]) {
int length, iterations;
double time;
if (argc != 3) {
cout<<"Invalid input!"<<endl<<"Usage: ./assignment4 <length> <iterations>"<<endl;
return 1;
} else {
length = atoi(argv[1]);
iterations = atoi(argv[2]);
if(length%2!=0)
{
cout<<"Invalid input!"<<endl<<"Array length must be even"<<endl;
return 1;
}
}
//Allocate arrays
double *input = new double[length*length];
double *output = new double[length*length];
//Reset Device
cudaDeviceReset();
//Initialize the arrays
init(input, length);
init(output, length);
//Start timer
set_clock();
/*Use either the CPU or the GPU functions*/
//CPU Baseline
//Uncomment the block to use the baseline
/*array_process(input, output, length, iterations);
if(iterations%2==0)
{
double *temp;
temp = input;
input = output;
output = temp;
}*/
//GPU function
GPU_array_process(input, output, length, iterations);
//Stop timer
time = elapsed_time();
//Report time required for n iterations
cout<<"Running the algorithm on "<<length<<" by "<<length<<" array for "<<iterations<<" iteration takes "<<setprecision(4)<<time<<"s"<<endl;
//Save array in filelength
save(output, length);
//Free allocated memory
delete[] input;
delete[] output;
return 0;
}
|
9b33ed53afb30a69a5e1af071088e471078d7d5a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "update_presynaptic_activities_C_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_recent_presynaptic_activities_C = NULL;
hipMalloc(&d_recent_presynaptic_activities_C, XSIZE*YSIZE);
float *d_time_of_last_spike_to_reach_synapse = NULL;
hipMalloc(&d_time_of_last_spike_to_reach_synapse, XSIZE*YSIZE);
bool *d_stdp = NULL;
hipMalloc(&d_stdp, XSIZE*YSIZE);
float timestep = 1;
float current_time_in_seconds = 1;
size_t total_number_of_synapses = 1;
float synaptic_neurotransmitter_concentration_alpha_C = 2;
float decay_term_tau_C = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
update_presynaptic_activities_C_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_recent_presynaptic_activities_C,d_time_of_last_spike_to_reach_synapse,d_stdp,timestep,current_time_in_seconds,total_number_of_synapses,synaptic_neurotransmitter_concentration_alpha_C,decay_term_tau_C);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
update_presynaptic_activities_C_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_recent_presynaptic_activities_C,d_time_of_last_spike_to_reach_synapse,d_stdp,timestep,current_time_in_seconds,total_number_of_synapses,synaptic_neurotransmitter_concentration_alpha_C,decay_term_tau_C);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
update_presynaptic_activities_C_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_recent_presynaptic_activities_C,d_time_of_last_spike_to_reach_synapse,d_stdp,timestep,current_time_in_seconds,total_number_of_synapses,synaptic_neurotransmitter_concentration_alpha_C,decay_term_tau_C);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 9b33ed53afb30a69a5e1af071088e471078d7d5a.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "update_presynaptic_activities_C_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_recent_presynaptic_activities_C = NULL;
cudaMalloc(&d_recent_presynaptic_activities_C, XSIZE*YSIZE);
float *d_time_of_last_spike_to_reach_synapse = NULL;
cudaMalloc(&d_time_of_last_spike_to_reach_synapse, XSIZE*YSIZE);
bool *d_stdp = NULL;
cudaMalloc(&d_stdp, XSIZE*YSIZE);
float timestep = 1;
float current_time_in_seconds = 1;
size_t total_number_of_synapses = 1;
float synaptic_neurotransmitter_concentration_alpha_C = 2;
float decay_term_tau_C = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
update_presynaptic_activities_C_kernel<<<gridBlock,threadBlock>>>(d_recent_presynaptic_activities_C,d_time_of_last_spike_to_reach_synapse,d_stdp,timestep,current_time_in_seconds,total_number_of_synapses,synaptic_neurotransmitter_concentration_alpha_C,decay_term_tau_C);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
update_presynaptic_activities_C_kernel<<<gridBlock,threadBlock>>>(d_recent_presynaptic_activities_C,d_time_of_last_spike_to_reach_synapse,d_stdp,timestep,current_time_in_seconds,total_number_of_synapses,synaptic_neurotransmitter_concentration_alpha_C,decay_term_tau_C);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
update_presynaptic_activities_C_kernel<<<gridBlock,threadBlock>>>(d_recent_presynaptic_activities_C,d_time_of_last_spike_to_reach_synapse,d_stdp,timestep,current_time_in_seconds,total_number_of_synapses,synaptic_neurotransmitter_concentration_alpha_C,decay_term_tau_C);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
17665ed5c14aa2d5c1e5aaf5c0fcdaebd91e20d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>
#include <THH/THHAtomics.cuh>
#include "cuda_helpers.h"
#include "roi_align_kernel.h"
namespace vision {
namespace ops {
namespace {
template <typename T>
__device__ T bilinear_interpolate(
const T* input,
int height,
int width,
T y,
T x,
int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y <= 0)
y = 0;
if (x <= 0)
x = 0;
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = input[y_low * width + x_low];
T v2 = input[y_low * width + x_high];
T v3 = input[y_high * width + x_low];
T v4 = input[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
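/*
   Worked example (illustrative note, not part of the original source):
   for a sample point y = 2.3, x = 4.7 the code above yields
   y_low = 2, y_high = 3, x_low = 4, x_high = 5, hence
   ly = 0.3, lx = 0.7, hy = 0.7, hx = 0.3 and weights
   w1 = hy*hx = 0.21, w2 = hy*lx = 0.49, w3 = ly*hx = 0.09, w4 = ly*lx = 0.21.
   The weights sum to 1, so val is a convex combination of the four
   neighbouring feature-map values v1..v4.
*/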
template <typename T>
__global__ void roi_align_forward_kernel_impl(
int nthreads,
const T* input,
const T spatial_scale,
int channels,
int height,
int width,
int pooled_height,
int pooled_width,
int sampling_ratio,
bool aligned,
const T* rois,
T* output) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
// Do not use rounding; this implementation detail is critical
T offset = aligned ? (T)0.5 : (T)0.0;
T roi_start_w = offset_rois[1] * spatial_scale - offset;
T roi_start_h = offset_rois[2] * spatial_scale - offset;
T roi_end_w = offset_rois[3] * spatial_scale - offset;
T roi_end_h = offset_rois[4] * spatial_scale - offset;
T roi_width = roi_end_w - roi_start_w;
T roi_height = roi_end_h - roi_start_h;
if (!aligned) {
// Force malformed ROIs to be 1x1
roi_width = max(roi_width, (T)1.);
roi_height = max(roi_height, (T)1.);
}
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_input =
input + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
// When the grid is empty, output zeros.
const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T val = bilinear_interpolate(offset_input, height, width, y, x, index);
output_val += val;
}
}
output_val /= count;
output[index] = output_val;
}
}
template <typename T>
__device__ void bilinear_interpolate_gradient(
int height,
int width,
T y,
T x,
T& w1,
T& w2,
T& w3,
T& w4,
int& x_low,
int& x_high,
int& y_low,
int& y_high,
int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y <= 0)
y = 0;
if (x <= 0)
x = 0;
y_low = (int)y;
x_low = (int)x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// reference in forward
// T v1 = input[y_low * width + x_low];
// T v2 = input[y_low * width + x_high];
// T v3 = input[y_high * width + x_low];
// T v4 = input[y_high * width + x_high];
// T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
}
template <typename T>
__global__ void roi_align_backward_kernel_impl(
int nthreads,
const T* grad_output,
const T spatial_scale,
int channels,
int height,
int width,
int pooled_height,
int pooled_width,
int sampling_ratio,
bool aligned,
T* grad_input,
const T* rois,
int n_stride,
int c_stride,
int h_stride,
int w_stride) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
// Do not use rounding; this implementation detail is critical
T offset = aligned ? (T)0.5 : (T)0.0;
T roi_start_w = offset_rois[1] * spatial_scale - offset;
T roi_start_h = offset_rois[2] * spatial_scale - offset;
T roi_end_w = offset_rois[3] * spatial_scale - offset;
T roi_end_h = offset_rois[4] * spatial_scale - offset;
T roi_width = roi_end_w - roi_start_w;
T roi_height = roi_end_h - roi_start_h;
if (!aligned) {
// Force malformed ROIs to be 1x1
roi_width = max(roi_width, (T)1.);
roi_height = max(roi_height, (T)1.);
}
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_grad_input =
grad_input + ((roi_batch_ind * channels + c) * height * width);
// We need to index the gradient using the tensor strides to access the
// correct values.
int output_offset = n * n_stride + c * c_stride;
const T* offset_grad_output = grad_output + output_offset;
const T grad_output_this_bin =
offset_grad_output[ph * h_stride + pw * w_stride];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(
height,
width,
y,
x,
w1,
w2,
w3,
w4,
x_low,
x_high,
y_low,
y_high,
index);
T g1 = grad_output_this_bin * w1 / count;
T g2 = grad_output_this_bin * w2 / count;
T g3 = grad_output_this_bin * w3 / count;
T g4 = grad_output_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
atomicAdd(
offset_grad_input + y_low * width + x_low, static_cast<T>(g1));
atomicAdd(
offset_grad_input + y_low * width + x_high, static_cast<T>(g2));
atomicAdd(
offset_grad_input + y_high * width + x_low, static_cast<T>(g3));
atomicAdd(
offset_grad_input + y_high * width + x_high, static_cast<T>(g4));
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
}
} // namespace
at::Tensor roi_align_forward_cuda(
const at::Tensor& input,
const at::Tensor& rois,
double spatial_scale,
int64_t pooled_height,
int64_t pooled_width,
int64_t sampling_ratio,
bool aligned) {
TORCH_CHECK(input.is_cuda(), "input must be a CUDA tensor");
TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
TORCH_CHECK(rois.size(1) == 5, "rois must have shape as Tensor[K, 5]");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "roi_align_forward_cuda";
at::checkAllSameGPU(c, {input_t, rois_t});
at::checkAllSameType(c, {input_t, rois_t});
at::hip::HIPGuardMasqueradingAsCUDA device_guard(input.device());
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
at::Tensor output = at::zeros(
{num_rois, channels, pooled_height, pooled_width}, input.options());
auto output_size = num_rois * pooled_height * pooled_width * channels;
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(
ceil_div(static_cast<int64_t>(output_size), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
if (output.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return output;
}
auto input_ = input.contiguous(), rois_ = rois.contiguous();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "roi_align_forward_cuda", [&] {
hipLaunchKernelGGL(( roi_align_forward_kernel_impl<scalar_t>), dim3(grid), dim3(block), 0, stream,
output_size,
input_.data_ptr<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
aligned,
rois_.data_ptr<scalar_t>(),
output.data_ptr<scalar_t>());
});
AT_CUDA_CHECK(hipGetLastError());
return output;
}
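/*
   Usage sketch (illustrative only, not part of the original source). Each row of
   `rois` is (batch_index, x1, y1, x2, y2) in input-image coordinates, as implied
   by the use of offset_rois[0..4] in the kernels above. Assuming `feat` is an
   NCHW CUDA feature map and `rois` a [K, 5] CUDA tensor, a direct call could be:
       auto pooled = roi_align_forward_cuda(
           feat, rois,
           0.25,      // spatial_scale (feature stride 4)
           7, 7,      // pooled_height, pooled_width
           2,         // sampling_ratio
           true);     // aligned; result has shape [K, C, 7, 7]
*/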
at::Tensor roi_align_backward_cuda(
const at::Tensor& grad,
const at::Tensor& rois,
double spatial_scale,
int64_t pooled_height,
int64_t pooled_width,
int64_t batch_size,
int64_t channels,
int64_t height,
int64_t width,
int64_t sampling_ratio,
bool aligned) {
TORCH_CHECK(grad.is_cuda(), "grad must be a CUDA tensor");
TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "roi_align_backward_cuda";
at::checkAllSameGPU(c, {grad_t, rois_t});
at::checkAllSameType(c, {grad_t, rois_t});
at::hip::HIPGuardMasqueradingAsCUDA device_guard(grad.device());
at::Tensor grad_input =
at::zeros({batch_size, channels, height, width}, grad.options());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
dim3 grid(::min(
ceil_div(static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
AT_CUDA_CHECK(hipGetLastError());
return grad_input;
}
int n_stride = grad.stride(0);
int c_stride = grad.stride(1);
int h_stride = grad.stride(2);
int w_stride = grad.stride(3);
auto rois_ = rois.contiguous();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "roi_align_backward_cuda", [&] {
hipLaunchKernelGGL(( roi_align_backward_kernel_impl<scalar_t>), dim3(grid), dim3(block), 0, stream,
grad.numel(),
grad.data_ptr<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
aligned,
grad_input.data_ptr<scalar_t>(),
rois_.data_ptr<scalar_t>(),
n_stride,
c_stride,
h_stride,
w_stride);
});
AT_CUDA_CHECK(hipGetLastError());
return grad_input;
}
} // namespace ops
} // namespace vision
| 17665ed5c14aa2d5c1e5aaf5c0fcdaebd91e20d4.cu | #include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <THC/THCAtomics.cuh>
#include "cuda_helpers.h"
#include "roi_align_kernel.h"
namespace vision {
namespace ops {
namespace {
template <typename T>
__device__ T bilinear_interpolate(
const T* input,
int height,
int width,
T y,
T x,
int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
return 0;
}
if (y <= 0)
y = 0;
if (x <= 0)
x = 0;
int y_low = (int)y;
int x_low = (int)x;
int y_high;
int x_high;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// do bilinear interpolation
T v1 = input[y_low * width + x_low];
T v2 = input[y_low * width + x_high];
T v3 = input[y_high * width + x_low];
T v4 = input[y_high * width + x_high];
T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
return val;
}
template <typename T>
__global__ void roi_align_forward_kernel_impl(
int nthreads,
const T* input,
const T spatial_scale,
int channels,
int height,
int width,
int pooled_height,
int pooled_width,
int sampling_ratio,
bool aligned,
const T* rois,
T* output) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
// Do not use rounding; this implementation detail is critical
T offset = aligned ? (T)0.5 : (T)0.0;
T roi_start_w = offset_rois[1] * spatial_scale - offset;
T roi_start_h = offset_rois[2] * spatial_scale - offset;
T roi_end_w = offset_rois[3] * spatial_scale - offset;
T roi_end_h = offset_rois[4] * spatial_scale - offset;
T roi_width = roi_end_w - roi_start_w;
T roi_height = roi_end_h - roi_start_h;
if (!aligned) {
// Force malformed ROIs to be 1x1
roi_width = max(roi_width, (T)1.);
roi_height = max(roi_height, (T)1.);
}
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
const T* offset_input =
input + (roi_batch_ind * channels + c) * height * width;
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
// When the grid is empty, output zeros.
const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. = 4
T output_val = 0.;
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T val = bilinear_interpolate(offset_input, height, width, y, x, index);
output_val += val;
}
}
output_val /= count;
output[index] = output_val;
}
}
template <typename T>
__device__ void bilinear_interpolate_gradient(
int height,
int width,
T y,
T x,
T& w1,
T& w2,
T& w3,
T& w4,
int& x_low,
int& x_high,
int& y_low,
int& y_high,
int index /* index for debug only*/) {
// deal with cases that inverse elements are out of feature map boundary
if (y < -1.0 || y > height || x < -1.0 || x > width) {
// empty
w1 = w2 = w3 = w4 = 0.;
x_low = x_high = y_low = y_high = -1;
return;
}
if (y <= 0)
y = 0;
if (x <= 0)
x = 0;
y_low = (int)y;
x_low = (int)x;
if (y_low >= height - 1) {
y_high = y_low = height - 1;
y = (T)y_low;
} else {
y_high = y_low + 1;
}
if (x_low >= width - 1) {
x_high = x_low = width - 1;
x = (T)x_low;
} else {
x_high = x_low + 1;
}
T ly = y - y_low;
T lx = x - x_low;
T hy = 1. - ly, hx = 1. - lx;
// reference in forward
// T v1 = input[y_low * width + x_low];
// T v2 = input[y_low * width + x_high];
// T v3 = input[y_high * width + x_low];
// T v4 = input[y_high * width + x_high];
// T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
}
template <typename T>
__global__ void roi_align_backward_kernel_impl(
int nthreads,
const T* grad_output,
const T spatial_scale,
int channels,
int height,
int width,
int pooled_height,
int pooled_width,
int sampling_ratio,
bool aligned,
T* grad_input,
const T* rois,
int n_stride,
int c_stride,
int h_stride,
int w_stride) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
const T* offset_rois = rois + n * 5;
int roi_batch_ind = offset_rois[0];
// Do not use rounding; this implementation detail is critical
T offset = aligned ? (T)0.5 : (T)0.0;
T roi_start_w = offset_rois[1] * spatial_scale - offset;
T roi_start_h = offset_rois[2] * spatial_scale - offset;
T roi_end_w = offset_rois[3] * spatial_scale - offset;
T roi_end_h = offset_rois[4] * spatial_scale - offset;
T roi_width = roi_end_w - roi_start_w;
T roi_height = roi_end_h - roi_start_h;
if (!aligned) {
// Force malformed ROIs to be 1x1
roi_width = max(roi_width, (T)1.);
roi_height = max(roi_height, (T)1.);
}
T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
T* offset_grad_input =
grad_input + ((roi_batch_ind * channels + c) * height * width);
// We need to index the gradient using the tensor strides to access the
// correct values.
int output_offset = n * n_stride + c * c_stride;
const T* offset_grad_output = grad_output + output_offset;
const T grad_output_this_bin =
offset_grad_output[ph * h_stride + pw * w_stride];
// We use roi_bin_grid to sample the grid and mimic integral
int roi_bin_grid_h = (sampling_ratio > 0)
? sampling_ratio
: ceil(roi_height / pooled_height); // e.g., = 2
int roi_bin_grid_w =
(sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
// We do average (integral) pooling inside a bin
const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
{
const T y = roi_start_h + ph * bin_size_h +
static_cast<T>(iy + .5f) * bin_size_h /
static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
for (int ix = 0; ix < roi_bin_grid_w; ix++) {
const T x = roi_start_w + pw * bin_size_w +
static_cast<T>(ix + .5f) * bin_size_w /
static_cast<T>(roi_bin_grid_w);
T w1, w2, w3, w4;
int x_low, x_high, y_low, y_high;
bilinear_interpolate_gradient(
height,
width,
y,
x,
w1,
w2,
w3,
w4,
x_low,
x_high,
y_low,
y_high,
index);
T g1 = grad_output_this_bin * w1 / count;
T g2 = grad_output_this_bin * w2 / count;
T g3 = grad_output_this_bin * w3 / count;
T g4 = grad_output_this_bin * w4 / count;
if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
atomicAdd(
offset_grad_input + y_low * width + x_low, static_cast<T>(g1));
atomicAdd(
offset_grad_input + y_low * width + x_high, static_cast<T>(g2));
atomicAdd(
offset_grad_input + y_high * width + x_low, static_cast<T>(g3));
atomicAdd(
offset_grad_input + y_high * width + x_high, static_cast<T>(g4));
} // if
} // ix
} // iy
} // CUDA_1D_KERNEL_LOOP
}
} // namespace
at::Tensor roi_align_forward_cuda(
const at::Tensor& input,
const at::Tensor& rois,
double spatial_scale,
int64_t pooled_height,
int64_t pooled_width,
int64_t sampling_ratio,
bool aligned) {
TORCH_CHECK(input.is_cuda(), "input must be a CUDA tensor");
TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
TORCH_CHECK(rois.size(1) == 5, "rois must have shape as Tensor[K, 5]");
at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "roi_align_forward_cuda";
at::checkAllSameGPU(c, {input_t, rois_t});
at::checkAllSameType(c, {input_t, rois_t});
at::cuda::CUDAGuard device_guard(input.device());
auto num_rois = rois.size(0);
auto channels = input.size(1);
auto height = input.size(2);
auto width = input.size(3);
at::Tensor output = at::zeros(
{num_rois, channels, pooled_height, pooled_width}, input.options());
auto output_size = num_rois * pooled_height * pooled_width * channels;
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(
ceil_div(static_cast<int64_t>(output_size), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
if (output.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return output;
}
auto input_ = input.contiguous(), rois_ = rois.contiguous();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "roi_align_forward_cuda", [&] {
roi_align_forward_kernel_impl<scalar_t><<<grid, block, 0, stream>>>(
output_size,
input_.data_ptr<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
aligned,
rois_.data_ptr<scalar_t>(),
output.data_ptr<scalar_t>());
});
AT_CUDA_CHECK(cudaGetLastError());
return output;
}
at::Tensor roi_align_backward_cuda(
const at::Tensor& grad,
const at::Tensor& rois,
double spatial_scale,
int64_t pooled_height,
int64_t pooled_width,
int64_t batch_size,
int64_t channels,
int64_t height,
int64_t width,
int64_t sampling_ratio,
bool aligned) {
TORCH_CHECK(grad.is_cuda(), "grad must be a CUDA tensor");
TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor");
at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2};
at::CheckedFrom c = "roi_align_backward_cuda";
at::checkAllSameGPU(c, {grad_t, rois_t});
at::checkAllSameType(c, {grad_t, rois_t});
at::cuda::CUDAGuard device_guard(grad.device());
at::Tensor grad_input =
at::zeros({batch_size, channels, height, width}, grad.options());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
dim3 grid(std::min(
ceil_div(static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)),
static_cast<int64_t>(4096)));
dim3 block(512);
// handle possibly empty gradients
if (grad.numel() == 0) {
AT_CUDA_CHECK(cudaGetLastError());
return grad_input;
}
int n_stride = grad.stride(0);
int c_stride = grad.stride(1);
int h_stride = grad.stride(2);
int w_stride = grad.stride(3);
auto rois_ = rois.contiguous();
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "roi_align_backward_cuda", [&] {
roi_align_backward_kernel_impl<scalar_t><<<grid, block, 0, stream>>>(
grad.numel(),
grad.data_ptr<scalar_t>(),
spatial_scale,
channels,
height,
width,
pooled_height,
pooled_width,
sampling_ratio,
aligned,
grad_input.data_ptr<scalar_t>(),
rois_.data_ptr<scalar_t>(),
n_stride,
c_stride,
h_stride,
w_stride);
});
AT_CUDA_CHECK(cudaGetLastError());
return grad_input;
}
} // namespace ops
} // namespace vision
|
8ab447754f18eb68ba59421ae603034b26a43bff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
%%writefile quick.cu
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include<iostream>
#include<fstream>
#include<string>
#include<stdlib.h>
using namespace std;
/* Every thread gets exactly one value in the unsorted array. */
#define THREADS 1024
#define INSERTION_SORT 32
#define CUDA_SAFE_CALL(func) \
do { \
hipError_t err = (func); \
if (err != hipSuccess) { \
fprintf(stderr, "[Error] %s (error code: %d) at %s line %d\n", hipGetErrorString(err), err, __FILE__, __LINE__); \
exit(err); \
} \
} while(0)
// Simple selection sort
template<typename _arraytype>
__device__
void selection_sort(_arraytype *data, int left, int right)
{
for (long long i = left ; i <= right ; ++i)
{
_arraytype min_val = data[i];
long long min_idx = i;
for (long long j = i+1 ; j <= right ; ++j)
{
_arraytype val_j = data[j];
if (val_j < min_val)
{
min_idx = j;
min_val = val_j;
}
}
if (i != min_idx)
{
data[min_idx] = data[i];
data[i] = min_val;
}
}
}
template<typename _arraytype>
__global__
void quicksort(_arraytype *data,long long left,long long right,int depth)
{
// Switch to selection sort
if (depth >= 20 || right-left<=INSERTION_SORT)
{
selection_sort(data,left,right);
return;
}
_arraytype *l=data+left;
_arraytype *r=data+right;
_arraytype pivot=data[(left+right)/2];
while (l<=r)
{
_arraytype lval=*l;
_arraytype rval=*r;
while (lval<pivot)
{
l++;
lval=*l;
}
while (rval>pivot)
{
r--;
rval=*r;
}
if (l<=r)
{
*l++=rval;
*r--=lval;
}
}
long long nright=r-data;
long long nleft=l-data;
if (left<(r-data))
{
// Specify the stream
hipStream_t s;
hipStreamCreateWithFlags(&s, hipStreamNonBlocking);
hipLaunchKernelGGL(( quicksort), dim3(1), dim3(1), 0, s , data, left, nright, depth+1);
// Destroy the asynchronous stream
hipStreamDestroy(s);
}
if ((l-data) < right)
{
// Specify the stream
hipStream_t s1;
hipStreamCreateWithFlags(&s1, hipStreamNonBlocking);
hipLaunchKernelGGL(( quicksort), dim3(1), dim3(1), 0, s1 , data, nleft, right, depth+1);
// Destroy the asynchronous stream
hipStreamDestroy(s1);
}
}
int main(){
ofstream outputfile("quick_result.txt");
for (int i=10;i<=20;i++){
for (int j=0;j<10;j++){
string s="sample2^";
s+=to_string(i);
s+="-";
s+=to_string(j);
s+=".txt";
ifstream ifs(s);
int *A;
int *DA;
clock_t start, stop;
string num;
int size=pow(2,i);
A=(int*)malloc(sizeof(int)*size);// Allocate the array
int i=0;
if (ifs.fail()) {
cout << "Failed to open file." << endl;
return -1;
}
while (getline(ifs, num)) {
A[i]=atoi(num.c_str());
i++;
}
//Allocate GPU memory
hipMalloc((void**)&DA,sizeof(int)*size);
//Run the sort
start = clock();
//Copy from host memory to device memory
CUDA_SAFE_CALL(hipMemcpy(DA,A,sizeof(int)*size,hipMemcpyDefault));
hipLaunchKernelGGL(( quicksort) , dim3(1),dim3(1) , 0, 0, DA,0,size-1,0);
//Copy from device memory to host memory
CUDA_SAFE_CALL(hipMemcpy(A,DA,sizeof(int)*size,hipMemcpyDefault));
stop = clock();
//Free device memory
hipFree(DA);
for (i=1;i<size;i++){
if (A[i-1]>A[i]){
cout << "______________FALSE_________" << endl;
break;
}
}
free(A);
outputfile << (long double)(stop-start) / CLOCKS_PER_SEC << " " << size << '\n';
}
}
outputfile.close();
return 0;
}
| 8ab447754f18eb68ba59421ae603034b26a43bff.cu | %%writefile quick.cu
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include<iostream>
#include<fstream>
#include<string>
#include<stdlib.h>
using namespace std;
/* Every thread gets exactly one value in the unsorted array. */
#define THREADS 1024
#define INSERTION_SORT 32
#define CUDA_SAFE_CALL(func) \
do { \
cudaError_t err = (func); \
if (err != cudaSuccess) { \
fprintf(stderr, "[Error] %s (error code: %d) at %s line %d\n", cudaGetErrorString(err), err, __FILE__, __LINE__); \
exit(err); \
} \
} while(0)
// Simple selection sort
template<typename _arraytype>
__device__
void selection_sort(_arraytype *data, int left, int right)
{
for (long long i = left ; i <= right ; ++i)
{
_arraytype min_val = data[i];
long long min_idx = i;
for (long long j = i+1 ; j <= right ; ++j)
{
_arraytype val_j = data[j];
if (val_j < min_val)
{
min_idx = j;
min_val = val_j;
}
}
if (i != min_idx)
{
data[min_idx] = data[i];
data[i] = min_val;
}
}
}
template<typename _arraytype>
__global__
void quicksort(_arraytype *data,long long left,long long right,int depth)
{
// Switch to selection sort
if (depth >= 20 || right-left<=INSERTION_SORT)
{
selection_sort(data,left,right);
return;
}
_arraytype *l=data+left;
_arraytype *r=data+right;
_arraytype pivot=data[(left+right)/2];
while (l<=r)
{
_arraytype lval=*l;
_arraytype rval=*r;
while (lval<pivot)
{
l++;
lval=*l;
}
while (rval>pivot)
{
r--;
rval=*r;
}
if (l<=r)
{
*l++=rval;
*r--=lval;
}
}
long long nright=r-data;
long long nleft=l-data;
if (left<(r-data))
{
// Specify the stream
cudaStream_t s;
cudaStreamCreateWithFlags(&s, cudaStreamNonBlocking);
quicksort<<< 1, 1, 0, s >>>(data, left, nright, depth+1);
// Destroy the asynchronous stream
cudaStreamDestroy(s);
}
if ((l-data) < right)
{
// Specify the stream
cudaStream_t s1;
cudaStreamCreateWithFlags(&s1, cudaStreamNonBlocking);
quicksort<<< 1, 1, 0, s1 >>>(data, nleft, right, depth+1);
// Destroy the asynchronous stream
cudaStreamDestroy(s1);
}
}
int main(){
ofstream outputfile("quick_result.txt");
for (int i=10;i<=20;i++){
for (int j=0;j<10;j++){
string s="sample2^";
s+=to_string(i);
s+="-";
s+=to_string(j);
s+=".txt";
ifstream ifs(s);
int *A;
int *DA;
clock_t start, stop;
string num;
int size=pow(2,i);
A=(int*)malloc(sizeof(int)*size);// Allocate the array
int i=0;
if (ifs.fail()) {
cout << "Failed to open file." << endl;
return -1;
}
while (getline(ifs, num)) {
A[i]=atoi(num.c_str());
i++;
}
//Allocate GPU memory
cudaMalloc((void**)&DA,sizeof(int)*size);
//Run the sort
start = clock();
//Copy from host memory to device memory
CUDA_SAFE_CALL(cudaMemcpy(DA,A,sizeof(int)*size,cudaMemcpyDefault));
quicksort <<< 1,1 >>>(DA,0,size-1,0);
//Copy from device memory to host memory
CUDA_SAFE_CALL(cudaMemcpy(A,DA,sizeof(int)*size,cudaMemcpyDefault));
stop = clock();
//Free device memory
cudaFree(DA);
for (i=1;i<size;i++){
if (A[i-1]>A[i]){
cout << "______________FALSE_________" << endl;
break;
}
}
free(A);
outputfile << (long double)(stop-start) / CLOCKS_PER_SEC << " " << size << '\n';
}
}
outputfile.close();
return 0;
}
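/*
   Build note (illustrative, not part of the original source): the quicksort
   kernel above launches child kernels from the device (CUDA dynamic
   parallelism), so this file has to be compiled with relocatable device code
   and linked against cudadevrt on a GPU of compute capability 3.5 or newer,
   for example:
       nvcc -arch=sm_60 -rdc=true quick.cu -o quick -lcudadevrt
*/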
|
3fe8698ca916e9b5959ed51b0c1948c5777ed2a8.hip | // !!! This is a file automatically generated by hipify!!!
#include <cmath>
#include <cstdint>
#include <iostream>
#include <limits>
#include <cufinufft.h>
#include <cufinufft/impl.h>
inline bool is_invalid_mode_array(int dim, int64_t *modes64, int32_t modes32[3]) {
int64_t tot_size = 1;
for (int i = 0; i < dim; ++i) {
if (modes64[i] > std::numeric_limits<int32_t>::max())
return true;
modes32[i] = modes64[i];
tot_size *= modes64[i];
}
for (int i = dim; i < 3; ++i)
modes32[i] = 1;
return tot_size > std::numeric_limits<int32_t>::max();
}
extern "C" {
int cufinufftf_makeplan(int type, int dim, int64_t *nmodes, int iflag, int ntransf, float tol,
cufinufftf_plan *d_plan_ptr, cufinufft_opts *opts) {
int nmodes32[3];
if (is_invalid_mode_array(dim, nmodes, nmodes32))
return ERR_NDATA_NOTVALID;
return cufinufft_makeplan_impl(type, dim, nmodes32, iflag, ntransf, tol, (cufinufft_plan_t<float> **)d_plan_ptr,
opts);
}
int cufinufft_makeplan(int type, int dim, int64_t *nmodes, int iflag, int ntransf, double tol,
cufinufft_plan *d_plan_ptr, cufinufft_opts *opts) {
int nmodes32[3];
if (is_invalid_mode_array(dim, nmodes, nmodes32))
return ERR_NDATA_NOTVALID;
return cufinufft_makeplan_impl(type, dim, nmodes32, iflag, ntransf, tol, (cufinufft_plan_t<double> **)d_plan_ptr,
opts);
}
int cufinufftf_setpts(cufinufftf_plan d_plan, int M, float *d_kx, float *d_ky, float *d_kz, int N, float *d_s,
float *d_t, float *d_u) {
return cufinufft_setpts_impl(M, d_kx, d_ky, d_kz, N, d_s, d_t, d_u, (cufinufft_plan_t<float> *)d_plan);
}
int cufinufft_setpts(cufinufft_plan d_plan, int M, double *d_kx, double *d_ky, double *d_kz, int N, double *d_s,
double *d_t, double *d_u) {
return cufinufft_setpts_impl(M, d_kx, d_ky, d_kz, N, d_s, d_t, d_u, (cufinufft_plan_t<double> *)d_plan);
}
int cufinufftf_execute(cufinufftf_plan d_plan, cuFloatComplex *d_c, cuFloatComplex *d_fk) {
return cufinufft_execute_impl<float>(d_c, d_fk, (cufinufft_plan_t<float> *)d_plan);
}
int cufinufft_execute(cufinufft_plan d_plan, hipDoubleComplex *d_c, cuda_complex<double> *d_fk) {
return cufinufft_execute_impl<double>(d_c, d_fk, (cufinufft_plan_t<double> *)d_plan);
}
int cufinufftf_destroy(cufinufftf_plan d_plan) {
return cufinufft_destroy_impl<float>((cufinufft_plan_t<float> *)d_plan);
}
int cufinufft_destroy(cufinufft_plan d_plan) {
return cufinufft_destroy_impl<double>((cufinufft_plan_t<double> *)d_plan);
}
int cufinufft_default_opts(int type, int dim, cufinufft_opts *opts)
/*
Sets the default options in cufinufft_opts. This must be called
before the user changes any options from default values.
The resulting struct may then be passed (instead of NULL) to the last
argument of cufinufft_plan().
Options with prefix "gpu_" are used for gpu code.
Notes:
    Values set in this function for different types and dimensions are preferred
    based on experiments. Users can experiment with different settings by
    replacing them after calling this function.
Melody Shih 07/25/19; Barnett 2/5/21.
*/
{
int ier;
opts->upsampfac = 2.0;
/* following options are for gpu */
opts->gpu_nstreams = 0;
opts->gpu_sort = 1; // access nupts in an ordered way for nupts driven method
opts->gpu_maxsubprobsize = 1024;
opts->gpu_obinsizex = -1;
opts->gpu_obinsizey = -1;
opts->gpu_obinsizez = -1;
opts->gpu_binsizex = -1;
opts->gpu_binsizey = -1;
opts->gpu_binsizez = -1;
opts->gpu_spreadinterponly = 0; // default to do the whole nufft
opts->gpu_maxbatchsize = 0; // Heuristically set
switch (dim) {
case 1: {
opts->gpu_kerevalmeth = 1; // using horner
if (type == 1) {
opts->gpu_method = 2;
}
if (type == 2) {
opts->gpu_method = 1;
}
if (type == 3) {
std::cerr << "Not Implemented yet" << std::endl;
ier = 1;
return ier;
}
} break;
case 2: {
opts->gpu_kerevalmeth = 1; // using horner
if (type == 1) {
opts->gpu_method = 2;
}
if (type == 2) {
opts->gpu_method = 1;
}
if (type == 3) {
std::cerr << "Not Implemented yet" << std::endl;
ier = 1;
return ier;
}
} break;
case 3: {
opts->gpu_kerevalmeth = 1; // using horner
if (type == 1) {
opts->gpu_method = 2;
}
if (type == 2) {
opts->gpu_method = 1;
}
if (type == 3) {
std::cerr << "Not Implemented yet" << std::endl;
ier = 1;
return ier;
}
} break;
}
// By default, only use device 0
opts->gpu_device_id = 0;
return 0;
}
}
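/*
   Call-order sketch (illustrative only, not part of the original source) for the
   single-precision API above, here a 2D type-1 transform; N1, N2, M and the
   device arrays d_x, d_y, d_c, d_fk are assumed to be provided by the caller:
       cufinufft_opts opts;
       cufinufft_default_opts(1, 2, &opts);    // set defaults first...
       opts.gpu_method = 1;                    // ...then apply any overrides
       int64_t nmodes[2] = {N1, N2};
       cufinufftf_plan plan;
       cufinufftf_makeplan(1, 2, nmodes, +1, 1, 1e-6f, &plan, &opts);
       cufinufftf_setpts(plan, M, d_x, d_y, NULL, 0, NULL, NULL, NULL);
       cufinufftf_execute(plan, d_c, d_fk);    // d_c: strengths, d_fk: modes
       cufinufftf_destroy(plan);
*/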
| 3fe8698ca916e9b5959ed51b0c1948c5777ed2a8.cu | #include <cmath>
#include <cstdint>
#include <iostream>
#include <limits>
#include <cufinufft.h>
#include <cufinufft/impl.h>
inline bool is_invalid_mode_array(int dim, int64_t *modes64, int32_t modes32[3]) {
int64_t tot_size = 1;
for (int i = 0; i < dim; ++i) {
if (modes64[i] > std::numeric_limits<int32_t>::max())
return true;
modes32[i] = modes64[i];
tot_size *= modes64[i];
}
for (int i = dim; i < 3; ++i)
modes32[i] = 1;
return tot_size > std::numeric_limits<int32_t>::max();
}
extern "C" {
int cufinufftf_makeplan(int type, int dim, int64_t *nmodes, int iflag, int ntransf, float tol,
cufinufftf_plan *d_plan_ptr, cufinufft_opts *opts) {
int nmodes32[3];
if (is_invalid_mode_array(dim, nmodes, nmodes32))
return ERR_NDATA_NOTVALID;
return cufinufft_makeplan_impl(type, dim, nmodes32, iflag, ntransf, tol, (cufinufft_plan_t<float> **)d_plan_ptr,
opts);
}
int cufinufft_makeplan(int type, int dim, int64_t *nmodes, int iflag, int ntransf, double tol,
cufinufft_plan *d_plan_ptr, cufinufft_opts *opts) {
int nmodes32[3];
if (is_invalid_mode_array(dim, nmodes, nmodes32))
return ERR_NDATA_NOTVALID;
return cufinufft_makeplan_impl(type, dim, nmodes32, iflag, ntransf, tol, (cufinufft_plan_t<double> **)d_plan_ptr,
opts);
}
int cufinufftf_setpts(cufinufftf_plan d_plan, int M, float *d_kx, float *d_ky, float *d_kz, int N, float *d_s,
float *d_t, float *d_u) {
return cufinufft_setpts_impl(M, d_kx, d_ky, d_kz, N, d_s, d_t, d_u, (cufinufft_plan_t<float> *)d_plan);
}
int cufinufft_setpts(cufinufft_plan d_plan, int M, double *d_kx, double *d_ky, double *d_kz, int N, double *d_s,
double *d_t, double *d_u) {
return cufinufft_setpts_impl(M, d_kx, d_ky, d_kz, N, d_s, d_t, d_u, (cufinufft_plan_t<double> *)d_plan);
}
int cufinufftf_execute(cufinufftf_plan d_plan, cuFloatComplex *d_c, cuFloatComplex *d_fk) {
return cufinufft_execute_impl<float>(d_c, d_fk, (cufinufft_plan_t<float> *)d_plan);
}
int cufinufft_execute(cufinufft_plan d_plan, cuDoubleComplex *d_c, cuda_complex<double> *d_fk) {
return cufinufft_execute_impl<double>(d_c, d_fk, (cufinufft_plan_t<double> *)d_plan);
}
int cufinufftf_destroy(cufinufftf_plan d_plan) {
return cufinufft_destroy_impl<float>((cufinufft_plan_t<float> *)d_plan);
}
int cufinufft_destroy(cufinufft_plan d_plan) {
return cufinufft_destroy_impl<double>((cufinufft_plan_t<double> *)d_plan);
}
int cufinufft_default_opts(int type, int dim, cufinufft_opts *opts)
/*
Sets the default options in cufinufft_opts. This must be called
before the user changes any options from default values.
The resulting struct may then be passed (instead of NULL) to the last
argument of cufinufft_plan().
Options with prefix "gpu_" are used for gpu code.
Notes:
    Values set in this function for different types and dimensions are preferred
    based on experiments. Users can experiment with different settings by
    replacing them after calling this function.
Melody Shih 07/25/19; Barnett 2/5/21.
*/
{
int ier;
opts->upsampfac = 2.0;
/* following options are for gpu */
opts->gpu_nstreams = 0;
opts->gpu_sort = 1; // access nupts in an ordered way for nupts driven method
opts->gpu_maxsubprobsize = 1024;
opts->gpu_obinsizex = -1;
opts->gpu_obinsizey = -1;
opts->gpu_obinsizez = -1;
opts->gpu_binsizex = -1;
opts->gpu_binsizey = -1;
opts->gpu_binsizez = -1;
opts->gpu_spreadinterponly = 0; // default to do the whole nufft
opts->gpu_maxbatchsize = 0; // Heuristically set
switch (dim) {
case 1: {
opts->gpu_kerevalmeth = 1; // using horner
if (type == 1) {
opts->gpu_method = 2;
}
if (type == 2) {
opts->gpu_method = 1;
}
if (type == 3) {
std::cerr << "Not Implemented yet" << std::endl;
ier = 1;
return ier;
}
} break;
case 2: {
opts->gpu_kerevalmeth = 1; // using horner
if (type == 1) {
opts->gpu_method = 2;
}
if (type == 2) {
opts->gpu_method = 1;
}
if (type == 3) {
std::cerr << "Not Implemented yet" << std::endl;
ier = 1;
return ier;
}
} break;
case 3: {
opts->gpu_kerevalmeth = 1; // using horner
if (type == 1) {
opts->gpu_method = 2;
}
if (type == 2) {
opts->gpu_method = 1;
}
if (type == 3) {
std::cerr << "Not Implemented yet" << std::endl;
ier = 1;
return ier;
}
} break;
}
// By default, only use device 0
opts->gpu_device_id = 0;
return 0;
}
}
|
0d61b8e28d0444798add05f8c56f3c2fbe4645d1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "bvh.h"
__device__ AABB surrounding_box(AABB* box_1, AABB* box_2);
__device__ void not_qsort(Hittable** l, int n, int sort_by)
{
for(int i = 0; i < n - 1; i++)
for(int j = i + 1; j < n; j++)
{
AABB box_left, box_right;
l[i]->bounding_box(0, 0, &box_left);
l[j]->bounding_box(0, 0, &box_right);
if(box_left._min[sort_by] > box_right._min[sort_by])
{
Hittable* tmp = l[j];
l[j] = l[i];
l[i] = tmp;
}
}
}
__device__ BVHNode::BVHNode(Hittable** l, int n, float time_0, float time_1, hiprandState_t* state)
{
int axis = int(3.0f * random_float(state));
if (axis == 0)
not_qsort(l, n, 0);
else if (axis == 1)
not_qsort(l, n, 1);
else if (axis == 2)
not_qsort(l, n, 2);
if (n == 1)
{
left = right = l[0];
}
else if (n == 2)
{
left = l[0];
right = l[1];
}
else
{
left = new BVHNode(l, n / 2, time_0, time_1, state);
right = new BVHNode(l + n/2, n - n / 2, time_0, time_1, state);
}
AABB box_left, box_right;
left->bounding_box(time_0, time_1, &box_left);
right->bounding_box(time_0, time_1, &box_right);
box = surrounding_box(&box_left, &box_right);
}
__device__ bool BVHNode::hit(Ray* ray, float t_min, float t_max, HitRecord* rec) const
{
if(box.hit(ray, t_min, t_max))
{
HitRecord left_rec, right_rec;
bool hit_left = left->hit(ray, t_min, t_max, &left_rec);
bool hit_right = right->hit(ray, t_min, t_max, &right_rec);
if(hit_left && hit_right)
{
if(left_rec.t < right_rec.t)
*rec = left_rec;
else
*rec = right_rec;
return true;
}
else if(hit_left)
{
*rec = left_rec;
return true;
}
else if(hit_right)
{
*rec = right_rec;
return true;
}
return false;
}
return false;
}
__device__ bool BVHNode::bounding_box(float t0, float t1, AABB* b) const
{
*b = box;
return true;
} | 0d61b8e28d0444798add05f8c56f3c2fbe4645d1.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "bvh.h"
__device__ AABB surrounding_box(AABB* box_1, AABB* box_2);
__device__ void not_qsort(Hittable** l, int n, int sort_by)
{
for(int i = 0; i < n - 1; i++)
for(int j = i + 1; j < n; j++)
{
AABB box_left, box_right;
l[i]->bounding_box(0, 0, &box_left);
l[j]->bounding_box(0, 0, &box_right);
if(box_left._min[sort_by] > box_right._min[sort_by])
{
Hittable* tmp = l[j];
l[j] = l[i];
l[i] = tmp;
}
}
}
__device__ BVHNode::BVHNode(Hittable** l, int n, float time_0, float time_1, curandState_t* state)
{
int axis = int(3.0f * random_float(state));
if (axis == 0)
not_qsort(l, n, 0);
else if (axis == 1)
not_qsort(l, n, 1);
else if (axis == 2)
not_qsort(l, n, 2);
if (n == 1)
{
left = right = l[0];
}
else if (n == 2)
{
left = l[0];
right = l[1];
}
else
{
left = new BVHNode(l, n / 2, time_0, time_1, state);
right = new BVHNode(l + n/2, n - n / 2, time_0, time_1, state);
}
AABB box_left, box_right;
left->bounding_box(time_0, time_1, &box_left);
right->bounding_box(time_0, time_1, &box_right);
box = surrounding_box(&box_left, &box_right);
}
__device__ bool BVHNode::hit(Ray* ray, float t_min, float t_max, HitRecord* rec) const
{
if(box.hit(ray, t_min, t_max))
{
HitRecord left_rec, right_rec;
bool hit_left = left->hit(ray, t_min, t_max, &left_rec);
bool hit_right = right->hit(ray, t_min, t_max, &right_rec);
if(hit_left && hit_right)
{
if(left_rec.t < right_rec.t)
*rec = left_rec;
else
*rec = right_rec;
return true;
}
else if(hit_left)
{
*rec = left_rec;
return true;
}
else if(hit_right)
{
*rec = right_rec;
return true;
}
return false;
}
return false;
}
__device__ bool BVHNode::bounding_box(float t0, float t1, AABB* b) const
{
*b = box;
return true;
} |
5b8b1390c5e69d4f9141c0034714b041dd03df62.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@author Mark Gates
@author Azzam Haidar
@precisions normal z -> s d c
*/
#include "magma_internal.h"
#include "batched_kernel_param.h"
// To deal with really large matrices, this launches multiple super blocks,
// each with up to 64K-1 x 64K-1 thread blocks, which covers up to a 4194240 x 4194240 matrix with BLK=64 (65535 * 64 = 4194240).
// CUDA architecture 2.0 limits each grid dimension to 64K-1.
// Instances arose for vectors used by sparse matrices with M > 4194240, though N is small.
const magma_int_t max_blocks = 65535;
// BLK_X and BLK_Y need to be equal for zlaset_q to deal with diag & offdiag
// when looping over super blocks.
// Formerly, BLK_X and BLK_Y could be different.
#define BLK_X 64
#define BLK_Y BLK_X
/******************************************************************************/
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to zlaset, zlacpy, zlag2c, clag2z, zgeadd.
*/
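/*
   Worked example (illustrative note, not part of the original source): with
   BLK_X = BLK_Y = 64, a 150 x 130 matrix is covered by a
   ceil(150/64) x ceil(130/64) = 3 x 3 grid of 64-thread blocks launched by the
   host wrappers further below. The thread with blockIdx = (1,2) and
   threadIdx.x = 5 owns row ind = 1*64 + 5 = 69 and loops over the partial
   block-column iby = 128, i.e. columns 128 and 129; neither column index equals
   ind, so it writes offdiag to both entries and never writes diag.
*/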
static __device__
void zlaset_full_device(
int m, int n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag || above diag || offdiag == diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y || ind + BLK_X <= iby || MAGMA_Z_EQUAL( offdiag, diag )));
/* do only rows inside matrix */
if ( ind < m ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block or offdiag == diag
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else
A[j*lda] = offdiag;
}
}
}
}
/******************************************************************************/
/*
Similar to zlaset_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to zlaset, zlacpy, zlat2c, clat2z.
*/
static __device__
void zlaset_lower_device(
int m, int n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
/* do only rows inside matrix, and blocks not above diag */
if ( ind < m && ind + BLK_X > iby ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else if ( ind > iby+j )
A[j*lda] = offdiag;
}
}
}
}
/******************************************************************************/
/*
Similar to zlaset_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to zlaset, zlacpy, zlat2c, clat2z.
*/
static __device__
void zlaset_upper_device(
int m, int n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < m && ind < iby + BLK_Y ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else if ( ind < iby+j )
A[j*lda] = offdiag;
}
}
}
}
/******************************************************************************/
/*
kernel wrappers to call the device functions.
*/
__global__
void zlaset_full_kernel(
int m, int n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex *dA, int ldda )
{
zlaset_full_device(m, n, offdiag, diag, dA, ldda);
}
__global__
void zlaset_lower_kernel(
int m, int n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex *dA, int ldda )
{
zlaset_lower_device(m, n, offdiag, diag, dA, ldda);
}
__global__
void zlaset_upper_kernel(
int m, int n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex *dA, int ldda )
{
zlaset_upper_device(m, n, offdiag, diag, dA, ldda);
}
/******************************************************************************/
/*
kernel wrappers to call the device functions for the batched routine.
*/
__global__
void zlaset_full_kernel_batched(
int m, int n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex **dAarray, int ldda )
{
int batchid = blockIdx.z;
zlaset_full_device(m, n, offdiag, diag, dAarray[batchid], ldda);
}
__global__
void zlaset_lower_kernel_batched(
int m, int n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex **dAarray, int ldda )
{
int batchid = blockIdx.z;
zlaset_lower_device(m, n, offdiag, diag, dAarray[batchid], ldda);
}
__global__
void zlaset_upper_kernel_batched(
int m, int n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex **dAarray, int ldda )
{
int batchid = blockIdx.z;
zlaset_upper_device(m, n, offdiag, diag, dAarray[batchid], ldda);
}
/******************************************************************************/
/*
kernel wrappers to call the device functions for the vbatched routine.
*/
__global__
void zlaset_full_kernel_vbatched(
magma_int_t* m, magma_int_t* n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex **dAarray, magma_int_t* ldda )
{
const int batchid = blockIdx.z;
const int my_m = (int)m[batchid];
const int my_n = (int)n[batchid];
if( blockIdx.x >= (my_m+BLK_X-1)/BLK_X ) return;
if( blockIdx.y >= (my_n+BLK_Y-1)/BLK_Y ) return;
zlaset_full_device(my_m, my_n, offdiag, diag, dAarray[batchid], (int)ldda[batchid]);
}
__global__
void zlaset_lower_kernel_vbatched(
magma_int_t* m, magma_int_t* n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex **dAarray, magma_int_t* ldda )
{
const int batchid = blockIdx.z;
const int my_m = (int)m[batchid];
const int my_n = (int)n[batchid];
if( blockIdx.x >= (my_m+BLK_X-1)/BLK_X ) return;
if( blockIdx.y >= (my_n+BLK_Y-1)/BLK_Y ) return;
zlaset_lower_device(my_m, my_n, offdiag, diag, dAarray[batchid], (int)ldda[batchid]);
}
__global__
void zlaset_upper_kernel_vbatched(
magma_int_t* m, magma_int_t* n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex **dAarray, magma_int_t* ldda )
{
const int batchid = blockIdx.z;
const int my_m = (int)m[batchid];
const int my_n = (int)n[batchid];
if( blockIdx.x >= (my_m+BLK_X-1)/BLK_X ) return;
if( blockIdx.y >= (my_n+BLK_Y-1)/BLK_Y ) return;
zlaset_upper_device(my_m, my_n, offdiag, diag, dAarray[batchid], (int)ldda[batchid]);
}
/***************************************************************************//**
Purpose
-------
ZLASET initializes a 2-D array A to DIAG on the diagonal and
OFFDIAG on the off-diagonals.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be set.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
- = MagmaFull: All of the matrix dA
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
offdiag COMPLEX_16
The scalar OFFDIAG. (In LAPACK this is called ALPHA.)
@param[in]
diag COMPLEX_16
The scalar DIAG. (In LAPACK this is called BETA.)
@param[in]
dA COMPLEX_16 array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
On exit, A(i,j) = OFFDIAG, 1 <= i <= m, 1 <= j <= n, i != j;
and A(i,i) = DIAG, 1 <= i <= min(m,n)
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_laset
*******************************************************************************/
extern "C"
void magmablas_zlaset(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue)
{
#define dA(i_, j_) (dA + (i_) + (j_)*ldda)
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m) )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
assert( BLK_X == BLK_Y );
const magma_int_t super_NB = max_blocks*BLK_X;
dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) );
dim3 threads( BLK_X, 1 );
dim3 grid;
magma_int_t mm, nn;
if (uplo == MagmaLower) {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=0; j < super_grid.y && j <= i; ++j ) { // from left to diagonal
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
hipLaunchKernelGGL(( zlaset_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda );
}
else { // off diagonal super block
hipLaunchKernelGGL(( zlaset_full_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda );
}
}
}
}
else if (uplo == MagmaUpper) {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=i; j < super_grid.y; ++j ) { // from diagonal to right
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
hipLaunchKernelGGL(( zlaset_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda );
}
else { // off diagonal super block
hipLaunchKernelGGL(( zlaset_full_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda );
}
}
}
}
else {
        // if contiguous in memory & set to zero, hipMemset is faster.
// TODO: use hipMemset2D ?
if ( m == ldda &&
MAGMA_Z_EQUAL( offdiag, MAGMA_Z_ZERO ) &&
MAGMA_Z_EQUAL( diag, MAGMA_Z_ZERO ) )
{
size_t size = m*n;
hipError_t err = hipMemsetAsync( dA, 0, size*sizeof(magmaDoubleComplex), queue->cuda_stream() );
assert( err == hipSuccess );
MAGMA_UNUSED( err );
}
else {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=0; j < super_grid.y; ++j ) { // full row
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
hipLaunchKernelGGL(( zlaset_full_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda );
}
else { // off diagonal super block
hipLaunchKernelGGL(( zlaset_full_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda );
}
}
}
}
}
}
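/******************************************************************************/
/*
    Illustrative usage sketch (editor's addition, not part of the original MAGMA
    source): initialize an m-by-n device matrix to the identity with
    magmablas_zlaset. The host helpers used here (magma_init, magma_queue_create,
    magma_zmalloc, magma_queue_sync, magma_free, magma_queue_destroy,
    magma_finalize, magma_roundup) are assumed from the standard MAGMA host API;
    error checking is omitted for brevity.
*/
void example_zlaset_identity( magma_int_t m, magma_int_t n )
{
    magma_init();
    magma_queue_t queue;
    magma_queue_create( 0, &queue );              // queue on device 0
    magma_int_t ldda = magma_roundup( m, 32 );    // padded leading dimension
    magmaDoubleComplex_ptr dA;
    magma_zmalloc( &dA, ldda*n );                 // uninitialized m-by-n device matrix
    // off-diagonals = 0, diagonal = 1  =>  dA becomes the m-by-n identity
    magmablas_zlaset( MagmaFull, m, n, MAGMA_Z_ZERO, MAGMA_Z_ONE, dA, ldda, queue );
    magma_queue_sync( queue );
    magma_free( dA );
    magma_queue_destroy( queue );
    magma_finalize();
}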
/******************************************************************************/
extern "C"
void magmablas_zlaset_batched(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex_ptr dAarray[], magma_int_t ldda,
magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m) )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
dim3 threads( BLK_X, 1, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ), batchCount );
if (uplo == MagmaLower) {
hipLaunchKernelGGL(( zlaset_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dAarray, ldda);
}
else if (uplo == MagmaUpper) {
hipLaunchKernelGGL(( zlaset_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dAarray, ldda);
}
else {
hipLaunchKernelGGL(( zlaset_full_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dAarray, ldda);
}
}
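/*
    Illustrative batched usage (editor's addition, not part of the original MAGMA
    source): dAarray is a device array of batchCount device pointers, one per
    matrix in the batch (built, for example, with magma_malloc plus
    magma_setvector, both assumed from the standard MAGMA host API). Every matrix
    in the batch is then initialized in a single call:
        magmablas_zlaset_batched( MagmaFull, m, n, MAGMA_Z_ZERO, MAGMA_Z_ONE,
                                  dAarray, ldda, batchCount, queue );
*/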
/******************************************************************************/
extern "C"
void magmablas_zlaset_vbatched(
magma_uplo_t uplo, magma_int_t max_m, magma_int_t max_n,
magma_int_t* m, magma_int_t* n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex_ptr dAarray[], magma_int_t* ldda,
magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( max_m < 0 )
info = -2;
else if ( max_n < 0 )
info = -3;
//else if ( ldda < max(1,m) )
// info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( max_m == 0 || max_n == 0 ) {
return;
}
dim3 threads( BLK_X, 1, 1 );
dim3 grid( magma_ceildiv( max_m, BLK_X ), magma_ceildiv( max_n, BLK_Y ), batchCount );
if (uplo == MagmaLower) {
hipLaunchKernelGGL(( zlaset_lower_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dAarray, ldda);
}
else if (uplo == MagmaUpper) {
hipLaunchKernelGGL(( zlaset_upper_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dAarray, ldda);
}
else {
hipLaunchKernelGGL(( zlaset_full_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dAarray, ldda);
}
}
| 5b8b1390c5e69d4f9141c0034714b041dd03df62.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@author Mark Gates
@author Azzam Haidar
@precisions normal z -> s d c
*/
#include "magma_internal.h"
#include "batched_kernel_param.h"
// To deal with really large matrices, this launches multiple super blocks,
// each with up to 64K-1 x 64K-1 thread blocks, which is up to a 4194240 x 4194240 matrix with BLK=64.
// CUDA architecture 2.0 limits each grid dimension to 64K-1.
// Instances arose for vectors used by sparse matrices with M > 4194240, though N is small.
const magma_int_t max_blocks = 65535;
// BLK_X and BLK_Y need to be equal for zlaset_q to deal with diag & offdiag
// when looping over super blocks.
// Formerly, BLK_X and BLK_Y could be different.
#define BLK_X 64
#define BLK_Y BLK_X
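/*
    Worked example of the sizes above (editor's illustration; it follows directly
    from the constants here and is not part of the original source):
    one super block spans super_NB = max_blocks*BLK_X = 65535*64 = 4194240 rows
    or columns. A tall vector with m = 10,000,000 rows and n = 1 is therefore
    processed as magma_ceildiv( 10000000, 4194240 ) = 3 super blocks along m,
    each launching at most 65535 thread blocks of BLK_X = 64 threads, where each
    thread sweeps one row across up to BLK_Y = 64 columns of its tile.
*/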
/******************************************************************************/
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to zlaset, zlacpy, zlag2c, clag2z, zgeadd.
*/
static __device__
void zlaset_full_device(
int m, int n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag || above diag || offdiag == diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y || ind + BLK_X <= iby || MAGMA_Z_EQUAL( offdiag, diag )));
/* do only rows inside matrix */
if ( ind < m ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block or offdiag == diag
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else
A[j*lda] = offdiag;
}
}
}
}
/******************************************************************************/
/*
Similar to zlaset_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to zlaset, zlacpy, zlat2c, clat2z.
*/
static __device__
void zlaset_lower_device(
int m, int n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
/* do only rows inside matrix, and blocks not above diag */
if ( ind < m && ind + BLK_X > iby ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else if ( ind > iby+j )
A[j*lda] = offdiag;
}
}
}
}
/******************************************************************************/
/*
Similar to zlaset_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to zlaset, zlacpy, zlat2c, clat2z.
*/
static __device__
void zlaset_upper_device(
int m, int n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < m && ind < iby + BLK_Y ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else if ( ind < iby+j )
A[j*lda] = offdiag;
}
}
}
}
/******************************************************************************/
/*
kernel wrappers to call the device functions.
*/
__global__
void zlaset_full_kernel(
int m, int n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex *dA, int ldda )
{
zlaset_full_device(m, n, offdiag, diag, dA, ldda);
}
__global__
void zlaset_lower_kernel(
int m, int n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex *dA, int ldda )
{
zlaset_lower_device(m, n, offdiag, diag, dA, ldda);
}
__global__
void zlaset_upper_kernel(
int m, int n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex *dA, int ldda )
{
zlaset_upper_device(m, n, offdiag, diag, dA, ldda);
}
/******************************************************************************/
/*
kernel wrappers to call the device functions for the batched routine.
*/
__global__
void zlaset_full_kernel_batched(
int m, int n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex **dAarray, int ldda )
{
int batchid = blockIdx.z;
zlaset_full_device(m, n, offdiag, diag, dAarray[batchid], ldda);
}
__global__
void zlaset_lower_kernel_batched(
int m, int n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex **dAarray, int ldda )
{
int batchid = blockIdx.z;
zlaset_lower_device(m, n, offdiag, diag, dAarray[batchid], ldda);
}
__global__
void zlaset_upper_kernel_batched(
int m, int n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex **dAarray, int ldda )
{
int batchid = blockIdx.z;
zlaset_upper_device(m, n, offdiag, diag, dAarray[batchid], ldda);
}
/******************************************************************************/
/*
kernel wrappers to call the device functions for the vbatched routine.
*/
__global__
void zlaset_full_kernel_vbatched(
magma_int_t* m, magma_int_t* n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex **dAarray, magma_int_t* ldda )
{
const int batchid = blockIdx.z;
const int my_m = (int)m[batchid];
const int my_n = (int)n[batchid];
if( blockIdx.x >= (my_m+BLK_X-1)/BLK_X ) return;
if( blockIdx.y >= (my_n+BLK_Y-1)/BLK_Y ) return;
zlaset_full_device(my_m, my_n, offdiag, diag, dAarray[batchid], (int)ldda[batchid]);
}
__global__
void zlaset_lower_kernel_vbatched(
magma_int_t* m, magma_int_t* n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex **dAarray, magma_int_t* ldda )
{
const int batchid = blockIdx.z;
const int my_m = (int)m[batchid];
const int my_n = (int)n[batchid];
if( blockIdx.x >= (my_m+BLK_X-1)/BLK_X ) return;
if( blockIdx.y >= (my_n+BLK_Y-1)/BLK_Y ) return;
zlaset_lower_device(my_m, my_n, offdiag, diag, dAarray[batchid], (int)ldda[batchid]);
}
__global__
void zlaset_upper_kernel_vbatched(
magma_int_t* m, magma_int_t* n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex **dAarray, magma_int_t* ldda )
{
const int batchid = blockIdx.z;
const int my_m = (int)m[batchid];
const int my_n = (int)n[batchid];
if( blockIdx.x >= (my_m+BLK_X-1)/BLK_X ) return;
if( blockIdx.y >= (my_n+BLK_Y-1)/BLK_Y ) return;
zlaset_upper_device(my_m, my_n, offdiag, diag, dAarray[batchid], (int)ldda[batchid]);
}
/***************************************************************************//**
Purpose
-------
ZLASET initializes a 2-D array A to DIAG on the diagonal and
OFFDIAG on the off-diagonals.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be set.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
- = MagmaFull: All of the matrix dA
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
offdiag COMPLEX_16
The scalar OFFDIAG. (In LAPACK this is called ALPHA.)
@param[in]
diag COMPLEX_16
The scalar DIAG. (In LAPACK this is called BETA.)
    @param[out]
dA COMPLEX_16 array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
On exit, A(i,j) = OFFDIAG, 1 <= i <= m, 1 <= j <= n, i != j;
and A(i,i) = DIAG, 1 <= i <= min(m,n)
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_laset
*******************************************************************************/
extern "C"
void magmablas_zlaset(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue)
{
#define dA(i_, j_) (dA + (i_) + (j_)*ldda)
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m) )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
assert( BLK_X == BLK_Y );
const magma_int_t super_NB = max_blocks*BLK_X;
dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) );
dim3 threads( BLK_X, 1 );
dim3 grid;
magma_int_t mm, nn;
if (uplo == MagmaLower) {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=0; j < super_grid.y && j <= i; ++j ) { // from left to diagonal
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
zlaset_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda );
}
else { // off diagonal super block
zlaset_full_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda );
}
}
}
}
else if (uplo == MagmaUpper) {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=i; j < super_grid.y; ++j ) { // from diagonal to right
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
zlaset_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda );
}
else { // off diagonal super block
zlaset_full_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda );
}
}
}
}
else {
        // if contiguous in memory & set to zero, cudaMemset is faster.
// TODO: use cudaMemset2D ?
if ( m == ldda &&
MAGMA_Z_EQUAL( offdiag, MAGMA_Z_ZERO ) &&
MAGMA_Z_EQUAL( diag, MAGMA_Z_ZERO ) )
{
size_t size = m*n;
cudaError_t err = cudaMemsetAsync( dA, 0, size*sizeof(magmaDoubleComplex), queue->cuda_stream() );
assert( err == cudaSuccess );
MAGMA_UNUSED( err );
}
else {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=0; j < super_grid.y; ++j ) { // full row
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
zlaset_full_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda );
}
else { // off diagonal super block
zlaset_full_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda );
}
}
}
}
}
}
/******************************************************************************/
extern "C"
void magmablas_zlaset_batched(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex_ptr dAarray[], magma_int_t ldda,
magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m) )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
dim3 threads( BLK_X, 1, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ), batchCount );
if (uplo == MagmaLower) {
zlaset_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dAarray, ldda);
}
else if (uplo == MagmaUpper) {
zlaset_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dAarray, ldda);
}
else {
zlaset_full_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dAarray, ldda);
}
}
/******************************************************************************/
extern "C"
void magmablas_zlaset_vbatched(
magma_uplo_t uplo, magma_int_t max_m, magma_int_t max_n,
magma_int_t* m, magma_int_t* n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex_ptr dAarray[], magma_int_t* ldda,
magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( max_m < 0 )
info = -2;
else if ( max_n < 0 )
info = -3;
//else if ( ldda < max(1,m) )
// info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( max_m == 0 || max_n == 0 ) {
return;
}
dim3 threads( BLK_X, 1, 1 );
dim3 grid( magma_ceildiv( max_m, BLK_X ), magma_ceildiv( max_n, BLK_Y ), batchCount );
if (uplo == MagmaLower) {
zlaset_lower_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dAarray, ldda);
}
else if (uplo == MagmaUpper) {
zlaset_upper_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dAarray, ldda);
}
else {
zlaset_full_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dAarray, ldda);
}
}
|
f2b9b657a926eb18aeed66f6b5e79bfec4011fe9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* _reg_affineTransformation.h
*
*
* Created by Marc Modat on 25/03/2009.
* Copyright (c) 2009, University College London. All rights reserved.
* Centre for Medical Image Computing (CMIC)
* See the LICENSE.txt file in the nifty_reg root folder
*
*/
#ifdef _USE_CUDA
#ifndef _REG_AFFINETRANSFORMATION_KERNELS_CU
#define _REG_AFFINETRANSFORMATION_KERNELS_CU
#include "_reg_blocksize_gpu.h"
/* *************************************************************** */
/* *************************************************************** */
__device__ __constant__ int3 c_ImageSize;
__device__ __constant__ int c_VoxelNumber;
/* *************************************************************** */
texture<float4, 1, hipReadModeElementType> txAffineTransformation;
/* *************************************************************** */
/* *************************************************************** */
__global__
void reg_affine_positionField_kernel(float4 *PositionFieldArray)
{
const int tid= blockIdx.x*blockDim.x + threadIdx.x;
if(tid<c_VoxelNumber){
int3 imageSize = c_ImageSize;
short3 voxelIndex;
int tempIndex=tid;
voxelIndex.z=(int)(tempIndex/((imageSize.x)*(imageSize.y)));
tempIndex -= voxelIndex.z*(imageSize.x)*(imageSize.y);
voxelIndex.y=(int)(tempIndex/(imageSize.x));
voxelIndex.x = tempIndex - voxelIndex.y*(imageSize.x);
/* The transformation is applied */
float4 position;
float4 matrix = tex1Dfetch(txAffineTransformation,0);
position.x = matrix.x*voxelIndex.x + matrix.y*voxelIndex.y +
matrix.z*voxelIndex.z + matrix.w;
matrix = tex1Dfetch(txAffineTransformation,1);
position.y = matrix.x*voxelIndex.x + matrix.y*voxelIndex.y +
matrix.z*voxelIndex.z + matrix.w;
matrix = tex1Dfetch(txAffineTransformation,2);
position.z = matrix.x*voxelIndex.x + matrix.y*voxelIndex.y +
matrix.z*voxelIndex.z + matrix.w;
position.w=0.0f;
/* the deformation field (real coordinates) is stored */
PositionFieldArray[tid] = position;
}
}
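/* Editor's illustration (not part of the original NiftyReg source): the same
   index decomposition and affine product as the kernel above, for one voxel on
   the host. "affine" holds the three rows that the kernel reads from
   txAffineTransformation; the function name and layout are assumptions. */
static inline float4 reg_affine_position_host(int tid, int3 imageSize, const float4 affine[3])
{
    short3 voxelIndex;
    int tempIndex = tid;
    voxelIndex.z = (short)(tempIndex/((imageSize.x)*(imageSize.y)));
    tempIndex -= voxelIndex.z*(imageSize.x)*(imageSize.y);
    voxelIndex.y = (short)(tempIndex/(imageSize.x));
    voxelIndex.x = (short)(tempIndex - voxelIndex.y*(imageSize.x));
    float4 position;
    position.x = affine[0].x*voxelIndex.x + affine[0].y*voxelIndex.y + affine[0].z*voxelIndex.z + affine[0].w;
    position.y = affine[1].x*voxelIndex.x + affine[1].y*voxelIndex.y + affine[1].z*voxelIndex.z + affine[1].w;
    position.z = affine[2].x*voxelIndex.x + affine[2].y*voxelIndex.y + affine[2].z*voxelIndex.z + affine[2].w;
    position.w = 0.0f;
    return position;
}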
/* *************************************************************** */
/* *************************************************************** */
#endif
#endif
| f2b9b657a926eb18aeed66f6b5e79bfec4011fe9.cu | /*
* _reg_affineTransformation.h
*
*
* Created by Marc Modat on 25/03/2009.
* Copyright (c) 2009, University College London. All rights reserved.
* Centre for Medical Image Computing (CMIC)
* See the LICENSE.txt file in the nifty_reg root folder
*
*/
#ifdef _USE_CUDA
#ifndef _REG_AFFINETRANSFORMATION_KERNELS_CU
#define _REG_AFFINETRANSFORMATION_KERNELS_CU
#include "_reg_blocksize_gpu.h"
/* *************************************************************** */
/* *************************************************************** */
__device__ __constant__ int3 c_ImageSize;
__device__ __constant__ int c_VoxelNumber;
/* *************************************************************** */
texture<float4, 1, cudaReadModeElementType> txAffineTransformation;
/* *************************************************************** */
/* *************************************************************** */
__global__
void reg_affine_positionField_kernel(float4 *PositionFieldArray)
{
const int tid= blockIdx.x*blockDim.x + threadIdx.x;
if(tid<c_VoxelNumber){
int3 imageSize = c_ImageSize;
short3 voxelIndex;
int tempIndex=tid;
voxelIndex.z=(int)(tempIndex/((imageSize.x)*(imageSize.y)));
tempIndex -= voxelIndex.z*(imageSize.x)*(imageSize.y);
voxelIndex.y=(int)(tempIndex/(imageSize.x));
voxelIndex.x = tempIndex - voxelIndex.y*(imageSize.x);
/* The transformation is applied */
float4 position;
float4 matrix = tex1Dfetch(txAffineTransformation,0);
position.x = matrix.x*voxelIndex.x + matrix.y*voxelIndex.y +
matrix.z*voxelIndex.z + matrix.w;
matrix = tex1Dfetch(txAffineTransformation,1);
position.y = matrix.x*voxelIndex.x + matrix.y*voxelIndex.y +
matrix.z*voxelIndex.z + matrix.w;
matrix = tex1Dfetch(txAffineTransformation,2);
position.z = matrix.x*voxelIndex.x + matrix.y*voxelIndex.y +
matrix.z*voxelIndex.z + matrix.w;
position.w=0.0f;
/* the deformation field (real coordinates) is stored */
PositionFieldArray[tid] = position;
}
}
/* *************************************************************** */
/* *************************************************************** */
#endif
#endif
|
a91de3df40610e37773281c0dff69070d89ed1f1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rmm/thrust_rmm_allocator.h>
#include <rmm/device_uvector.hpp>
#include <raft/handle.hpp>
#include <cugraph/utilities/error.hpp>
#include <cugraph/utilities/path_retrieval.hpp>
namespace cugraph {
namespace detail {
template <typename vertex_t, typename weight_t>
__global__ void get_traversed_cost_kernel(vertex_t const *vertices,
vertex_t const *preds,
vertex_t const *vtx_map,
weight_t const *info_weights,
weight_t *out,
vertex_t stop_vertex,
vertex_t num_vertices)
{
for (vertex_t i = threadIdx.x + blockIdx.x * blockDim.x; i < num_vertices;
i += gridDim.x * blockDim.x) {
weight_t sum = info_weights[i];
vertex_t pred = preds[i];
while (pred != stop_vertex) {
vertex_t pos = vtx_map[pred];
sum += info_weights[pos];
pred = preds[pos];
}
out[i] = sum;
}
}
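// Serial host reference of the kernel above (editor's illustration, not part of
// the original cuGraph source): for each position i it accumulates info_weights
// along the predecessor chain until stop_vertex is reached. vtx_map translates a
// vertex id into its position within the sorted vertices/preds arrays, mirroring
// the sort performed in get_traversed_cost_impl below. The function name is
// hypothetical.
template <typename vertex_t, typename weight_t>
void get_traversed_cost_reference(vertex_t const *preds,
                                  vertex_t const *vtx_map,
                                  weight_t const *info_weights,
                                  weight_t *out,
                                  vertex_t stop_vertex,
                                  vertex_t num_vertices)
{
  for (vertex_t i = 0; i < num_vertices; ++i) {
    weight_t sum  = info_weights[i];
    vertex_t pred = preds[i];
    while (pred != stop_vertex) {
      vertex_t pos = vtx_map[pred];
      sum += info_weights[pos];
      pred = preds[pos];
    }
    out[i] = sum;
  }
}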
template <typename vertex_t, typename weight_t>
void get_traversed_cost_impl(raft::handle_t const &handle,
vertex_t const *vertices,
vertex_t const *preds,
weight_t const *info_weights,
weight_t *out,
vertex_t stop_vertex,
vertex_t num_vertices)
{
auto stream = handle.get_stream();
vertex_t max_blocks = handle.get_device_properties().maxGridSize[0];
vertex_t max_threads = handle.get_device_properties().maxThreadsPerBlock;
dim3 nthreads, nblocks;
nthreads.x = std::min<vertex_t>(num_vertices, max_threads);
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = std::min<vertex_t>((num_vertices + nthreads.x - 1) / nthreads.x, max_blocks);
nblocks.y = 1;
nblocks.z = 1;
rmm::device_uvector<vertex_t> vtx_map_v(num_vertices, stream);
rmm::device_uvector<vertex_t> vtx_keys_v(num_vertices, stream);
vertex_t *vtx_map = vtx_map_v.data();
vertex_t *vtx_keys = vtx_keys_v.data();
raft::copy(vtx_keys, vertices, num_vertices, stream);
thrust::sequence(rmm::exec_policy(stream)->on(stream), vtx_map, vtx_map + num_vertices);
thrust::stable_sort_by_key(
rmm::exec_policy(stream)->on(stream), vtx_keys, vtx_keys + num_vertices, vtx_map);
hipLaunchKernelGGL(( get_traversed_cost_kernel), dim3(nblocks), dim3(nthreads), 0, 0,
vertices, preds, vtx_map, info_weights, out, stop_vertex, num_vertices);
}
} // namespace detail
template <typename vertex_t, typename weight_t>
void get_traversed_cost(raft::handle_t const &handle,
vertex_t const *vertices,
vertex_t const *preds,
weight_t const *info_weights,
weight_t *out,
vertex_t stop_vertex,
vertex_t num_vertices)
{
CUGRAPH_EXPECTS(num_vertices > 0, "num_vertices should be strictly positive");
CUGRAPH_EXPECTS(out != nullptr, "out should be of size num_vertices");
cugraph::detail::get_traversed_cost_impl(
handle, vertices, preds, info_weights, out, stop_vertex, num_vertices);
}
template void get_traversed_cost<int32_t, float>(raft::handle_t const &handle,
int32_t const *vertices,
int32_t const *preds,
float const *info_weights,
float *out,
int32_t stop_vertex,
int32_t num_vertices);
template void get_traversed_cost<int32_t, double>(raft::handle_t const &handle,
int32_t const *vertices,
int32_t const *preds,
double const *info_weights,
double *out,
int32_t stop_vertex,
int32_t num_vertices);
template void get_traversed_cost<int64_t, float>(raft::handle_t const &handle,
int64_t const *vertices,
int64_t const *preds,
float const *info_weights,
float *out,
int64_t stop_vertex,
int64_t num_vertices);
template void get_traversed_cost<int64_t, double>(raft::handle_t const &handle,
int64_t const *vertices,
int64_t const *preds,
double const *info_weights,
double *out,
int64_t stop_vertex,
int64_t num_vertices);
} // namespace cugraph
| a91de3df40610e37773281c0dff69070d89ed1f1.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rmm/thrust_rmm_allocator.h>
#include <rmm/device_uvector.hpp>
#include <raft/handle.hpp>
#include <cugraph/utilities/error.hpp>
#include <cugraph/utilities/path_retrieval.hpp>
namespace cugraph {
namespace detail {
template <typename vertex_t, typename weight_t>
__global__ void get_traversed_cost_kernel(vertex_t const *vertices,
vertex_t const *preds,
vertex_t const *vtx_map,
weight_t const *info_weights,
weight_t *out,
vertex_t stop_vertex,
vertex_t num_vertices)
{
for (vertex_t i = threadIdx.x + blockIdx.x * blockDim.x; i < num_vertices;
i += gridDim.x * blockDim.x) {
weight_t sum = info_weights[i];
vertex_t pred = preds[i];
while (pred != stop_vertex) {
vertex_t pos = vtx_map[pred];
sum += info_weights[pos];
pred = preds[pos];
}
out[i] = sum;
}
}
template <typename vertex_t, typename weight_t>
void get_traversed_cost_impl(raft::handle_t const &handle,
vertex_t const *vertices,
vertex_t const *preds,
weight_t const *info_weights,
weight_t *out,
vertex_t stop_vertex,
vertex_t num_vertices)
{
auto stream = handle.get_stream();
vertex_t max_blocks = handle.get_device_properties().maxGridSize[0];
vertex_t max_threads = handle.get_device_properties().maxThreadsPerBlock;
dim3 nthreads, nblocks;
nthreads.x = std::min<vertex_t>(num_vertices, max_threads);
nthreads.y = 1;
nthreads.z = 1;
nblocks.x = std::min<vertex_t>((num_vertices + nthreads.x - 1) / nthreads.x, max_blocks);
nblocks.y = 1;
nblocks.z = 1;
rmm::device_uvector<vertex_t> vtx_map_v(num_vertices, stream);
rmm::device_uvector<vertex_t> vtx_keys_v(num_vertices, stream);
vertex_t *vtx_map = vtx_map_v.data();
vertex_t *vtx_keys = vtx_keys_v.data();
raft::copy(vtx_keys, vertices, num_vertices, stream);
thrust::sequence(rmm::exec_policy(stream)->on(stream), vtx_map, vtx_map + num_vertices);
thrust::stable_sort_by_key(
rmm::exec_policy(stream)->on(stream), vtx_keys, vtx_keys + num_vertices, vtx_map);
get_traversed_cost_kernel<<<nblocks, nthreads>>>(
vertices, preds, vtx_map, info_weights, out, stop_vertex, num_vertices);
}
} // namespace detail
template <typename vertex_t, typename weight_t>
void get_traversed_cost(raft::handle_t const &handle,
vertex_t const *vertices,
vertex_t const *preds,
weight_t const *info_weights,
weight_t *out,
vertex_t stop_vertex,
vertex_t num_vertices)
{
CUGRAPH_EXPECTS(num_vertices > 0, "num_vertices should be strictly positive");
CUGRAPH_EXPECTS(out != nullptr, "out should be of size num_vertices");
cugraph::detail::get_traversed_cost_impl(
handle, vertices, preds, info_weights, out, stop_vertex, num_vertices);
}
template void get_traversed_cost<int32_t, float>(raft::handle_t const &handle,
int32_t const *vertices,
int32_t const *preds,
float const *info_weights,
float *out,
int32_t stop_vertex,
int32_t num_vertices);
template void get_traversed_cost<int32_t, double>(raft::handle_t const &handle,
int32_t const *vertices,
int32_t const *preds,
double const *info_weights,
double *out,
int32_t stop_vertex,
int32_t num_vertices);
template void get_traversed_cost<int64_t, float>(raft::handle_t const &handle,
int64_t const *vertices,
int64_t const *preds,
float const *info_weights,
float *out,
int64_t stop_vertex,
int64_t num_vertices);
template void get_traversed_cost<int64_t, double>(raft::handle_t const &handle,
int64_t const *vertices,
int64_t const *preds,
double const *info_weights,
double *out,
int64_t stop_vertex,
int64_t num_vertices);
} // namespace cugraph
|
dbc1e95be62e8f617eae5bdcee96e323d29acbe5.hip | // !!! This is a file automatically generated by hipify!!!
#include "Util.h"
int main()
{
int num_devices;
hipGetDeviceCount(&num_devices);
//num_devices = 4;
for (int i = 0; i < num_devices; i++) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth / 8) / 1.0e6);
}
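	// Worked example of the bandwidth formula above (editor's illustration; the
	// numbers are hypothetical device values, roughly a Tesla V100):
	// memoryClockRate = 877000 kHz and memoryBusWidth = 4096 bits give
	// 2.0 * 877000 * (4096 / 8) / 1.0e6 ~= 898 GB/s (factor 2 for double data rate).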
printf("done printinig");
// for (int dev_id = 0; dev_id < num_devices; ++dev_id)
// {
// CUDA_RT_CALL(hipSetDevice(1));
// int canAccessPeer = 0;
// CUDA_RT_CALL(hipDeviceCanAccessPeer(&canAccessPeer, 1, 0));
// printf("device %d peerdevice %d canaccees %d\n",1,0,canAccessPeer);
// if (canAccessPeer)
// {
// CUDA_RT_CALL(hipDeviceEnablePeerAccess(0, 0));
// }
//RunByModelSerial();// FOR ROY!!!!!!!!!!!!!!! // Run, output VHot and look
/*Add to first kernel
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
*/
//printf("we are in the %s directory\n",_getcwd( NULL, 0 ));
printf("1!");
CUDA_RT_CALL(hipSetDevice(0));
RunByModelP();
return 0;
}
| dbc1e95be62e8f617eae5bdcee96e323d29acbe5.cu |
#include "Util.h"
int main()
{
int num_devices;
cudaGetDeviceCount(&num_devices);
//num_devices = 4;
for (int i = 0; i < num_devices; i++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d\n", i);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth / 8) / 1.0e6);
}
printf("done printinig");
// for (int dev_id = 0; dev_id < num_devices; ++dev_id)
// {
// CUDA_RT_CALL(cudaSetDevice(1));
// int canAccessPeer = 0;
// CUDA_RT_CALL(cudaDeviceCanAccessPeer(&canAccessPeer, 1, 0));
// printf("device %d peerdevice %d canaccees %d\n",1,0,canAccessPeer);
// if (canAccessPeer)
// {
// CUDA_RT_CALL(cudaDeviceEnablePeerAccess(0, 0));
// }
//RunByModelSerial();// FOR ROY!!!!!!!!!!!!!!! // Run, output VHot and look
/*Add to first kernel
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
*/
//printf("we are in the %s directory\n",_getcwd( NULL, 0 ));
printf("1!");
CUDA_RT_CALL(cudaSetDevice(0));
RunByModelP();
return 0;
}
|
ab7752c3b887ddd7e6d566a9ffd2007f54354328.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifdef __GNUC__
#include "onnxruntime_config.h"
#pragma GCC diagnostic ignored "-Wswitch"
#endif
#include <stdint.h>
#include "core/providers/cuda/shared_inc/cuda_utils.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "where_impl.h"
namespace onnxruntime {
namespace cuda {
// broadcast by computing output coordinate from offset, using fast_divmod
template <typename T, BroadcastIndexType CondIndexType, BroadcastIndexType XIndexType, BroadcastIndexType YIndexType, int NumThreadsPerBlock, int NumElementsPerThread>
__global__ void _TenaryElementWise(
size_t output_rank,
const TArray<int64_t> cond_padded_strides,
const bool* cond_data,
const TArray<int64_t> x_padded_strides,
const T* x_data,
const TArray<int64_t> y_padded_strides,
const T* y_data,
const TArray<fast_divmod> fdm_output_strides,
T* output_data,
CUDA_LONG N) {
CUDA_LONG start = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x;
bool cond_value[NumElementsPerThread];
T x_value[NumElementsPerThread];
T y_value[NumElementsPerThread];
CUDA_LONG id = start;
#pragma unroll
for (int i = 0; i < NumElementsPerThread; i++) {
if (id < N) {
// compute indexes with broadcasting rules: https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md
CUDA_LONG cond_index = (CondIndexType == BroadcastIndexType::NoBroadcast ? id : 0);
CUDA_LONG x_index = (XIndexType == BroadcastIndexType::NoBroadcast ? id : 0);
CUDA_LONG y_index = (YIndexType == BroadcastIndexType::NoBroadcast ? id : 0);
CUDA_LONG offset = id;
#pragma unroll
for (auto dim = 0; dim < fdm_output_strides.Capacity(); dim++) {
if (dim >= output_rank) {
break;
}
int q, r;
fdm_output_strides[dim].divmod(offset, q, r);
if (CondIndexType == BroadcastIndexType::NeedCompute) {
cond_index += static_cast<int>(cond_padded_strides[dim]) * q;
}
if (XIndexType == BroadcastIndexType::NeedCompute) {
x_index += static_cast<int>(x_padded_strides[dim]) * q;
}
if (YIndexType == BroadcastIndexType::NeedCompute) {
y_index += static_cast<int>(y_padded_strides[dim]) * q;
}
offset = r;
}
cond_value[i] = cond_data[cond_index];
x_value[i] = x_data[x_index];
y_value[i] = y_data[y_index];
id += NumThreadsPerBlock;
}
}
id = start;
#pragma unroll
for (int i = 0; i < NumElementsPerThread; i++) {
if (id < N) {
output_data[id] = cond_value[i] ? x_value[i] : y_value[i];
id += NumThreadsPerBlock;
}
}
}
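// Editor's illustration (not part of the original ONNX Runtime source): the same
// broadcast index computation as the kernel above, written with plain integer
// div/mod instead of fast_divmod, for a single flat output offset. It assumes
// that the padded stride of a broadcast dimension is zero, so such dimensions
// contribute nothing to the input index. The helper name is hypothetical.
inline int64_t ComputeBroadcastIndex(int64_t offset,
                                     const int64_t* output_strides,       // strides of the output shape
                                     const int64_t* input_padded_strides, // zero on broadcast dims (assumed)
                                     int rank) {
  int64_t index = 0;
  for (int dim = 0; dim < rank; ++dim) {
    int64_t q = offset / output_strides[dim];  // coordinate along this dimension
    offset = offset % output_strides[dim];     // remaining offset for inner dims
    index += input_padded_strides[dim] * q;
  }
  return index;
}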
// for scalar broadcast or non-broadcast case
template <typename T, BroadcastIndexType CondIndexType, BroadcastIndexType XIndexType, BroadcastIndexType YIndexType, int NumThreadsPerBlock, int NumElementsPerThread>
__global__ void _TenaryElementWiseSimple(
const bool* cond_data,
const T* x_data,
const T* y_data,
T* output_data,
CUDA_LONG N) {
CUDA_LONG start = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x;
bool cond_value[NumElementsPerThread];
T x_value[NumElementsPerThread];
T y_value[NumElementsPerThread];
CUDA_LONG id = start;
#pragma unroll
for (int i = 0; i < NumElementsPerThread; i++) {
if (id < N) {
cond_value[i] = cond_data[CondIndexType == BroadcastIndexType::NoBroadcast ? id : 0];
x_value[i] = x_data[XIndexType == BroadcastIndexType::NoBroadcast ? id : 0];
y_value[i] = y_data[YIndexType == BroadcastIndexType::NoBroadcast ? id : 0];
id += NumThreadsPerBlock;
}
}
id = start;
#pragma unroll
for (int i = 0; i < NumElementsPerThread; i++) {
if (id < N) {
output_data[id] = cond_value[i] ? x_value[i] : y_value[i];
id += NumThreadsPerBlock;
}
}
}
#define HANDLE_Y_INDEX_TYPE_SIMPLE(COND_INDEX_TYPE, X_INDEX_TYPE, Y_INDEX_TYPE) \
case Y_INDEX_TYPE: { \
hipLaunchKernelGGL(( _TenaryElementWiseSimple<T, \
COND_INDEX_TYPE, \
X_INDEX_TYPE, \
Y_INDEX_TYPE, \
GridDim::maxThreadsPerBlock, \
GridDim::maxElementsPerThread>) \
, dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, cond_data, \
x_data, \
y_data, \
output_data, \
N); \
} break
#define HANDLE_X_INDEX_TYPE_SIMPLE(COND_INDEX_TYPE, X_INDEX_TYPE, Y_INDEX_TYPE_VAL) \
case X_INDEX_TYPE: { \
switch (Y_INDEX_TYPE_VAL) { \
HANDLE_Y_INDEX_TYPE_SIMPLE(COND_INDEX_TYPE, X_INDEX_TYPE, BroadcastIndexType::NoBroadcast); \
HANDLE_Y_INDEX_TYPE_SIMPLE(COND_INDEX_TYPE, X_INDEX_TYPE, BroadcastIndexType::Scalar); \
} \
} break
#define HANDLE_COND_INDEX_TYPE_SIMPLE(COND_INDEX_TYPE, X_INDEX_TYPE_VAL, Y_INDEX_TYPE_VAL) \
case COND_INDEX_TYPE: { \
switch (X_INDEX_TYPE_VAL) { \
HANDLE_X_INDEX_TYPE_SIMPLE(COND_INDEX_TYPE, BroadcastIndexType::NoBroadcast, Y_INDEX_TYPE_VAL); \
HANDLE_X_INDEX_TYPE_SIMPLE(COND_INDEX_TYPE, BroadcastIndexType::Scalar, Y_INDEX_TYPE_VAL); \
} \
} break
#define HANDLE_Y_INDEX_TYPE(COND_INDEX_TYPE, X_INDEX_TYPE, Y_INDEX_TYPE) \
case Y_INDEX_TYPE: { \
hipLaunchKernelGGL(( _TenaryElementWise<T, \
COND_INDEX_TYPE, \
X_INDEX_TYPE, \
Y_INDEX_TYPE, \
GridDim::maxThreadsPerBlock, \
GridDim::maxElementsPerThread>) \
, dim3(blocksPerGrid), dim3(GridDim::maxThreadsPerBlock), 0, stream, output_rank_or_simple_broadcast, \
cond_padded_strides, \
cond_data, \
x_padded_strides, \
x_data, \
y_padded_strides, \
y_data, \
fdm_output_strides, \
output_data, \
N); \
} break
#define HANDLE_X_INDEX_TYPE(COND_INDEX_TYPE, X_INDEX_TYPE, Y_INDEX_TYPE_VAL) \
case X_INDEX_TYPE: { \
switch (Y_INDEX_TYPE_VAL) { \
HANDLE_Y_INDEX_TYPE(COND_INDEX_TYPE, X_INDEX_TYPE, BroadcastIndexType::NoBroadcast); \
HANDLE_Y_INDEX_TYPE(COND_INDEX_TYPE, X_INDEX_TYPE, BroadcastIndexType::Scalar); \
HANDLE_Y_INDEX_TYPE(COND_INDEX_TYPE, X_INDEX_TYPE, BroadcastIndexType::NeedCompute); \
} \
} break
#define HANDLE_COND_INDEX_TYPE(COND_INDEX_TYPE, X_INDEX_TYPE_VAL, Y_INDEX_TYPE_VAL) \
case COND_INDEX_TYPE: { \
switch (X_INDEX_TYPE_VAL) { \
HANDLE_X_INDEX_TYPE(COND_INDEX_TYPE, BroadcastIndexType::NoBroadcast, Y_INDEX_TYPE_VAL); \
HANDLE_X_INDEX_TYPE(COND_INDEX_TYPE, BroadcastIndexType::Scalar, Y_INDEX_TYPE_VAL); \
HANDLE_X_INDEX_TYPE(COND_INDEX_TYPE, BroadcastIndexType::NeedCompute, Y_INDEX_TYPE_VAL); \
} \
} break
template <typename T>
void WhereImpl(
hipStream_t stream,
size_t output_rank_or_simple_broadcast,
BroadcastIndexType cond_index_type,
const TArray<int64_t>& cond_padded_strides,
const bool* cond_data,
BroadcastIndexType x_index_type,
const TArray<int64_t>& x_padded_strides,
const T* x_data,
BroadcastIndexType y_index_type,
const TArray<int64_t>& y_padded_strides,
const T* y_data,
const TArray<fast_divmod>& fdm_output_strides,
T* output_data,
size_t count) {
int blocksPerGrid = static_cast<int>(CeilDiv(count, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread));
CUDA_LONG N = static_cast<CUDA_LONG>(count);
if (output_rank_or_simple_broadcast == static_cast<size_t>(SimpleBroadcast::NoBroadcast)) {
switch (cond_index_type) {
HANDLE_COND_INDEX_TYPE_SIMPLE(BroadcastIndexType::NoBroadcast, x_index_type, y_index_type);
HANDLE_COND_INDEX_TYPE_SIMPLE(BroadcastIndexType::Scalar, x_index_type, y_index_type);
}
} else {
switch (cond_index_type) {
HANDLE_COND_INDEX_TYPE(BroadcastIndexType::NoBroadcast, x_index_type, y_index_type);
HANDLE_COND_INDEX_TYPE(BroadcastIndexType::Scalar, x_index_type, y_index_type);
HANDLE_COND_INDEX_TYPE(BroadcastIndexType::NeedCompute, x_index_type, y_index_type);
}
}
}
#define SPECIALIZED_IMPL(T) \
template void WhereImpl<T>(hipStream_t stream, \
size_t output_rank_or_simple_broadcast, \
BroadcastIndexType cond_index_type, \
const TArray<int64_t>& cond_padded_strides, \
const bool* cond_data, \
BroadcastIndexType x_index_type, \
const TArray<int64_t>& x_padded_strides, \
const T* x_data, \
BroadcastIndexType y_index_type, \
const TArray<int64_t>& y_padded_strides, \
const T* y_data, \
const TArray<fast_divmod>& fdm_output_strides, \
T* output_data, \
size_t count);
SPECIALIZED_IMPL(uint8_t)
SPECIALIZED_IMPL(int32_t)
SPECIALIZED_IMPL(int64_t)
SPECIALIZED_IMPL(float)
SPECIALIZED_IMPL(double_t)
SPECIALIZED_IMPL(half)
} // namespace cuda
} // namespace onnxruntime
| ab7752c3b887ddd7e6d566a9ffd2007f54354328.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#ifdef __GNUC__
#include "onnxruntime_config.h"
#pragma GCC diagnostic ignored "-Wswitch"
#endif
#include <stdint.h>
#include "core/providers/cuda/shared_inc/cuda_utils.h"
#include "core/providers/cuda/cu_inc/common.cuh"
#include "where_impl.h"
namespace onnxruntime {
namespace cuda {
// broadcast by computing output coordinate from offset, using fast_divmod
template <typename T, BroadcastIndexType CondIndexType, BroadcastIndexType XIndexType, BroadcastIndexType YIndexType, int NumThreadsPerBlock, int NumElementsPerThread>
__global__ void _TenaryElementWise(
size_t output_rank,
const TArray<int64_t> cond_padded_strides,
const bool* cond_data,
const TArray<int64_t> x_padded_strides,
const T* x_data,
const TArray<int64_t> y_padded_strides,
const T* y_data,
const TArray<fast_divmod> fdm_output_strides,
T* output_data,
CUDA_LONG N) {
CUDA_LONG start = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x;
bool cond_value[NumElementsPerThread];
T x_value[NumElementsPerThread];
T y_value[NumElementsPerThread];
CUDA_LONG id = start;
#pragma unroll
for (int i = 0; i < NumElementsPerThread; i++) {
if (id < N) {
// compute indexes with broadcasting rules: https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md
CUDA_LONG cond_index = (CondIndexType == BroadcastIndexType::NoBroadcast ? id : 0);
CUDA_LONG x_index = (XIndexType == BroadcastIndexType::NoBroadcast ? id : 0);
CUDA_LONG y_index = (YIndexType == BroadcastIndexType::NoBroadcast ? id : 0);
CUDA_LONG offset = id;
#pragma unroll
for (auto dim = 0; dim < fdm_output_strides.Capacity(); dim++) {
if (dim >= output_rank) {
break;
}
int q, r;
fdm_output_strides[dim].divmod(offset, q, r);
if (CondIndexType == BroadcastIndexType::NeedCompute) {
cond_index += static_cast<int>(cond_padded_strides[dim]) * q;
}
if (XIndexType == BroadcastIndexType::NeedCompute) {
x_index += static_cast<int>(x_padded_strides[dim]) * q;
}
if (YIndexType == BroadcastIndexType::NeedCompute) {
y_index += static_cast<int>(y_padded_strides[dim]) * q;
}
offset = r;
}
cond_value[i] = cond_data[cond_index];
x_value[i] = x_data[x_index];
y_value[i] = y_data[y_index];
id += NumThreadsPerBlock;
}
}
id = start;
#pragma unroll
for (int i = 0; i < NumElementsPerThread; i++) {
if (id < N) {
output_data[id] = cond_value[i] ? x_value[i] : y_value[i];
id += NumThreadsPerBlock;
}
}
}
// for scalar broadcast or non-broadcast case
template <typename T, BroadcastIndexType CondIndexType, BroadcastIndexType XIndexType, BroadcastIndexType YIndexType, int NumThreadsPerBlock, int NumElementsPerThread>
__global__ void _TenaryElementWiseSimple(
const bool* cond_data,
const T* x_data,
const T* y_data,
T* output_data,
CUDA_LONG N) {
CUDA_LONG start = NumElementsPerThread * NumThreadsPerBlock * blockIdx.x + threadIdx.x;
bool cond_value[NumElementsPerThread];
T x_value[NumElementsPerThread];
T y_value[NumElementsPerThread];
CUDA_LONG id = start;
#pragma unroll
for (int i = 0; i < NumElementsPerThread; i++) {
if (id < N) {
cond_value[i] = cond_data[CondIndexType == BroadcastIndexType::NoBroadcast ? id : 0];
x_value[i] = x_data[XIndexType == BroadcastIndexType::NoBroadcast ? id : 0];
y_value[i] = y_data[YIndexType == BroadcastIndexType::NoBroadcast ? id : 0];
id += NumThreadsPerBlock;
}
}
id = start;
#pragma unroll
for (int i = 0; i < NumElementsPerThread; i++) {
if (id < N) {
output_data[id] = cond_value[i] ? x_value[i] : y_value[i];
id += NumThreadsPerBlock;
}
}
}
#define HANDLE_Y_INDEX_TYPE_SIMPLE(COND_INDEX_TYPE, X_INDEX_TYPE, Y_INDEX_TYPE) \
case Y_INDEX_TYPE: { \
_TenaryElementWiseSimple<T, \
COND_INDEX_TYPE, \
X_INDEX_TYPE, \
Y_INDEX_TYPE, \
GridDim::maxThreadsPerBlock, \
GridDim::maxElementsPerThread> \
<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(cond_data, \
x_data, \
y_data, \
output_data, \
N); \
} break
#define HANDLE_X_INDEX_TYPE_SIMPLE(COND_INDEX_TYPE, X_INDEX_TYPE, Y_INDEX_TYPE_VAL) \
case X_INDEX_TYPE: { \
switch (Y_INDEX_TYPE_VAL) { \
HANDLE_Y_INDEX_TYPE_SIMPLE(COND_INDEX_TYPE, X_INDEX_TYPE, BroadcastIndexType::NoBroadcast); \
HANDLE_Y_INDEX_TYPE_SIMPLE(COND_INDEX_TYPE, X_INDEX_TYPE, BroadcastIndexType::Scalar); \
} \
} break
#define HANDLE_COND_INDEX_TYPE_SIMPLE(COND_INDEX_TYPE, X_INDEX_TYPE_VAL, Y_INDEX_TYPE_VAL) \
case COND_INDEX_TYPE: { \
switch (X_INDEX_TYPE_VAL) { \
HANDLE_X_INDEX_TYPE_SIMPLE(COND_INDEX_TYPE, BroadcastIndexType::NoBroadcast, Y_INDEX_TYPE_VAL); \
HANDLE_X_INDEX_TYPE_SIMPLE(COND_INDEX_TYPE, BroadcastIndexType::Scalar, Y_INDEX_TYPE_VAL); \
} \
} break
#define HANDLE_Y_INDEX_TYPE(COND_INDEX_TYPE, X_INDEX_TYPE, Y_INDEX_TYPE) \
case Y_INDEX_TYPE: { \
_TenaryElementWise<T, \
COND_INDEX_TYPE, \
X_INDEX_TYPE, \
Y_INDEX_TYPE, \
GridDim::maxThreadsPerBlock, \
GridDim::maxElementsPerThread> \
<<<blocksPerGrid, GridDim::maxThreadsPerBlock, 0, stream>>>(output_rank_or_simple_broadcast, \
cond_padded_strides, \
cond_data, \
x_padded_strides, \
x_data, \
y_padded_strides, \
y_data, \
fdm_output_strides, \
output_data, \
N); \
} break
#define HANDLE_X_INDEX_TYPE(COND_INDEX_TYPE, X_INDEX_TYPE, Y_INDEX_TYPE_VAL) \
case X_INDEX_TYPE: { \
switch (Y_INDEX_TYPE_VAL) { \
HANDLE_Y_INDEX_TYPE(COND_INDEX_TYPE, X_INDEX_TYPE, BroadcastIndexType::NoBroadcast); \
HANDLE_Y_INDEX_TYPE(COND_INDEX_TYPE, X_INDEX_TYPE, BroadcastIndexType::Scalar); \
HANDLE_Y_INDEX_TYPE(COND_INDEX_TYPE, X_INDEX_TYPE, BroadcastIndexType::NeedCompute); \
} \
} break
#define HANDLE_COND_INDEX_TYPE(COND_INDEX_TYPE, X_INDEX_TYPE_VAL, Y_INDEX_TYPE_VAL) \
case COND_INDEX_TYPE: { \
switch (X_INDEX_TYPE_VAL) { \
HANDLE_X_INDEX_TYPE(COND_INDEX_TYPE, BroadcastIndexType::NoBroadcast, Y_INDEX_TYPE_VAL); \
HANDLE_X_INDEX_TYPE(COND_INDEX_TYPE, BroadcastIndexType::Scalar, Y_INDEX_TYPE_VAL); \
HANDLE_X_INDEX_TYPE(COND_INDEX_TYPE, BroadcastIndexType::NeedCompute, Y_INDEX_TYPE_VAL); \
} \
} break
template <typename T>
void WhereImpl(
cudaStream_t stream,
size_t output_rank_or_simple_broadcast,
BroadcastIndexType cond_index_type,
const TArray<int64_t>& cond_padded_strides,
const bool* cond_data,
BroadcastIndexType x_index_type,
const TArray<int64_t>& x_padded_strides,
const T* x_data,
BroadcastIndexType y_index_type,
const TArray<int64_t>& y_padded_strides,
const T* y_data,
const TArray<fast_divmod>& fdm_output_strides,
T* output_data,
size_t count) {
int blocksPerGrid = static_cast<int>(CeilDiv(count, GridDim::maxThreadsPerBlock * GridDim::maxElementsPerThread));
CUDA_LONG N = static_cast<CUDA_LONG>(count);
if (output_rank_or_simple_broadcast == static_cast<size_t>(SimpleBroadcast::NoBroadcast)) {
switch (cond_index_type) {
HANDLE_COND_INDEX_TYPE_SIMPLE(BroadcastIndexType::NoBroadcast, x_index_type, y_index_type);
HANDLE_COND_INDEX_TYPE_SIMPLE(BroadcastIndexType::Scalar, x_index_type, y_index_type);
}
} else {
switch (cond_index_type) {
HANDLE_COND_INDEX_TYPE(BroadcastIndexType::NoBroadcast, x_index_type, y_index_type);
HANDLE_COND_INDEX_TYPE(BroadcastIndexType::Scalar, x_index_type, y_index_type);
HANDLE_COND_INDEX_TYPE(BroadcastIndexType::NeedCompute, x_index_type, y_index_type);
}
}
}
#define SPECIALIZED_IMPL(T) \
template void WhereImpl<T>(cudaStream_t stream, \
size_t output_rank_or_simple_broadcast, \
BroadcastIndexType cond_index_type, \
const TArray<int64_t>& cond_padded_strides, \
const bool* cond_data, \
BroadcastIndexType x_index_type, \
const TArray<int64_t>& x_padded_strides, \
const T* x_data, \
BroadcastIndexType y_index_type, \
const TArray<int64_t>& y_padded_strides, \
const T* y_data, \
const TArray<fast_divmod>& fdm_output_strides, \
T* output_data, \
size_t count);
SPECIALIZED_IMPL(uint8_t)
SPECIALIZED_IMPL(int32_t)
SPECIALIZED_IMPL(int64_t)
SPECIALIZED_IMPL(float)
SPECIALIZED_IMPL(double_t)
SPECIALIZED_IMPL(half)
} // namespace cuda
} // namespace onnxruntime
|
bd6ee3b9f9280c14c0489c19f7db622aeb384799.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "bvh_common.h"
#include "bvh_math.cuh"
#include "Aabb.cuh"
#define CALC_TETRA_AABB_NUM_THREADS 512
__global__ void formTriangleAabbs_kernel(Aabb *dst, float3 * pos, float3 * vel, float h,
uint4 * tetrahedronVertices,
unsigned maxNumPerTetVs)
{
__shared__ float3 sP0[CALC_TETRA_AABB_NUM_THREADS];
__shared__ float3 sP1[CALC_TETRA_AABB_NUM_THREADS];
uint idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx >= maxNumPerTetVs) return;
uint itet = idx>>2;
uint ivert = idx & 3;
uint * vtet = & tetrahedronVertices[itet].x;
uint iv = vtet[ivert];
sP0[threadIdx.x] = pos[iv];
sP1[threadIdx.x] = float3_progress(pos[iv], vel[iv], h);
__syncthreads();
if(ivert > 0) return;
Aabb res;
resetAabb(res);
expandAabb(res, sP0[threadIdx.x]);
expandAabb(res, sP1[threadIdx.x]);
expandAabb(res, sP0[threadIdx.x + 1]);
expandAabb(res, sP1[threadIdx.x + 1]);
expandAabb(res, sP0[threadIdx.x + 2]);
expandAabb(res, sP1[threadIdx.x + 2]);
dst[itet] = res;
}
namespace trianglesys {
void formTetrahedronAabbs(Aabb * dst,
float3 * pos,
float3 * vel,
float timeStep,
uint4 * tets,
uint numTriangles)
{
int tpb = CALC_TETRA_AABB_NUM_THREADS;
dim3 block(tpb, 1, 1);
unsigned nblk = iDivUp(numTriangles<<2, tpb);
dim3 grid(nblk, 1, 1);
hipLaunchKernelGGL(( formTriangleAabbs_kernel), dim3(grid), dim3(block) , 0, 0, dst, pos, vel, timeStep, tets, numTriangles<<2);
}
}
| bd6ee3b9f9280c14c0489c19f7db622aeb384799.cu | #include "bvh_common.h"
#include "bvh_math.cuh"
#include "Aabb.cuh"
#define CALC_TETRA_AABB_NUM_THREADS 512
__global__ void formTriangleAabbs_kernel(Aabb *dst, float3 * pos, float3 * vel, float h,
uint4 * tetrahedronVertices,
unsigned maxNumPerTetVs)
{
__shared__ float3 sP0[CALC_TETRA_AABB_NUM_THREADS];
__shared__ float3 sP1[CALC_TETRA_AABB_NUM_THREADS];
uint idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx >= maxNumPerTetVs) return;
uint itet = idx>>2;
uint ivert = idx & 3;
uint * vtet = & tetrahedronVertices[itet].x;
uint iv = vtet[ivert];
sP0[threadIdx.x] = pos[iv];
sP1[threadIdx.x] = float3_progress(pos[iv], vel[iv], h);
__syncthreads();
if(ivert > 0) return;
Aabb res;
resetAabb(res);
expandAabb(res, sP0[threadIdx.x]);
expandAabb(res, sP1[threadIdx.x]);
expandAabb(res, sP0[threadIdx.x + 1]);
expandAabb(res, sP1[threadIdx.x + 1]);
expandAabb(res, sP0[threadIdx.x + 2]);
expandAabb(res, sP1[threadIdx.x + 2]);
dst[itet] = res;
}
namespace trianglesys {
void formTetrahedronAabbs(Aabb * dst,
float3 * pos,
float3 * vel,
float timeStep,
uint4 * tets,
uint numTriangles)
{
int tpb = CALC_TETRA_AABB_NUM_THREADS;
dim3 block(tpb, 1, 1);
unsigned nblk = iDivUp(numTriangles<<2, tpb);
dim3 grid(nblk, 1, 1);
formTriangleAabbs_kernel<<< grid, block >>>(dst, pos, vel, timeStep, tets, numTriangles<<2);
}
}
|
8f630912141fbdfede87ac0eb1084e25f78bab0b.hip | // !!! This is a file automatically generated by hipify!!!
#include <vector>
#include <iostream>
#include <chrono>
#include <hip/hip_runtime.h>
#include "util.h" // graph
#define DIAMETER_SAMPLES 512
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#ifndef checkCudaErrors
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// These are the inline versions for all of the SDK helper functions
inline void __checkCudaErrors(hipError_t err, const char *file, const int line)
{
if (hipSuccess != err)
{
std::cerr << "CUDA Error = " << err << ": " << hipGetErrorString(err) << " from file "
<< file << ", line " << line << std::endl;
}
}
#endif
//Note: N must be a power of two
//Simple/Naive bitonic sort. We're only sorting ~512 elements one time, so performance isn't important
__device__ void bitonic_sort(int *values, const int N)
{
unsigned int idx = threadIdx.x;
for (int k = 2; k <= N; k <<= 1)
{
for (int j = k >> 1; j > 0; j = j >> 1)
{
while(idx < N)
{
int ixj = idx^j;
if (ixj > idx)
{
if ((idx&k) == 0 && values[idx] > values[ixj])
{
//exchange(idx, ixj);
int tmp = values[idx];
values[idx] = values[ixj];
values[ixj] = tmp;
}
if ((idx&k) != 0 && values[idx] < values[ixj])
{
//exchange(idx, ixj);
int tmp = values[idx];
values[idx] = values[ixj];
values[ixj] = tmp;
}
}
idx += blockDim.x;
}
__syncthreads();
idx = threadIdx.x;
}
}
}
__global__ void bc_kernel(
float *__restrict__ bc,
const int *__restrict__ R,
const int *__restrict__ C,
const int *__restrict__ F,
const int n,
const int m,
const int *__restrict__ d,
const unsigned long long *__restrict__ sigma,
const float *__restrict__ delta,
const int *__restrict__ Q,
const int *__restrict__ Q2,
const int *__restrict__ S,
const int *__restrict__ endpoints,
int *__restrict__ next_source,
const size_t pitch_d,
const size_t pitch_sigma,
const size_t pitch_delta,
const size_t pitch_Q,
const size_t pitch_Q2,
const size_t pitch_S,
const size_t pitch_endpoints,
const int start,
const int end,
int *__restrict__ jia,
int *__restrict__ diameters,
const int *__restrict__ source_vertices,
const bool approx)
{
__shared__ int ind;
__shared__ int i;
__shared__ int *Q_row;
__shared__ int *Q2_row;
__shared__ int *S_row;
__shared__ int *endpoints_row;
int j = threadIdx.x;
int *d_row = (int*)((char*)d + blockIdx.x*pitch_d);
unsigned long long *sigma_row = (unsigned long long*)((char*)sigma + blockIdx.x*pitch_sigma);
float *delta_row = (float*)((char*)delta + blockIdx.x*pitch_delta);
if(j == 0)
{
ind = blockIdx.x + start;
i = approx ? source_vertices[ind] : ind;
Q_row = (int*)((char*)Q + blockIdx.x*pitch_Q);
Q2_row = (int*)((char*)Q2 + blockIdx.x*pitch_Q2);
S_row = (int*)((char*)S + blockIdx.x*pitch_S);
endpoints_row = (int*)((char*)endpoints + blockIdx.x*pitch_endpoints);
*jia = 0;
}
__syncthreads();
if((ind==0) && (j < DIAMETER_SAMPLES))
{
diameters[j] = INT_MAX;
}
__syncthreads();
while(ind < end)
{
//Initialization
for(int k=threadIdx.x; k<n; k+=blockDim.x)
{
if(k == i) //If k is the source node...
{
d_row[k] = 0;
sigma_row[k] = 1;
}
else
{
d_row[k] = INT_MAX;
sigma_row[k] = 0;
}
delta_row[k] = 0;
}
__syncthreads();
//Shortest Path Calculation
__shared__ int Q_len;
__shared__ int Q2_len;
__shared__ int S_len;
__shared__ int current_depth;
__shared__ int endpoints_len;
__shared__ bool sp_calc_done;
if(j == 0)
{
Q_row[0] = i;
Q_len = 1;
Q2_len = 0;
S_row[0] = i;
S_len = 1;
endpoints_row[0] = 0;
endpoints_row[1] = 1;
endpoints_len = 2;
current_depth = 0;
sp_calc_done = false;
}
__syncthreads();
//Do first iteration separately since we already know the edges to traverse
for(int r=threadIdx.x+R[i]; r<R[i+1]; r+=blockDim.x)
{
int w = C[r];
//No multiple/self edges - each value of w is unique, so no need for atomics
if(d_row[w] == INT_MAX)
{
d_row[w] = 1;
int t = atomicAdd(&Q2_len,1);
Q2_row[t] = w;
}
if(d_row[w] == (d_row[i]+1))
{
atomicAdd(&sigma_row[w],1);
}
}
__syncthreads();
if(Q2_len == 0)
{
sp_calc_done = true;
}
else
{
for(int kk=threadIdx.x; kk<Q2_len; kk+=blockDim.x)
{
Q_row[kk] = Q2_row[kk];
S_row[kk+S_len] = Q2_row[kk];
}
__syncthreads();
if(j == 0)
{
endpoints_row[endpoints_len] = endpoints_row[endpoints_len-1] + Q2_len;
endpoints_len++;
Q_len = Q2_len;
S_len += Q2_len;
Q2_len = 0;
current_depth++;
}
}
__syncthreads();
while(!sp_calc_done)
{
if((*jia) && (Q_len > 512))
{
for(int k=threadIdx.x; k<2*m; k+=blockDim.x)
{
int v = F[k];
if(d_row[v] == current_depth)
{
int w = C[k];
if(atomicCAS(&d_row[w],INT_MAX,d_row[v]+1) == INT_MAX)
{
int t = atomicAdd(&Q2_len,1);
Q2_row[t] = w;
}
if(d_row[w] == (d_row[v]+1))
{
atomicAdd(&sigma_row[w],sigma_row[v]);
}
}
}
}
else
{
__shared__ int next_index;
if(j == 0)
{
next_index = blockDim.x;
}
__syncthreads();
int k = threadIdx.x; //Initial vertices
while(k < Q_len)
{
int v = Q_row[k];
for(int r=R[v]; r<R[v+1]; r++)
{
int w = C[r];
//Use atomicCAS to prevent duplicates
if(atomicCAS(&d_row[w],INT_MAX,d_row[v]+1) == INT_MAX)
{
int t = atomicAdd(&Q2_len,1);
Q2_row[t] = w;
}
if(d_row[w] == (d_row[v]+1))
{
atomicAdd(&sigma_row[w],sigma_row[v]);
}
}
k = atomicAdd(&next_index,1);
}
}
__syncthreads();
if(Q2_len == 0) //If there is no additional work found, we're done
{
break;
}
else //If there is additional work, transfer elements from Q2 to Q, reset lengths, and add vertices to the stack
{
for(int kk=threadIdx.x; kk<Q2_len; kk+=blockDim.x)
{
Q_row[kk] = Q2_row[kk];
S_row[kk+S_len] = Q2_row[kk];
}
__syncthreads();
if(j == 0)
{
endpoints_row[endpoints_len] = endpoints_row[endpoints_len-1] + Q2_len;
endpoints_len++;
Q_len = Q2_len;
S_len += Q2_len;
Q2_len = 0;
current_depth++;
}
__syncthreads();
}
}
//The elements at the end of the stack will have the largest distance from the source
//Using the successor method, we can start from one depth earlier
if(j == 0)
{
current_depth = d_row[S_row[S_len-1]] - 1;
if(ind<DIAMETER_SAMPLES)
{
diameters[ind] = current_depth+1;
}
}
__syncthreads();
//Dependency Accumulation (Madduri/Ediger successor method)
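        //For each vertex w at the current depth, accumulate Brandes' recurrence in its successor form:
        //delta(w) = sum over successors v of w (those with d[v] == d[w]+1) of (sigma(w)/sigma(v)) * (1 + delta(v)),
        //which is what both branches below compute (edge-parallel vs. stack-based work-efficient).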
while(current_depth > 0)
{
int stack_iter_len = endpoints_row[current_depth+1]-endpoints_row[current_depth];
if((*jia) && (stack_iter_len>512))
{
for(int kk=threadIdx.x; kk<2*m; kk+=blockDim.x)
{
int w = F[kk];
if(d_row[w] == current_depth)
{
int v = C[kk];
if(d_row[v] == (d_row[w]+1))
{
float change = (sigma_row[w]/(float)sigma_row[v])*(1.0f+delta_row[v]);
atomicAdd(&delta_row[w],change);
}
}
}
}
else
{
for(int kk=threadIdx.x+endpoints_row[current_depth]; kk<endpoints_row[current_depth+1]; kk+=blockDim.x)
{
int w = S_row[kk];
float dsw = 0;
float sw = (float)sigma_row[w];
for(int z=R[w]; z<R[w+1]; z++)
{
int v = C[z];
if(d_row[v] == (d_row[w]+1))
{
dsw += (sw/(float)sigma_row[v])*(1.0f+delta_row[v]);
}
}
delta_row[w] = dsw;
}
}
__syncthreads();
if(j == 0)
{
current_depth--;
}
__syncthreads();
}
for(int kk=threadIdx.x; kk<n; kk+=blockDim.x)
{
atomicAdd(&bc[kk],delta_row[kk]); //Would need to check that kk != i here, but delta_row[kk] is guaranteed to be 0.
}
if(j == 0)
{
ind = atomicAdd(next_source,1);
if(approx)
{
i = source_vertices[ind];
}
else
{
i = ind;
}
}
__syncthreads();
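        //Diameter estimation: once 2*DIAMETER_SAMPLES sources have been claimed, sort the sampled BFS depths and,
        //if the median is below 4*log2(n), set *jia so later traversals use the edge-parallel strategy for frontiers larger than 512.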
if(ind == 2*DIAMETER_SAMPLES)
{
__shared__ int diameter_keys[DIAMETER_SAMPLES];
for(int kk = threadIdx.x; kk<DIAMETER_SAMPLES; kk+=blockDim.x)
{
diameter_keys[kk] = diameters[kk];
}
__syncthreads();
bitonic_sort(diameter_keys,DIAMETER_SAMPLES);
__syncthreads();
if(j == 0)
{
int log2n = 0;
int tempn = n;
while(tempn >>= 1)
{
++log2n;
}
if(diameter_keys[DIAMETER_SAMPLES/2] < 4*log2n) //Use the median
{
*jia = 1;
}
}
}
__syncthreads();
}
}
std::vector<float> bc_gpu(
graph g,
int max_threads_per_block,
int number_of_SMs,
program_options op,
const std::set<int> &source_vertices)
{
float *bc_gpu = new float[g.n];
int next_source = number_of_SMs;
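    //One block per SM: each block starts on its own source vertex and then claims further sources by atomically incrementing next_source on the device.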
float *bc_d, *delta_d;
int *d_d, *R_d, *C_d, *F_d, *Q_d, *Q2_d, *S_d, *endpoints_d, *next_source_d, *source_vertices_d;
unsigned long long *sigma_d;
size_t pitch_d, pitch_sigma, pitch_delta, pitch_Q, pitch_Q2, pitch_S, pitch_endpoints;
int *jia_d, *diameters_d;
dim3 dimGrid (number_of_SMs, 1, 1);
dim3 dimBlock (max_threads_per_block, 1, 1);
//Allocate and transfer data to the GPU
checkCudaErrors(hipMalloc((void**)&bc_d,sizeof(float)*g.n));
checkCudaErrors(hipMalloc((void**)&R_d,sizeof(int)*(g.n+1)));
checkCudaErrors(hipMalloc((void**)&C_d,sizeof(int)*(2*g.m)));
checkCudaErrors(hipMalloc((void**)&F_d,sizeof(int)*(2*g.m)));
checkCudaErrors(hipMallocPitch((void**)&d_d,&pitch_d,sizeof(int)*g.n,dimGrid.x));
checkCudaErrors(hipMallocPitch((void**)&sigma_d,&pitch_sigma,sizeof(unsigned long long)*g.n,dimGrid.x));
checkCudaErrors(hipMallocPitch((void**)&delta_d,&pitch_delta,sizeof(float)*g.n,dimGrid.x));
//Making Queues/Stack of size O(n) since we won't duplicate
checkCudaErrors(hipMallocPitch((void**)&Q_d,&pitch_Q,sizeof(int)*g.n,dimGrid.x));
checkCudaErrors(hipMallocPitch((void**)&Q2_d,&pitch_Q2,sizeof(int)*g.n,dimGrid.x));
checkCudaErrors(hipMallocPitch((void**)&S_d,&pitch_S,sizeof(int)*g.n,dimGrid.x));
checkCudaErrors(hipMallocPitch((void**)&endpoints_d,&pitch_endpoints,sizeof(int)*(g.n+1),dimGrid.x));
checkCudaErrors(hipMalloc((void**)&next_source_d,sizeof(int)));
// source_vertices of type "std::set" has no data() method
std::vector<int> source_vertices_h(source_vertices.size());
std::copy(source_vertices.begin(),source_vertices.end(),source_vertices_h.begin());
checkCudaErrors(hipMalloc((void**)&source_vertices_d, sizeof(int) * source_vertices.size()));
if(op.approx)
{
checkCudaErrors(hipMemcpy(source_vertices_d, source_vertices_h.data(),
sizeof(int) * source_vertices.size(), hipMemcpyHostToDevice));
}
checkCudaErrors(hipMalloc((void**)&jia_d,sizeof(int)));
checkCudaErrors(hipMalloc((void**)&diameters_d,sizeof(int)*DIAMETER_SAMPLES));
checkCudaErrors(hipMemset(jia_d,0,sizeof(int)));
checkCudaErrors(hipMemset(diameters_d,0,sizeof(int)*DIAMETER_SAMPLES));
checkCudaErrors(hipMemcpy(R_d,g.R,sizeof(int)*(g.n+1),hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(C_d,g.C,sizeof(int)*(2*g.m),hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(F_d,g.F,sizeof(int)*(2*g.m),hipMemcpyHostToDevice));
checkCudaErrors(hipMemset(bc_d,0,sizeof(float)*g.n));
checkCudaErrors(hipMemcpy(next_source_d,&next_source,sizeof(int),hipMemcpyHostToDevice));
int end;
bool approx;
if(op.approx)
{
end = op.k;
approx = true;
} else {
end = g.n;
approx = false;
}
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
hipLaunchKernelGGL(( bc_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0,
bc_d,
R_d,
C_d,
F_d,
g.n,
g.m,
d_d,
sigma_d,
delta_d,
Q_d,
Q2_d,
S_d,
endpoints_d,
next_source_d,
pitch_d,
pitch_sigma,
pitch_delta,
pitch_Q,
pitch_Q2,
pitch_S,
pitch_endpoints,
0,
end,
jia_d,
diameters_d,
source_vertices_d,
approx);
hipDeviceSynchronize();
auto stop = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
std::cout << "Kernel execution time " << time * 1e-9f << " (s)\n";
// GPU result
checkCudaErrors(hipMemcpy(bc_gpu,bc_d,sizeof(float)*g.n,hipMemcpyDeviceToHost));
checkCudaErrors(hipFree(bc_d));
checkCudaErrors(hipFree(R_d));
checkCudaErrors(hipFree(C_d));
checkCudaErrors(hipFree(F_d));
checkCudaErrors(hipFree(d_d));
checkCudaErrors(hipFree(sigma_d));
checkCudaErrors(hipFree(delta_d));
checkCudaErrors(hipFree(Q_d));
checkCudaErrors(hipFree(Q2_d));
checkCudaErrors(hipFree(S_d));
checkCudaErrors(hipFree(endpoints_d));
checkCudaErrors(hipFree(next_source_d));
checkCudaErrors(hipFree(jia_d));
checkCudaErrors(hipFree(diameters_d));
checkCudaErrors(hipFree(source_vertices_d));
//Copy GPU result to a vector
std::vector<float> bc_gpu_v(bc_gpu,bc_gpu+g.n);
for(int i=0; i<g.n; i++)
{
        bc_gpu_v[i] /= 2.0f; //undirected graph: each shortest path is discovered from both endpoints, so halve to avoid double counting
}
delete[] bc_gpu;
return bc_gpu_v;
}
// query the properties of a single device for simplicity
void query_device(int &max_threads_per_block, int &number_of_SMs, program_options op)
{
op.device = 0;
checkCudaErrors(hipSetDevice(op.device));
hipDeviceProp_t prop;
checkCudaErrors(hipGetDeviceProperties(&prop, op.device));
std::cout << "Chosen Device: " << prop.name << std::endl;
std::cout << "Number of Multiprocessors: " << prop.multiProcessorCount << std::endl;
std::cout << "Size of Global Memory: " << prop.totalGlobalMem/(float)(1024*1024*1024)
<< " GB" << std::endl << std::endl;
max_threads_per_block = prop.maxThreadsPerBlock;
number_of_SMs = prop.multiProcessorCount;
}
| 8f630912141fbdfede87ac0eb1084e25f78bab0b.cu | #include <vector>
#include <iostream>
#include <chrono>
#include <cuda.h>
#include "util.h" // graph
#define DIAMETER_SAMPLES 512
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#ifndef checkCudaErrors
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
// These are the inline versions for all of the SDK helper functions
inline void __checkCudaErrors(cudaError_t err, const char *file, const int line)
{
if (cudaSuccess != err)
{
std::cerr << "CUDA Error = " << err << ": " << cudaGetErrorString(err) << " from file "
<< file << ", line " << line << std::endl;
}
}
#endif
//Note: N must be a power of two
//Simple/Naive bitonic sort. We're only sorting ~512 elements one time, so performance isn't important
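//Each outer pass merges bitonic sequences of length k; the inner pass compare-exchanges each element with its partner idx^j,
//in ascending or descending order according to bit k of the index, so the array is fully sorted after O(log^2 N) compare stages.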
__device__ void bitonic_sort(int *values, const int N)
{
unsigned int idx = threadIdx.x;
for (int k = 2; k <= N; k <<= 1)
{
for (int j = k >> 1; j > 0; j = j >> 1)
{
while(idx < N)
{
int ixj = idx^j;
if (ixj > idx)
{
if ((idx&k) == 0 && values[idx] > values[ixj])
{
//exchange(idx, ixj);
int tmp = values[idx];
values[idx] = values[ixj];
values[ixj] = tmp;
}
if ((idx&k) != 0 && values[idx] < values[ixj])
{
//exchange(idx, ixj);
int tmp = values[idx];
values[idx] = values[ixj];
values[ixj] = tmp;
}
}
idx += blockDim.x;
}
__syncthreads();
idx = threadIdx.x;
}
}
}
__global__ void bc_kernel(
float *__restrict__ bc,
const int *__restrict__ R,
const int *__restrict__ C,
const int *__restrict__ F,
const int n,
const int m,
const int *__restrict__ d,
const unsigned long long *__restrict__ sigma,
const float *__restrict__ delta,
const int *__restrict__ Q,
const int *__restrict__ Q2,
const int *__restrict__ S,
const int *__restrict__ endpoints,
int *__restrict__ next_source,
const size_t pitch_d,
const size_t pitch_sigma,
const size_t pitch_delta,
const size_t pitch_Q,
const size_t pitch_Q2,
const size_t pitch_S,
const size_t pitch_endpoints,
const int start,
const int end,
int *__restrict__ jia,
int *__restrict__ diameters,
const int *__restrict__ source_vertices,
const bool approx)
{
__shared__ int ind;
__shared__ int i;
__shared__ int *Q_row;
__shared__ int *Q2_row;
__shared__ int *S_row;
__shared__ int *endpoints_row;
int j = threadIdx.x;
int *d_row = (int*)((char*)d + blockIdx.x*pitch_d);
unsigned long long *sigma_row = (unsigned long long*)((char*)sigma + blockIdx.x*pitch_sigma);
float *delta_row = (float*)((char*)delta + blockIdx.x*pitch_delta);
if(j == 0)
{
ind = blockIdx.x + start;
i = approx ? source_vertices[ind] : ind;
Q_row = (int*)((char*)Q + blockIdx.x*pitch_Q);
Q2_row = (int*)((char*)Q2 + blockIdx.x*pitch_Q2);
S_row = (int*)((char*)S + blockIdx.x*pitch_S);
endpoints_row = (int*)((char*)endpoints + blockIdx.x*pitch_endpoints);
*jia = 0;
}
__syncthreads();
if((ind==0) && (j < DIAMETER_SAMPLES))
{
diameters[j] = INT_MAX;
}
__syncthreads();
while(ind < end)
{
//Initialization
for(int k=threadIdx.x; k<n; k+=blockDim.x)
{
if(k == i) //If k is the source node...
{
d_row[k] = 0;
sigma_row[k] = 1;
}
else
{
d_row[k] = INT_MAX;
sigma_row[k] = 0;
}
delta_row[k] = 0;
}
__syncthreads();
//Shortest Path Calculation
__shared__ int Q_len;
__shared__ int Q2_len;
__shared__ int S_len;
__shared__ int current_depth;
__shared__ int endpoints_len;
__shared__ bool sp_calc_done;
if(j == 0)
{
Q_row[0] = i;
Q_len = 1;
Q2_len = 0;
S_row[0] = i;
S_len = 1;
endpoints_row[0] = 0;
endpoints_row[1] = 1;
endpoints_len = 2;
current_depth = 0;
sp_calc_done = false;
}
__syncthreads();
//Do first iteration separately since we already know the edges to traverse
for(int r=threadIdx.x+R[i]; r<R[i+1]; r+=blockDim.x)
{
int w = C[r];
//No multiple/self edges - each value of w is unique, so no need for atomics
if(d_row[w] == INT_MAX)
{
d_row[w] = 1;
int t = atomicAdd(&Q2_len,1);
Q2_row[t] = w;
}
if(d_row[w] == (d_row[i]+1))
{
atomicAdd(&sigma_row[w],1);
}
}
__syncthreads();
if(Q2_len == 0)
{
sp_calc_done = true;
}
else
{
for(int kk=threadIdx.x; kk<Q2_len; kk+=blockDim.x)
{
Q_row[kk] = Q2_row[kk];
S_row[kk+S_len] = Q2_row[kk];
}
__syncthreads();
if(j == 0)
{
endpoints_row[endpoints_len] = endpoints_row[endpoints_len-1] + Q2_len;
endpoints_len++;
Q_len = Q2_len;
S_len += Q2_len;
Q2_len = 0;
current_depth++;
}
}
__syncthreads();
while(!sp_calc_done)
{
if((*jia) && (Q_len > 512))
{
for(int k=threadIdx.x; k<2*m; k+=blockDim.x)
{
int v = F[k];
if(d_row[v] == current_depth)
{
int w = C[k];
if(atomicCAS(&d_row[w],INT_MAX,d_row[v]+1) == INT_MAX)
{
int t = atomicAdd(&Q2_len,1);
Q2_row[t] = w;
}
if(d_row[w] == (d_row[v]+1))
{
atomicAdd(&sigma_row[w],sigma_row[v]);
}
}
}
}
else
{
__shared__ int next_index;
if(j == 0)
{
next_index = blockDim.x;
}
__syncthreads();
int k = threadIdx.x; //Initial vertices
while(k < Q_len)
{
int v = Q_row[k];
for(int r=R[v]; r<R[v+1]; r++)
{
int w = C[r];
//Use atomicCAS to prevent duplicates
if(atomicCAS(&d_row[w],INT_MAX,d_row[v]+1) == INT_MAX)
{
int t = atomicAdd(&Q2_len,1);
Q2_row[t] = w;
}
if(d_row[w] == (d_row[v]+1))
{
atomicAdd(&sigma_row[w],sigma_row[v]);
}
}
k = atomicAdd(&next_index,1);
}
}
__syncthreads();
if(Q2_len == 0) //If there is no additional work found, we're done
{
break;
}
else //If there is additional work, transfer elements from Q2 to Q, reset lengths, and add vertices to the stack
{
for(int kk=threadIdx.x; kk<Q2_len; kk+=blockDim.x)
{
Q_row[kk] = Q2_row[kk];
S_row[kk+S_len] = Q2_row[kk];
}
__syncthreads();
if(j == 0)
{
endpoints_row[endpoints_len] = endpoints_row[endpoints_len-1] + Q2_len;
endpoints_len++;
Q_len = Q2_len;
S_len += Q2_len;
Q2_len = 0;
current_depth++;
}
__syncthreads();
}
}
//The elements at the end of the stack will have the largest distance from the source
//Using the successor method, we can start from one depth earlier
if(j == 0)
{
current_depth = d_row[S_row[S_len-1]] - 1;
if(ind<DIAMETER_SAMPLES)
{
diameters[ind] = current_depth+1;
}
}
__syncthreads();
//Dependency Accumulation (Madduri/Ediger successor method)
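        //For each vertex w at the current depth, accumulate Brandes' recurrence in its successor form:
        //delta(w) = sum over successors v of w (those with d[v] == d[w]+1) of (sigma(w)/sigma(v)) * (1 + delta(v)),
        //which is what both branches below compute (edge-parallel vs. stack-based work-efficient).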
while(current_depth > 0)
{
int stack_iter_len = endpoints_row[current_depth+1]-endpoints_row[current_depth];
if((*jia) && (stack_iter_len>512))
{
for(int kk=threadIdx.x; kk<2*m; kk+=blockDim.x)
{
int w = F[kk];
if(d_row[w] == current_depth)
{
int v = C[kk];
if(d_row[v] == (d_row[w]+1))
{
float change = (sigma_row[w]/(float)sigma_row[v])*(1.0f+delta_row[v]);
atomicAdd(&delta_row[w],change);
}
}
}
}
else
{
for(int kk=threadIdx.x+endpoints_row[current_depth]; kk<endpoints_row[current_depth+1]; kk+=blockDim.x)
{
int w = S_row[kk];
float dsw = 0;
float sw = (float)sigma_row[w];
for(int z=R[w]; z<R[w+1]; z++)
{
int v = C[z];
if(d_row[v] == (d_row[w]+1))
{
dsw += (sw/(float)sigma_row[v])*(1.0f+delta_row[v]);
}
}
delta_row[w] = dsw;
}
}
__syncthreads();
if(j == 0)
{
current_depth--;
}
__syncthreads();
}
for(int kk=threadIdx.x; kk<n; kk+=blockDim.x)
{
atomicAdd(&bc[kk],delta_row[kk]); //Would need to check that kk != i here, but delta_row[kk] is guaranteed to be 0.
}
if(j == 0)
{
ind = atomicAdd(next_source,1);
if(approx)
{
i = source_vertices[ind];
}
else
{
i = ind;
}
}
__syncthreads();
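        //Diameter estimation: once 2*DIAMETER_SAMPLES sources have been claimed, sort the sampled BFS depths and,
        //if the median is below 4*log2(n), set *jia so later traversals use the edge-parallel strategy for frontiers larger than 512.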
if(ind == 2*DIAMETER_SAMPLES)
{
__shared__ int diameter_keys[DIAMETER_SAMPLES];
for(int kk = threadIdx.x; kk<DIAMETER_SAMPLES; kk+=blockDim.x)
{
diameter_keys[kk] = diameters[kk];
}
__syncthreads();
bitonic_sort(diameter_keys,DIAMETER_SAMPLES);
__syncthreads();
if(j == 0)
{
int log2n = 0;
int tempn = n;
while(tempn >>= 1)
{
++log2n;
}
if(diameter_keys[DIAMETER_SAMPLES/2] < 4*log2n) //Use the median
{
*jia = 1;
}
}
}
__syncthreads();
}
}
std::vector<float> bc_gpu(
graph g,
int max_threads_per_block,
int number_of_SMs,
program_options op,
const std::set<int> &source_vertices)
{
float *bc_gpu = new float[g.n];
int next_source = number_of_SMs;
float *bc_d, *delta_d;
int *d_d, *R_d, *C_d, *F_d, *Q_d, *Q2_d, *S_d, *endpoints_d, *next_source_d, *source_vertices_d;
unsigned long long *sigma_d;
size_t pitch_d, pitch_sigma, pitch_delta, pitch_Q, pitch_Q2, pitch_S, pitch_endpoints;
int *jia_d, *diameters_d;
dim3 dimGrid (number_of_SMs, 1, 1);
dim3 dimBlock (max_threads_per_block, 1, 1);
//Allocate and transfer data to the GPU
checkCudaErrors(cudaMalloc((void**)&bc_d,sizeof(float)*g.n));
checkCudaErrors(cudaMalloc((void**)&R_d,sizeof(int)*(g.n+1)));
checkCudaErrors(cudaMalloc((void**)&C_d,sizeof(int)*(2*g.m)));
checkCudaErrors(cudaMalloc((void**)&F_d,sizeof(int)*(2*g.m)));
checkCudaErrors(cudaMallocPitch((void**)&d_d,&pitch_d,sizeof(int)*g.n,dimGrid.x));
checkCudaErrors(cudaMallocPitch((void**)&sigma_d,&pitch_sigma,sizeof(unsigned long long)*g.n,dimGrid.x));
checkCudaErrors(cudaMallocPitch((void**)&delta_d,&pitch_delta,sizeof(float)*g.n,dimGrid.x));
//Making Queues/Stack of size O(n) since we won't duplicate
checkCudaErrors(cudaMallocPitch((void**)&Q_d,&pitch_Q,sizeof(int)*g.n,dimGrid.x));
checkCudaErrors(cudaMallocPitch((void**)&Q2_d,&pitch_Q2,sizeof(int)*g.n,dimGrid.x));
checkCudaErrors(cudaMallocPitch((void**)&S_d,&pitch_S,sizeof(int)*g.n,dimGrid.x));
checkCudaErrors(cudaMallocPitch((void**)&endpoints_d,&pitch_endpoints,sizeof(int)*(g.n+1),dimGrid.x));
checkCudaErrors(cudaMalloc((void**)&next_source_d,sizeof(int)));
// source_vertices of type "std::set" has no data() method
std::vector<int> source_vertices_h(source_vertices.size());
std::copy(source_vertices.begin(),source_vertices.end(),source_vertices_h.begin());
checkCudaErrors(cudaMalloc((void**)&source_vertices_d, sizeof(int) * source_vertices.size()));
if(op.approx)
{
checkCudaErrors(cudaMemcpy(source_vertices_d, source_vertices_h.data(),
sizeof(int) * source_vertices.size(), cudaMemcpyHostToDevice));
}
checkCudaErrors(cudaMalloc((void**)&jia_d,sizeof(int)));
checkCudaErrors(cudaMalloc((void**)&diameters_d,sizeof(int)*DIAMETER_SAMPLES));
checkCudaErrors(cudaMemset(jia_d,0,sizeof(int)));
checkCudaErrors(cudaMemset(diameters_d,0,sizeof(int)*DIAMETER_SAMPLES));
checkCudaErrors(cudaMemcpy(R_d,g.R,sizeof(int)*(g.n+1),cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(C_d,g.C,sizeof(int)*(2*g.m),cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(F_d,g.F,sizeof(int)*(2*g.m),cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemset(bc_d,0,sizeof(float)*g.n));
checkCudaErrors(cudaMemcpy(next_source_d,&next_source,sizeof(int),cudaMemcpyHostToDevice));
int end;
bool approx;
if(op.approx)
{
end = op.k;
approx = true;
} else {
end = g.n;
approx = false;
}
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
bc_kernel<<<dimGrid,dimBlock>>>(
bc_d,
R_d,
C_d,
F_d,
g.n,
g.m,
d_d,
sigma_d,
delta_d,
Q_d,
Q2_d,
S_d,
endpoints_d,
next_source_d,
pitch_d,
pitch_sigma,
pitch_delta,
pitch_Q,
pitch_Q2,
pitch_S,
pitch_endpoints,
0,
end,
jia_d,
diameters_d,
source_vertices_d,
approx);
cudaDeviceSynchronize();
auto stop = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
std::cout << "Kernel execution time " << time * 1e-9f << " (s)\n";
// GPU result
checkCudaErrors(cudaMemcpy(bc_gpu,bc_d,sizeof(float)*g.n,cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(bc_d));
checkCudaErrors(cudaFree(R_d));
checkCudaErrors(cudaFree(C_d));
checkCudaErrors(cudaFree(F_d));
checkCudaErrors(cudaFree(d_d));
checkCudaErrors(cudaFree(sigma_d));
checkCudaErrors(cudaFree(delta_d));
checkCudaErrors(cudaFree(Q_d));
checkCudaErrors(cudaFree(Q2_d));
checkCudaErrors(cudaFree(S_d));
checkCudaErrors(cudaFree(endpoints_d));
checkCudaErrors(cudaFree(next_source_d));
checkCudaErrors(cudaFree(jia_d));
checkCudaErrors(cudaFree(diameters_d));
checkCudaErrors(cudaFree(source_vertices_d));
//Copy GPU result to a vector
std::vector<float> bc_gpu_v(bc_gpu,bc_gpu+g.n);
for(int i=0; i<g.n; i++)
{
        bc_gpu_v[i] /= 2.0f; //undirected graph: each shortest path is discovered from both endpoints, so halve to avoid double counting
}
delete[] bc_gpu;
return bc_gpu_v;
}
// query the properties of a single device for simplicity
void query_device(int &max_threads_per_block, int &number_of_SMs, program_options op)
{
op.device = 0;
checkCudaErrors(cudaSetDevice(op.device));
cudaDeviceProp prop;
checkCudaErrors(cudaGetDeviceProperties(&prop, op.device));
std::cout << "Chosen Device: " << prop.name << std::endl;
std::cout << "Number of Multiprocessors: " << prop.multiProcessorCount << std::endl;
std::cout << "Size of Global Memory: " << prop.totalGlobalMem/(float)(1024*1024*1024)
<< " GB" << std::endl << std::endl;
max_threads_per_block = prop.maxThreadsPerBlock;
number_of_SMs = prop.multiProcessorCount;
}
|
2f1cd0fb77aaea1de44bdd89b38bc03892094047.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zgeadd2.cu, normal z -> c, Mon Jun 25 18:24:10 2018
@author Mark Gates
*/
#include "magma_internal.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to claset.
*/
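/*
    Worked example (hypothetical sizes): with BLK_X = 64 and BLK_Y = 32, an
    m = 1000 by n = 100 matrix is covered by a ceil(1000/64) x ceil(100/32) = 16 x 4
    grid; blocks in the last grid column see only 100 - 3*32 = 4 columns and take
    the partial block-column branch below.
*/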
__global__
void cgeadd2_full(
int m, int n,
magmaFloatComplex alpha,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex beta,
magmaFloatComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = alpha*dA[j*ldda] + beta*dB[j*lddb];
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
dB[j*lddb] = alpha*dA[j*ldda] + beta*dB[j*lddb];
}
}
}
}
/***************************************************************************//**
Purpose
-------
ZGEADD2 adds two matrices, dB = alpha*dA + beta*dB.
@see ZGEADD for dB = alpha*dA + dB, lacking beta.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
alpha COMPLEX
The scalar alpha.
@param[in]
dA COMPLEX array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
beta COMPLEX
The scalar beta.
@param[in,out]
dB COMPLEX array, dimension (LDDB,N)
The m by n matrix dB.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_geadd
*******************************************************************************/
extern "C" void
magmablas_cgeadd2(
magma_int_t m, magma_int_t n,
magmaFloatComplex alpha,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex beta,
magmaFloatComplex_ptr dB, magma_int_t lddb,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 )
return;
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) );
hipLaunchKernelGGL(( cgeadd2_full), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, alpha, dA, ldda, beta, dB, lddb );
}
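/*
    Hedged usage sketch (not part of the original source; sizes and names are
    illustrative only). Assuming device arrays dA, dB with leading dimensions
    ldda, lddb >= m and a valid magma_queue_t queue:

        magmaFloatComplex alpha = MAGMA_C_MAKE( 2.0, 0.0 );
        magmaFloatComplex beta  = MAGMA_C_MAKE( 0.5, 0.0 );
        magmablas_cgeadd2( m, n, alpha, dA, ldda, beta, dB, lddb, queue );
        // afterwards dB = 2*dA + 0.5*dB elementwise
*/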
| 2f1cd0fb77aaea1de44bdd89b38bc03892094047.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/zgeadd2.cu, normal z -> c, Mon Jun 25 18:24:10 2018
@author Mark Gates
*/
#include "magma_internal.h"
#define BLK_X 64
#define BLK_Y 32
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to claset.
*/
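/*
    Worked example (hypothetical sizes): for m = 1000, n = 100 this gives a
    ceil(1000/64) x ceil(100/32) = 16 x 4 grid, with the last block column
    handling only 100 - 3*32 = 4 columns via the partial branch.
*/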
__global__
void cgeadd2_full(
int m, int n,
magmaFloatComplex alpha,
const magmaFloatComplex *dA, int ldda,
magmaFloatComplex beta,
magmaFloatComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = alpha*dA[j*ldda] + beta*dB[j*lddb];
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
dB[j*lddb] = alpha*dA[j*ldda] + beta*dB[j*lddb];
}
}
}
}
/***************************************************************************//**
Purpose
-------
ZGEADD2 adds two matrices, dB = alpha*dA + beta*dB.
@see ZGEADD for dB = alpha*dA + dB, lacking beta.
Arguments
---------
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
alpha COMPLEX
The scalar alpha.
@param[in]
dA COMPLEX array, dimension (LDDA,N)
The m by n matrix dA.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
beta COMPLEX
The scalar beta.
@param[in,out]
dB COMPLEX array, dimension (LDDB,N)
The m by n matrix dB.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_geadd
*******************************************************************************/
extern "C" void
magmablas_cgeadd2(
magma_int_t m, magma_int_t n,
magmaFloatComplex alpha,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex beta,
magmaFloatComplex_ptr dB, magma_int_t lddb,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 )
return;
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ) );
cgeadd2_full<<< grid, threads, 0, queue->cuda_stream() >>>
( m, n, alpha, dA, ldda, beta, dB, lddb );
}
|
d3766c7ded15a435ecc2536a2a187a91e821b979.hip | // !!! This is a file automatically generated by hipify!!!
#include "stdio.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "functions.h"
#include "tree.h"
#include "generator.h"
#include "bruteforce.h"
#include "tree_creator.h"
#include "print.h"
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/sort.h>
#include <list>
#include <vector>
__global__ void assignMaskValues(unsigned int *keys, int *values, unsigned int *masks, int size){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
if (i % 2){
values[i/2] = masks[i];
}
else{
keys[i/2] = masks[i];
}
}
}
__global__ void assignSortedMaskValues(unsigned int *masks, unsigned int *keys, int *values, int size){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
if (i % 2){
masks[i] = values[i / 2];
}
else{
masks[i] = keys[i / 2];
}
}
}
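//sortMasks: de-interleave the (mask, prefix) pairs into key/value arrays on the GPU, sort the host copies by mask
//with thrust::sort_by_key, then re-interleave the sorted pairs back into 'masks'.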
void sortMasks(unsigned int *masks, int masks_size){
unsigned int *d_keys, *d_masks; int *d_values;
unsigned int *keys = (unsigned int*)malloc((masks_size/2)*sizeof(unsigned int));
int *values = (int*)malloc((masks_size/2)*sizeof(int));
hipMalloc((void**)&d_masks, masks_size*sizeof(unsigned int));
hipMalloc((void**)&d_keys, (masks_size / 2)*sizeof(unsigned int));
hipMalloc((void**)&d_values, (masks_size/2)*sizeof(int));
hipMemcpy(d_masks, masks, masks_size*sizeof(unsigned int), hipMemcpyHostToDevice);
assignMaskValues << <(masks_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(d_keys, d_values, d_masks, masks_size);
hipMemcpy(keys, d_keys, (masks_size / 2)*sizeof(unsigned int), hipMemcpyDeviceToHost);
hipMemcpy(values, d_values, (masks_size / 2)*sizeof(int), hipMemcpyDeviceToHost);
/*---------------- SORT---------------*/
thrust::sort_by_key(keys, keys + masks_size/2, values);
hipMemcpy(d_keys, keys, (masks_size / 2)*sizeof(unsigned int), hipMemcpyHostToDevice);
hipMemcpy(d_values, values, (masks_size / 2)*sizeof(unsigned int), hipMemcpyHostToDevice);
assignSortedMaskValues << <(masks_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(d_masks, d_keys, d_values, masks_size);
hipMemcpy(masks, d_masks, masks_size*sizeof(unsigned int), hipMemcpyDeviceToHost);
free(keys); free(values);
hipFree(d_keys); hipFree(d_masks); hipFree(d_values);
}
int main()
{
hipError_t cudaStatus;
hipEvent_t start, stop, start_tree, stop_tree;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventCreate(&start_tree);
hipEventCreate(&stop_tree);
float elapsedTime;
///////////////////////////// INIT IPS AND MASKS ///////////////////////////
//init ips and masks
unsigned int *ips = (unsigned int*)malloc(NUM_IPS * sizeof(unsigned int));
unsigned int *masks = (unsigned int*)malloc(NUM_MASKS * sizeof(unsigned int) * 2);
unsigned int *assignedMasks = (unsigned int*)malloc(NUM_IPS * sizeof(unsigned int) * 2);
printf("mem for IPs and MASKs allocated.\n");
////////////////////////////////////////////////////////////////////////////
//warmup by empty kernel
init();
generate_ip_addresses(ips);
printf("IPs generated on CPU\n");
generate_ip_masks(masks);
printf("Masks generated on CPU\n");
///////////////////////////// SORT ///////////////////////////////////
sortMasks(masks, NUM_MASKS*2);
printf("Masks sorted.");
////////////////////////////// BRUTE FORCE //////////////////////////////////
hipEventRecord(start);
bruteforce(ips, masks, assignedMasks);
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
printf("time elapsed for bruteforce: %f\n", elapsedTime);
writeToFile(ips, assignedMasks);
    ////////////////////////////// TREE /////////////////////////////////
MaskList masksList;
masksList.masks = (int*)malloc(NUM_MASKS*sizeof(int));
masksList.prefixes = (u_char*)malloc(NUM_MASKS*sizeof(u_char));
masksList.removed = (u_char*)calloc(NUM_MASKS, sizeof(u_char));
int j = 0;
for (int i = 0; i < (NUM_MASKS * 2); i++){
masksList.masks[j] = masks[i];
masksList.prefixes[j] = masks[++i];
j++;
}
hipEventRecord(start_tree);
TreeNode *root = createTreeImproved(masksList, NUM_MASKS);
printf("Tree created.\n");
//TODO tree search algorithm
traverseTree(ips, root, NUM_MASKS);
hipEventRecord(stop_tree);
hipEventSynchronize(stop_tree);
hipEventElapsedTime(&elapsedTime, start_tree, stop_tree);
printf("time elapsed for tree search: %f\n", elapsedTime);
//destroy_treenode(root);
free(root);
free(ips);
free(masks);
free(assignedMasks);
free(masksList.masks);
free(masksList.prefixes);
free(masksList.removed);
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
| d3766c7ded15a435ecc2536a2a187a91e821b979.cu | #include "stdio.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "functions.h"
#include "tree.h"
#include "generator.h"
#include "bruteforce.h"
#include "tree_creator.h"
#include "print.h"
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/sort.h>
#include <list>
#include <vector>
__global__ void assignMaskValues(unsigned int *keys, int *values, unsigned int *masks, int size){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
if (i % 2){
values[i/2] = masks[i];
}
else{
keys[i/2] = masks[i];
}
}
}
__global__ void assignSortedMaskValues(unsigned int *masks, unsigned int *keys, int *values, int size){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size){
if (i % 2){
masks[i] = values[i / 2];
}
else{
masks[i] = keys[i / 2];
}
}
}
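//sortMasks: de-interleave the (mask, prefix) pairs into key/value arrays on the GPU, sort the host copies by mask
//with thrust::sort_by_key, then re-interleave the sorted pairs back into 'masks'.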
void sortMasks(unsigned int *masks, int masks_size){
unsigned int *d_keys, *d_masks; int *d_values;
unsigned int *keys = (unsigned int*)malloc((masks_size/2)*sizeof(unsigned int));
int *values = (int*)malloc((masks_size/2)*sizeof(int));
cudaMalloc((void**)&d_masks, masks_size*sizeof(unsigned int));
cudaMalloc((void**)&d_keys, (masks_size / 2)*sizeof(unsigned int));
cudaMalloc((void**)&d_values, (masks_size/2)*sizeof(int));
cudaMemcpy(d_masks, masks, masks_size*sizeof(unsigned int), cudaMemcpyHostToDevice);
assignMaskValues << <(masks_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(d_keys, d_values, d_masks, masks_size);
cudaMemcpy(keys, d_keys, (masks_size / 2)*sizeof(unsigned int), cudaMemcpyDeviceToHost);
cudaMemcpy(values, d_values, (masks_size / 2)*sizeof(int), cudaMemcpyDeviceToHost);
/*---------------- SORT---------------*/
thrust::sort_by_key(keys, keys + masks_size/2, values);
cudaMemcpy(d_keys, keys, (masks_size / 2)*sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaMemcpy(d_values, values, (masks_size / 2)*sizeof(unsigned int), cudaMemcpyHostToDevice);
assignSortedMaskValues << <(masks_size + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >> >(d_masks, d_keys, d_values, masks_size);
cudaMemcpy(masks, d_masks, masks_size*sizeof(unsigned int), cudaMemcpyDeviceToHost);
free(keys); free(values);
cudaFree(d_keys); cudaFree(d_masks); cudaFree(d_values);
}
int main()
{
cudaError_t cudaStatus;
cudaEvent_t start, stop, start_tree, stop_tree;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventCreate(&start_tree);
cudaEventCreate(&stop_tree);
float elapsedTime;
///////////////////////////// INIT IPS AND MASKS ///////////////////////////
//init ips and masks
unsigned int *ips = (unsigned int*)malloc(NUM_IPS * sizeof(unsigned int));
unsigned int *masks = (unsigned int*)malloc(NUM_MASKS * sizeof(unsigned int) * 2);
unsigned int *assignedMasks = (unsigned int*)malloc(NUM_IPS * sizeof(unsigned int) * 2);
printf("mem for IPs and MASKs allocated.\n");
////////////////////////////////////////////////////////////////////////////
//warmup by empty kernel
init();
generate_ip_addresses(ips);
printf("IPs generated on CPU\n");
generate_ip_masks(masks);
printf("Masks generated on CPU\n");
///////////////////////////// SORT ///////////////////////////////////
sortMasks(masks, NUM_MASKS*2);
printf("Masks sorted.");
////////////////////////////// BRUTE FORCE //////////////////////////////////
cudaEventRecord(start);
bruteforce(ips, masks, assignedMasks);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("time elapsed for bruteforce: %f\n", elapsedTime);
writeToFile(ips, assignedMasks);
    ////////////////////////////// TREE /////////////////////////////////
MaskList masksList;
masksList.masks = (int*)malloc(NUM_MASKS*sizeof(int));
masksList.prefixes = (u_char*)malloc(NUM_MASKS*sizeof(u_char));
masksList.removed = (u_char*)calloc(NUM_MASKS, sizeof(u_char));
int j = 0;
for (int i = 0; i < (NUM_MASKS * 2); i++){
masksList.masks[j] = masks[i];
masksList.prefixes[j] = masks[++i];
j++;
}
cudaEventRecord(start_tree);
TreeNode *root = createTreeImproved(masksList, NUM_MASKS);
printf("Tree created.\n");
//TODO tree search algorithm
traverseTree(ips, root, NUM_MASKS);
cudaEventRecord(stop_tree);
cudaEventSynchronize(stop_tree);
cudaEventElapsedTime(&elapsedTime, start_tree, stop_tree);
printf("time elapsed for tree search: %f\n", elapsedTime);
//destroy_treenode(root);
free(root);
free(ips);
free(masks);
free(assignedMasks);
free(masksList.masks);
free(masksList.prefixes);
free(masksList.removed);
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
|
09476476cc873576ae7d22e54313e3a9656ee543.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
* This project is dual licensed. You may license this software under one of the
following licences:
+ Creative Commons Attribution-Share Alike 3.0 Unported License
http://creativecommons.org/licenses/by-nc-sa/3.0/
+ GNU GENERAL PUBLIC LICENSE v3, designated as a "BY-SA Compatible License"
as defined in BY-SA 4.0 on 8 October 2015
* See the LICENSE file in the root directory of this source tree for full
copyright disclosure, and other details.
*/
/* Header files */
/* Constants */
#define threads 256 /* It's the number of threads we are going to use per block on the GPU */
using namespace std;
/* Kernels */
/* This kernel counts the number of pairs in the data file */
/* We will use this kernel to calculate real-real pairs and random-random pairs */
/* This kernel counts the number of pairs that there are between two data groups */
/* We will use this kernel to calculate real-random pairs and real_1-real_2 pairs (cross-correlation) */
/* NOTE that this kernel has NOT been merged with 'binning' above: this is for speed optimization, we avoid passing extra variables to the GPU */
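/* Each pair separation is acos( u_i . u_j ) converted to degrees and histogrammed into bins of width 1/points_per_degree
   up to number_of_degrees; the kernel therefore presumably assumes number_of_degrees*points_per_degree <= threads, the size of temp (and of ZZ). */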
__global__ void binning(float *xd,float *yd,float *zd,float *ZZ,int number_lines,int points_per_degree, int number_of_degrees)
{
    /* We define the per-thread dot-product/angle variable and the shared-memory histogram */
float angle;
__shared__ float temp[threads];
    /* We define an index to address the shared histogram */
int index = threadIdx.x;
    /* temp lives in shared memory to accelerate the accumulation; it must be zeroed here before any pairs are counted */
temp[index]=0;
float x,y,z; //MCM
float xx,yy,zz; //MCM
/* We start the counting */
for (int i=0;i<number_lines;i++)
{
x = xd[i];//MCM
y = yd[i];//MCM
z = zd[i];//MCM
/* The "while" replaces the second for-loop in the sequential calculation case (CPU). We use "while" rather than "if" as recommended in the book "Cuda by Example" */
for(int dim_idx = blockIdx.x * blockDim.x + threadIdx.x;
dim_idx < number_lines;
dim_idx += blockDim.x * gridDim.x)
{
xx = xd[dim_idx];//MCM
yy = yd[dim_idx];//MCM
zz = zd[dim_idx];//MCM
/* We make the dot product */
angle = x * xx + y * yy + z * zz;//MCM
//angle[index]=xd[i]*xd[dim_idx]+yd[i]*yd[dim_idx]+zd[i]*zd[dim_idx];//MCM
//__syncthreads();//MCM
/* Sometimes "angle" is higher than one, due to numnerical precision, to solve it we use the next sentence */
angle=fminf(angle,1.0);
angle=acosf(angle)*180.0/M_PI;
//__syncthreads();//MCM
            /* We finally count the number of pairs separated by an angular distance "angle", always in shared memory */
if(angle < number_of_degrees)
{
atomicAdd( &temp[int(angle*points_per_degree)], 1.0);
}
__syncthreads();
}
}
/* We copy the number of pairs from shared memory to global memory */
atomicAdd( &ZZ[threadIdx.x] , temp[threadIdx.x]);
__syncthreads();
} | 09476476cc873576ae7d22e54313e3a9656ee543.cu | #include "includes.h"
/*
* This project is dual licensed. You may license this software under one of the
following licences:
+ Creative Commons Attribution-Share Alike 3.0 Unported License
http://creativecommons.org/licenses/by-nc-sa/3.0/
+ GNU GENERAL PUBLIC LICENSE v3, designated as a "BY-SA Compatible License"
as defined in BY-SA 4.0 on 8 October 2015
* See the LICENSE file in the root directory of this source tree for full
copyright disclosure, and other details.
*/
/* Header files */
/* Constants */
#define threads 256 /* It's the number of threads we are going to use per block on the GPU */
using namespace std;
/* Kernels */
/* This kernel counts the number of pairs in the data file */
/* We will use this kernel to calculate real-real pairs and random-random pairs */
/* This kernel counts the number of pairs that there are between two data groups */
/* We will use this kernel to calculate real-random pairs and real_1-real_2 pairs (cross-correlation) */
/* NOTE that this kernel has NOT been merged with 'binning' above: this is for speed optimization, we avoid passing extra variables to the GPU */
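/* Each pair separation is acos( u_i . u_j ) converted to degrees and histogrammed into bins of width 1/points_per_degree
   up to number_of_degrees; the kernel therefore presumably assumes number_of_degrees*points_per_degree <= threads, the size of temp (and of ZZ). */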
__global__ void binning(float *xd,float *yd,float *zd,float *ZZ,int number_lines,int points_per_degree, int number_of_degrees)
{
    /* We define the per-thread dot-product/angle variable and the shared-memory histogram */
float angle;
__shared__ float temp[threads];
    /* We define an index to address the shared histogram */
int index = threadIdx.x;
    /* temp lives in shared memory to accelerate the accumulation; it must be zeroed here before any pairs are counted */
temp[index]=0;
float x,y,z; //MCM
float xx,yy,zz; //MCM
/* We start the counting */
for (int i=0;i<number_lines;i++)
{
x = xd[i];//MCM
y = yd[i];//MCM
z = zd[i];//MCM
/* The "while" replaces the second for-loop in the sequential calculation case (CPU). We use "while" rather than "if" as recommended in the book "Cuda by Example" */
for(int dim_idx = blockIdx.x * blockDim.x + threadIdx.x;
dim_idx < number_lines;
dim_idx += blockDim.x * gridDim.x)
{
xx = xd[dim_idx];//MCM
yy = yd[dim_idx];//MCM
zz = zd[dim_idx];//MCM
/* We make the dot product */
angle = x * xx + y * yy + z * zz;//MCM
//angle[index]=xd[i]*xd[dim_idx]+yd[i]*yd[dim_idx]+zd[i]*zd[dim_idx];//MCM
//__syncthreads();//MCM
/* Sometimes "angle" is higher than one, due to numnerical precision, to solve it we use the next sentence */
angle=fminf(angle,1.0);
angle=acosf(angle)*180.0/M_PI;
//__syncthreads();//MCM
            /* We finally count the number of pairs separated by an angular distance "angle", always in shared memory */
if(angle < number_of_degrees)
{
atomicAdd( &temp[int(angle*points_per_degree)], 1.0);
}
__syncthreads();
}
}
/* We copy the number of pairs from shared memory to global memory */
atomicAdd( &ZZ[threadIdx.x] , temp[threadIdx.x]);
__syncthreads();
} |
43a7cc56f0c6d08caa29ae0239b445c1aa4852fd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* test_util.cu
*
* Created on: 22-Mar-2009
* Author: alee
*/
#include <stdio.h>
#include "gauss.h"
#include <cutil.h>
#include "reduce.h"
#include "scan.h"
#include "mix_gauss.h"
#include "mix_gauss_uniform.h"
#include "MRG.h"
#include "xorshift.h"
#include "rng.h"
#include "output.h"
#include "matrix.h"
__global__ void logtest(int size, float* d_array, int M) {
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int tt = blockDim.x * gridDim.x;
int i, j;
float x;
for (i = tid; i < size; i += tt) {
x = i;
for (j = 0; j < M; j++) {
x = logf(x);
x = expf(x);
}
d_array[i] = x;
}
}
void logtestref(int size, float* array, int M) {
float x;
for (int i = 0; i < size; i++) {
x = (float) i;
for (int j = 0; j < M; j++) {
x = logf(x);
x = expf(x);
}
array[i] = x;
}
}
void testLogSpeed(int N, int M) {
unsigned int hTimer;
double gtime, ctime;
cutCreateTimer(&hTimer);
float* array = (float*) malloc(N * sizeof(float));
float* d_array;
hipMalloc((void**) &d_array, N * sizeof(float));
cutResetTimer(hTimer);
cutStartTimer(hTimer);
hipLaunchKernelGGL(( logtest), dim3(256),dim3(64), 0, 0, N, d_array, M);
hipDeviceSynchronize();
cutStopTimer(hTimer);
gtime = cutGetTimerValue(hTimer);
printf("log test time = %f\n", gtime);
cutResetTimer(hTimer);
cutStartTimer(hTimer);
logtestref(N, array, M);
cutStopTimer(hTimer);
ctime = cutGetTimerValue(hTimer);
printf("ref log test time = %f\n", ctime);
// float* h_array = (float*) malloc(N * sizeof(float));
// hipMemcpy(h_array, d_array, N * sizeof(float), hipMemcpyDeviceToHost);
// for (int i = 0; i < 200; i++) {
// printf("%f %f\n", h_array[i], array[i]);
// }
// free(h_array);
printf("speedup = %f\n", ctime / gtime);
free(array);
hipFree(d_array);
}
void test_reduce2D(int N, int nb, int nt) {
const int D = 2;
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* array = (float*) malloc(N * D * sizeof(float));
float* d_array;
hipMalloc((void **) &d_array, N * D * sizeof(float));
// populate_rand(array, N * D);
float c = 1;
for (int i = 0; i < N * D; i++) {
if (i % D == 1) {
c = 1;
} else {
c = .5;
}
array[i] = c;
}
hipMemcpy(d_array, array, sizeof(float) * N * D, hipMemcpyHostToDevice);
cutResetTimer(hTimer);
cutStartTimer(hTimer);
double sumh1 = 0;
double sumh2 = 0;
for (int i = 0; i < N; i++) {
sumh1 += array[i * 2];
sumh2 += array[i * 2 + 1];
}
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Host reduce (2D): sum1 = %f, sum2 = %f\n", sumh1, sumh2);
printf("Time = %f\n\n", time);
float sum[2];
cutResetTimer(hTimer);
cutStartTimer(hTimer);
reduce(N, D, d_array, sum, nb, nt);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("reduce_2D: sum1 = %f, sum2 = %f\n", sum[0], sum[1]);
printf("Time = %f\n\n", time);
free(array);
hipFree(d_array);
}
void test_reduceMD(int N, int D, int nb, int nt) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* array = (float*) malloc(N * D * sizeof(float));
float* d_array;
hipMalloc((void **) &d_array, N * D * sizeof(float));
// populate_rand_XS(array, N * D);
float c = 1;
for (int i = 0; i < N * D; i++) {
c = (float) (i % D) + 1;
array[i] = c;
}
hipMemcpy(d_array, array, sizeof(float) * N * D, hipMemcpyHostToDevice);
cutResetTimer(hTimer);
cutStartTimer(hTimer);
// double* sumh = (double*) malloc(D * sizeof(double));
float* sumh = (float*) malloc(D * sizeof(float));
for (int j = 0; j < D; j++) {
sumh[j] = 0;
}
for (int i = 0; i < N; i++) {
for (int j = 0; j < D; j++) {
sumh[j] += array[i * D + j];
}
}
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Host reduce (MD): (");
for (int i = 0; i < D; i++) {
printf("%f,", sumh[i]);
}
printf(")\n");
printf("Time = %f\n\n", time);
float* sum = (float*) malloc(D * sizeof(float));
cutResetTimer(hTimer);
cutStartTimer(hTimer);
reduce(N, D, d_array, sum, nb, nt);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Reduce (MD): (");
for (int i = 0; i < D; i++) {
printf("%f,", sum[i]);
}
printf(")\n");
printf("Time = %f\n\n", time);
free(array);
hipFree(d_array);
free(sum);
free(sumh);
}
void test_float() {
float f = (float) 67108868.0f;
double d = (double) 67108868;
// float f = 0;
// double d = 0;
// for (int i = 0; i < 20000000; i++) {
// f = f + 4.0;
// d = d + 4.0;
// }
printf("float = %f\n", f);
printf("double = %f\n", d);
// printf("%f\n", 45000000.0f * 4.0f);
}
void test_reduce(int N, int nb, int nt) {
// const int N = 4194304;
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* array = (float*) malloc(N * sizeof(float));
float* d_array;
hipMalloc((void **) &d_array, N * sizeof(float));
for (int i = 0; i < N; i++) {
array[i] = 0.5;
}
hipMemcpy(d_array, array, sizeof(float) * N, hipMemcpyHostToDevice);
cutResetTimer(hTimer);
cutStartTimer(hTimer);
float sum = 0;
double sumh = 0;
for (int i = 0; i < N; i++) {
sumh += array[i];
}
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Host reduce: sum = %f\n", sumh);
printf("Time = %f\n\n", time);
cutResetTimer(hTimer);
cutStartTimer(hTimer);
reduce(N, d_array, sum, nb, nt);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("reduce: sum = %f\n", sum);
printf("Time = %f\n\n", time);
free(array);
hipFree(d_array);
}
void test_matrix() {
const int MA = 3;
const int NA = 4;
float A[12] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };
const int MB = 4;
const int NB = 3;
float B[12] = { 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 };
const int MC = MA;
const int NC = NB;
float C[MC * NC];
matrix_times(A, B, C, MA, NA, MB, NB);
matrix_print(C, MC, NC);
int ME = 3;
int NE = 3;
float E[9] = { 1, 2, 2, 5, 7, 3, 4, 8, 7 };
float EI[9];
printf("1\n");
matrix_inverse(E, EI, ME);
printf("2\n");
matrix_print(EI, ME, NE);
printf("3\n");
printf("|E| = %f\n", matrix_det(E, ME));
printf("|EI| = %f\n", matrix_det(EI, ME));
int MF = 4;
int NF = 4;
float F[16] = { 1, 8, 6, 3, 2, 7, 4, 0, -5, 4, -8, 3, 2, -6, 9, 3 };
float FI[16];
matrix_inverse(F, FI, MF);
matrix_print(FI, MF, NF);
printf("|F| = %f\n", matrix_det(F, MF));
printf("|FI| = %f\n", matrix_det(FI, MF));
float x[4] = { 3, 5, 2, 7 };
float r = matrix_xtmx(F, x, 4);
printf("r = %f\n", r);
}
void test_gaussmv() {
const int D = 3;
float x[3] = { 0.5, 1, 2 };
float mu[3] = { 2, 1, 4 };
float cov[9] = { 3, 0.5, 3, 0.5, 2, 0, 3, 0, 4 };
float c2[9];
float c1;
compute_c1_c2(cov, D, c1, c2);
float* h_args = (float*) malloc((1 + D + D * D) * sizeof(float));
h_args[0] = c1;
h_args[1] = c2[0];
h_args[2] = c2[1];
h_args[3] = c2[2];
h_args[4] = c2[3];
h_args[5] = c2[4];
h_args[6] = c2[5];
h_args[7] = c2[6];
h_args[8] = c2[7];
h_args[9] = c2[8];
h_args[10] = mu[0];
h_args[11] = mu[1];
h_args[12] = mu[2];
float r = gauss_pdfh(x, c1, c2, mu, D);
printf("r = %f\n", r);
r = gauss_pdfh(x, h_args, D);
printf("r = %f\n", r);
free(h_args);
}
void test_scan(int N) {
unsigned int hTimer;
double ctime, gtime;
cutCreateTimer(&hTimer);
float* array = (float*) malloc(N * sizeof(float));
for (int i = 0; i < N; i++) {
array[i] = 0.5;
}
float* array_out = (float*) malloc(N * sizeof(float));
float* d_array;
hipMalloc((void **) &d_array, N * sizeof(float));
float* d_array_out;
hipMalloc((void **) &d_array_out, N * sizeof(float));
hipMemcpy(d_array, array, sizeof(float) * N, hipMemcpyHostToDevice);
cutResetTimer(hTimer);
cutStartTimer(hTimer);
scan(N, d_array, d_array_out, 32, 32);
cutStopTimer(hTimer);
gtime = cutGetTimerValue(hTimer);
printf("GPU Time = %f\n\n", gtime);
hipDeviceSynchronize();
hipMemcpy(array_out, d_array_out, sizeof(float) * N, hipMemcpyDeviceToHost);
// for (int i = 0; i < N; i++) {
// printf("%f ", array_out[i]);
// }
// printf("\n");
float* array_out_ref = (float*) malloc(N * sizeof(float));
cutResetTimer(hTimer);
cutStartTimer(hTimer);
hipMemcpy(array, d_array, sizeof(float) * N, hipMemcpyDeviceToHost);
scan_ref(N, array, array_out_ref);
hipMemcpy(d_array_out, array_out_ref, sizeof(float) * N, hipMemcpyHostToDevice);
cutStopTimer(hTimer);
ctime = cutGetTimerValue(hTimer);
printf("CPU Time = %f\n", ctime);
printf("speedup = %f\n", ctime/gtime);
for (int i = 0; i < N; i++) {
if (array_out[i] != array_out_ref[i]) {
printf("FAIL: %d, %f, %f\n", i, array_out[i], array_out_ref[i]);
}
}
free(array);
free(array_out);
free(array_out_ref);
hipFree(d_array);
hipFree(d_array_out);
}
void test_pdfh() {
seed_rng();
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
const int k = 4;
float mus[4] = { -3, 0, 3, 6 };
float sigma = 0.55f;
const int N = 100;
float data_array[N];
generate_mix_data(k, sigma, mus, data_array, N);
float guess[4] = { -6, 3, 0, -3 };
float c1, c2;
compute_ci1_ci2(sigma, 1.0f / k, c1, c2);
float r = 0;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
for (int i = 0; i < 32768; i++) {
r = log_mgu_pdfh(data_array, N, guess, k, c1, c2, -10.0f, 10.0f);
}
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("ref pdfh time = %f\n", time);
printf("r = %f\n", r);
}
void test_pdfh2() {
const int k = 4;
float sigma = 0.55f;
const int N = 2;
float data_array[N] = { 0, 0 };
// generate_mix_data(k, sigma, mus, data_array, N);
float guess[k] = { 10, -12, 13, 11 };
float c1, c2;
compute_ci1_ci2(sigma, 1.0f / k, c1, c2);
float r = log_mgu_pdfh(data_array, N, guess, k, c1, c2, -10.0f, 10.0f);
// float r2 = log_mgu_pdf(data_array, N, guess, k, c1, c2);
printf("r = %f\n", r);
}
//void test_MRGseed() {
// unsigned int hTimer;
// double time;
// cutCreateTimer(&hTimer);
//
// unsigned long seeds[6] = { 2, 1, 2, 3, 4, 5 };
//
// seed_MRG32k3a(32, 128, seeds);
//
// int N = 16777216;
// unsigned int* array1 = (unsigned int*) malloc(N * sizeof(unsigned int));
// unsigned int* array2 = (unsigned int*) malloc(N * sizeof(unsigned int));
//
// cutResetTimer(hTimer);
// cutStartTimer(hTimer);
// populate_randUI_MRG32k3a(array1, N);
// cutStopTimer(hTimer);
// time = cutGetTimerValue(hTimer);
// printf("no skip time = %f\n", time);
//
// seed_MRG32k3a(32, 128, seeds);
//
// cutResetTimer(hTimer);
// cutStartTimer(hTimer);
// populate_randUI_MRG32k3a_skip(array2, N);
// cutStopTimer(hTimer);
// time = cutGetTimerValue(hTimer);
// printf("skip time = %f\n", time);
//
// int k = 4096;
// for (int i = 0; i < 200; i++) {
// printf("%d: %u ", i, array1[k - 1 + i * k]);
// printf("%u\n", array2[i]);
// }
//
// free(array1);
// free(array2);
//
//}
void test_MRG() {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
unsigned long seeds[6] = { 12345UL, 12345UL, 12345UL, 12345UL, 12345UL, 12345UL };
int nb = 128;
int nt = 64;
int tt = nb * nt;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
seed_MRG(nb, nt, seeds);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("seed time = %f\n", time);
// int N = 16777216;
// int N = 1048576;
// int N = 2097152;
int N = 131072;
// int N = 65536;
// int N = 32768;
// int N = 8;
float* array1 = (float*) malloc(N * sizeof(float));
float* array2 = (float*) malloc(N * sizeof(float));
float* d_array2;
hipMalloc((void**) &d_array2, N * sizeof(float));
cutResetTimer(hTimer);
cutStartTimer(hTimer);
for (int i = 0; i < 65; i++) {
populate_rand_MRG_REF(array1, N);
}
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("ref time = %f\n", time);
kill_MRG();
seed_MRG(nb, nt, seeds);
// seedXS();
cutResetTimer(hTimer);
cutStartTimer(hTimer);
populate_rand_MRG_d(d_array2, N);
// populate_rand_XS_d(d_array2,N);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("gpu time = %f\n", time);
hipMemcpy(array2, d_array2, N * sizeof(float), hipMemcpyDeviceToHost);
// int k = 4096;
for (int i = 0; i < 10; i++) {
// printf("%d: %f ", i, array1[1048576 + i]);
// printf("%f\n", array2[tt*i]);
printf("%d: %f ", i, array1[i]);
printf("%f\n", array2[tt * i + 1]);
}
hipFree(d_array2);
free(array1);
free(array2);
}
void compilerSmarts();
void test_rng(int N) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
seed_rng();
float* d_array;
hipMalloc((void**) &d_array, N * sizeof(float));
cutResetTimer(hTimer);
cutStartTimer(hTimer);
populate_rand_d(d_array, N);
// compilerSmarts();
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("time = %f\n", time);
float* array = (float*) malloc(N * sizeof(float));
hipMemcpy(array, d_array, N * sizeof(float), hipMemcpyDeviceToHost);
float sum = 0;
for (int i = 0; i < N; i++) {
sum += array[i] - 0.5f;
}
printf("%f\n", sum);
hipFree(d_array);
free(array);
kill_rng();
}
void test_rng2(int N) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
seed_rng(16777216, 32, 128);
unsigned int* d_array;
hipMalloc((void**) &d_array, N * sizeof(unsigned int));
cutResetTimer(hTimer);
cutStartTimer(hTimer);
populate_randUI_d(d_array, N);
populate_randUI_d(d_array, N);
// compilerSmarts();
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("time = %f\n", time);
unsigned int* array = (unsigned int*) malloc(N * sizeof(unsigned int));
hipMemcpy(array, d_array, N * sizeof(unsigned int), hipMemcpyDeviceToHost);
unsigned int sum = 0;
for (int i = 0; i < N; i++) {
sum += array[i];
}
printf("%d\n", sum);
to_file(array, N - 4, "rngdata.txt");
hipFree(d_array);
free(array);
kill_rng();
}
void compilerSmarts() {
unsigned long x;
unsigned long z = 4294967087UL;
for (int i = 0; i < 1000000; i++) {
x = 32;
if (x > z) {
x = x % z;
}
}
printf("%ld\n", x);
}
void test_matrix2() {
// float E[9] = { 0.9673, 0.4522, 0.8797, 0.4522, 0.2890, 0.4882, 0.8797, 0.4882, 1.3795 };
// float EI[9];
const int D = 5;
float E[D * D] = { 2.1487f, 0.8244f, 0.8244f, 0.3297f, 1.3190f, 0.8244f, 3.3718f, 1.6420f,
1.6406f, 2.3812f, 0.8244f, 1.6420f, 2.7485f, 1.2692f, 2.1311f, 0.3297f, 1.6406f,
1.2692f, 1.5613f, 1.4800f, 1.3190f, 2.3812f, 2.1311f, 1.4800f, 3.0657f };
float EI[D * D];
// unsigned int hTimer;
// double time;
// cutCreateTimer(&hTimer);
float d;
printf("%.10f\n", matrix_det(E, D));
// printf("%.10f\n", matrix_det2(E, D));
// printf("%.10f\n", matrix_det3(E, D));
// printf("%.10f\n", matrix_det4(E, D));
matrix_det_inv_pd(E, d, EI, D);
// printf("%.10f\n", d);
// cutResetTimer(hTimer);
// cutStartTimer(hTimer);
// for (int i = 0; i < 100000; i++) {
// matrix_inverse(E, EI, 3);
// }
// cutStopTimer(hTimer);
// time = cutGetTimerValue(hTimer);
// printf("time = %f\n", time);
// matrix_print(EI, 3, 3);
//
// cutResetTimer(hTimer);
// cutStartTimer(hTimer);
// for (int i = 0; i < 100000; i++) {
// matrix_inverse_pd(E, EI, 3);
// }
// cutStopTimer(hTimer);
// time = cutGetTimerValue(hTimer);
// printf("time = %f\n", time);
// matrix_print(EI, 3, 3);
}
void test_burn() {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
int N = 1073741824;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
seed_XS_REF(N);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("time = %f\n", time);
}
int main(int argc, char **argv) {
// test_MRGseed();
// test_pdfh();
// test_MRG();
// int N = 65536;
// int N = 131072;
// int M = 512;
// testLogSpeed(N, M);
// test_rng(16777216);
// test_rng2(1048576);
// test_matrix2();
// test_burn();
// const int N = 4096;
// const int N = 65536;
const int N = 65536;
scan_init(N);
test_scan(N);
scan_destroy();
}
| 43a7cc56f0c6d08caa29ae0239b445c1aa4852fd.cu | /*
* test_util.cu
*
* Created on: 22-Mar-2009
* Author: alee
*/
#include <stdio.h>
#include "gauss.h"
#include <cutil.h>
#include "reduce.h"
#include "scan.h"
#include "mix_gauss.h"
#include "mix_gauss_uniform.h"
#include "MRG.h"
#include "xorshift.h"
#include "rng.h"
#include "output.h"
#include "matrix.h"
__global__ void logtest(int size, float* d_array, int M) {
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
const int tt = blockDim.x * gridDim.x;
int i, j;
float x;
for (i = tid; i < size; i += tt) {
x = i;
for (j = 0; j < M; j++) {
x = logf(x);
x = expf(x);
}
d_array[i] = x;
}
}
void logtestref(int size, float* array, int M) {
float x;
for (int i = 0; i < size; i++) {
x = (float) i;
for (int j = 0; j < M; j++) {
x = logf(x);
x = expf(x);
}
array[i] = x;
}
}
void testLogSpeed(int N, int M) {
unsigned int hTimer;
double gtime, ctime;
cutCreateTimer(&hTimer);
float* array = (float*) malloc(N * sizeof(float));
float* d_array;
cudaMalloc((void**) &d_array, N * sizeof(float));
cutResetTimer(hTimer);
cutStartTimer(hTimer);
logtest<<<256,64>>>(N, d_array, M);
cudaThreadSynchronize();
cutStopTimer(hTimer);
gtime = cutGetTimerValue(hTimer);
printf("log test time = %f\n", gtime);
cutResetTimer(hTimer);
cutStartTimer(hTimer);
logtestref(N, array, M);
cutStopTimer(hTimer);
ctime = cutGetTimerValue(hTimer);
printf("ref log test time = %f\n", ctime);
// float* h_array = (float*) malloc(N * sizeof(float));
// cudaMemcpy(h_array, d_array, N * sizeof(float), cudaMemcpyDeviceToHost);
// for (int i = 0; i < 200; i++) {
// printf("%f %f\n", h_array[i], array[i]);
// }
// free(h_array);
printf("speedup = %f\n", ctime / gtime);
free(array);
cudaFree(d_array);
}
void test_reduce2D(int N, int nb, int nt) {
const int D = 2;
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* array = (float*) malloc(N * D * sizeof(float));
float* d_array;
cudaMalloc((void **) &d_array, N * D * sizeof(float));
// populate_rand(array, N * D);
float c = 1;
for (int i = 0; i < N * D; i++) {
if (i % D == 1) {
c = 1;
} else {
c = .5;
}
array[i] = c;
}
cudaMemcpy(d_array, array, sizeof(float) * N * D, cudaMemcpyHostToDevice);
cutResetTimer(hTimer);
cutStartTimer(hTimer);
double sumh1 = 0;
double sumh2 = 0;
for (int i = 0; i < N; i++) {
sumh1 += array[i * 2];
sumh2 += array[i * 2 + 1];
}
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Host reduce (2D): sum1 = %f, sum2 = %f\n", sumh1, sumh2);
printf("Time = %f\n\n", time);
float sum[2];
cutResetTimer(hTimer);
cutStartTimer(hTimer);
reduce(N, D, d_array, sum, nb, nt);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("reduce_2D: sum1 = %f, sum2 = %f\n", sum[0], sum[1]);
printf("Time = %f\n\n", time);
free(array);
cudaFree(d_array);
}
void test_reduceMD(int N, int D, int nb, int nt) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* array = (float*) malloc(N * D * sizeof(float));
float* d_array;
cudaMalloc((void **) &d_array, N * D * sizeof(float));
// populate_rand_XS(array, N * D);
float c = 1;
for (int i = 0; i < N * D; i++) {
c = (float) (i % D) + 1;
array[i] = c;
}
cudaMemcpy(d_array, array, sizeof(float) * N * D, cudaMemcpyHostToDevice);
cutResetTimer(hTimer);
cutStartTimer(hTimer);
// double* sumh = (double*) malloc(D * sizeof(double));
float* sumh = (float*) malloc(D * sizeof(float));
for (int j = 0; j < D; j++) {
sumh[j] = 0;
}
for (int i = 0; i < N; i++) {
for (int j = 0; j < D; j++) {
sumh[j] += array[i * D + j];
}
}
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Host reduce (MD): (");
for (int i = 0; i < D; i++) {
printf("%f,", sumh[i]);
}
printf(")\n");
printf("Time = %f\n\n", time);
float* sum = (float*) malloc(D * sizeof(float));
cutResetTimer(hTimer);
cutStartTimer(hTimer);
reduce(N, D, d_array, sum, nb, nt);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Reduce (MD): (");
for (int i = 0; i < D; i++) {
printf("%f,", sum[i]);
}
printf(")\n");
printf("Time = %f\n\n", time);
free(array);
cudaFree(d_array);
free(sum);
free(sumh);
}
void test_float() {
float f = (float) 67108868.0f;
double d = (double) 67108868;
// float f = 0;
// double d = 0;
// for (int i = 0; i < 20000000; i++) {
// f = f + 4.0;
// d = d + 4.0;
// }
printf("float = %f\n", f);
printf("double = %f\n", d);
// printf("%f\n", 45000000.0f * 4.0f);
}
void test_reduce(int N, int nb, int nt) {
// const int N = 4194304;
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
float* array = (float*) malloc(N * sizeof(float));
float* d_array;
cudaMalloc((void **) &d_array, N * sizeof(float));
for (int i = 0; i < N; i++) {
array[i] = 0.5;
}
cudaMemcpy(d_array, array, sizeof(float) * N, cudaMemcpyHostToDevice);
cutResetTimer(hTimer);
cutStartTimer(hTimer);
float sum = 0;
double sumh = 0;
for (int i = 0; i < N; i++) {
sumh += array[i];
}
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("Host reduce: sum = %f\n", sumh);
printf("Time = %f\n\n", time);
cutResetTimer(hTimer);
cutStartTimer(hTimer);
reduce(N, d_array, sum, nb, nt);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("reduce: sum = %f\n", sum);
printf("Time = %f\n\n", time);
free(array);
cudaFree(d_array);
}
void test_matrix() {
const int MA = 3;
const int NA = 4;
float A[12] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };
const int MB = 4;
const int NB = 3;
float B[12] = { 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23 };
const int MC = MA;
const int NC = NB;
float C[MC * NC];
matrix_times(A, B, C, MA, NA, MB, NB);
matrix_print(C, MC, NC);
int ME = 3;
int NE = 3;
float E[9] = { 1, 2, 2, 5, 7, 3, 4, 8, 7 };
float EI[9];
printf("1\n");
matrix_inverse(E, EI, ME);
printf("2\n");
matrix_print(EI, ME, NE);
printf("3\n");
printf("|E| = %f\n", matrix_det(E, ME));
printf("|EI| = %f\n", matrix_det(EI, ME));
int MF = 4;
int NF = 4;
float F[16] = { 1, 8, 6, 3, 2, 7, 4, 0, -5, 4, -8, 3, 2, -6, 9, 3 };
float FI[16];
matrix_inverse(F, FI, MF);
matrix_print(FI, MF, NF);
printf("|F| = %f\n", matrix_det(F, MF));
printf("|FI| = %f\n", matrix_det(FI, MF));
float x[4] = { 3, 5, 2, 7 };
float r = matrix_xtmx(F, x, 4);
printf("r = %f\n", r);
}
void test_gaussmv() {
const int D = 3;
float x[3] = { 0.5, 1, 2 };
float mu[3] = { 2, 1, 4 };
float cov[9] = { 3, 0.5, 3, 0.5, 2, 0, 3, 0, 4 };
float c2[9];
float c1;
compute_c1_c2(cov, D, c1, c2);
float* h_args = (float*) malloc((1 + D + D * D) * sizeof(float));
h_args[0] = c1;
h_args[1] = c2[0];
h_args[2] = c2[1];
h_args[3] = c2[2];
h_args[4] = c2[3];
h_args[5] = c2[4];
h_args[6] = c2[5];
h_args[7] = c2[6];
h_args[8] = c2[7];
h_args[9] = c2[8];
h_args[10] = mu[0];
h_args[11] = mu[1];
h_args[12] = mu[2];
float r = gauss_pdfh(x, c1, c2, mu, D);
printf("r = %f\n", r);
r = gauss_pdfh(x, h_args, D);
printf("r = %f\n", r);
free(h_args);
}
void test_scan(int N) {
unsigned int hTimer;
double ctime, gtime;
cutCreateTimer(&hTimer);
float* array = (float*) malloc(N * sizeof(float));
for (int i = 0; i < N; i++) {
array[i] = 0.5;
}
float* array_out = (float*) malloc(N * sizeof(float));
float* d_array;
cudaMalloc((void **) &d_array, N * sizeof(float));
float* d_array_out;
cudaMalloc((void **) &d_array_out, N * sizeof(float));
cudaMemcpy(d_array, array, sizeof(float) * N, cudaMemcpyHostToDevice);
cutResetTimer(hTimer);
cutStartTimer(hTimer);
scan(N, d_array, d_array_out, 32, 32);
cutStopTimer(hTimer);
gtime = cutGetTimerValue(hTimer);
printf("GPU Time = %f\n\n", gtime);
cudaThreadSynchronize();
cudaMemcpy(array_out, d_array_out, sizeof(float) * N, cudaMemcpyDeviceToHost);
// for (int i = 0; i < N; i++) {
// printf("%f ", array_out[i]);
// }
// printf("\n");
float* array_out_ref = (float*) malloc(N * sizeof(float));
cutResetTimer(hTimer);
cutStartTimer(hTimer);
cudaMemcpy(array, d_array, sizeof(float) * N, cudaMemcpyDeviceToHost);
scan_ref(N, array, array_out_ref);
cudaMemcpy(d_array_out, array_out_ref, sizeof(float) * N, cudaMemcpyHostToDevice);
cutStopTimer(hTimer);
ctime = cutGetTimerValue(hTimer);
printf("CPU Time = %f\n", ctime);
printf("speedup = %f\n", ctime/gtime);
for (int i = 0; i < N; i++) {
if (array_out[i] != array_out_ref[i]) {
printf("FAIL: %d, %f, %f\n", i, array_out[i], array_out_ref[i]);
}
}
free(array);
free(array_out);
free(array_out_ref);
cudaFree(d_array);
cudaFree(d_array_out);
}
void test_pdfh() {
seed_rng();
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
const int k = 4;
float mus[4] = { -3, 0, 3, 6 };
float sigma = 0.55f;
const int N = 100;
float data_array[N];
generate_mix_data(k, sigma, mus, data_array, N);
float guess[4] = { -6, 3, 0, -3 };
float c1, c2;
compute_ci1_ci2(sigma, 1.0f / k, c1, c2);
float r = 0;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
for (int i = 0; i < 32768; i++) {
r = log_mgu_pdfh(data_array, N, guess, k, c1, c2, -10.0f, 10.0f);
}
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("ref pdfh time = %f\n", time);
printf("r = %f\n", r);
}
void test_pdfh2() {
const int k = 4;
float sigma = 0.55f;
const int N = 2;
float data_array[N] = { 0, 0 };
// generate_mix_data(k, sigma, mus, data_array, N);
float guess[k] = { 10, -12, 13, 11 };
float c1, c2;
compute_ci1_ci2(sigma, 1.0f / k, c1, c2);
float r = log_mgu_pdfh(data_array, N, guess, k, c1, c2, -10.0f, 10.0f);
// float r2 = log_mgu_pdf(data_array, N, guess, k, c1, c2);
printf("r = %f\n", r);
}
//void test_MRGseed() {
// unsigned int hTimer;
// double time;
// cutCreateTimer(&hTimer);
//
// unsigned long seeds[6] = { 2, 1, 2, 3, 4, 5 };
//
// seed_MRG32k3a(32, 128, seeds);
//
// int N = 16777216;
// unsigned int* array1 = (unsigned int*) malloc(N * sizeof(unsigned int));
// unsigned int* array2 = (unsigned int*) malloc(N * sizeof(unsigned int));
//
// cutResetTimer(hTimer);
// cutStartTimer(hTimer);
// populate_randUI_MRG32k3a(array1, N);
// cutStopTimer(hTimer);
// time = cutGetTimerValue(hTimer);
// printf("no skip time = %f\n", time);
//
// seed_MRG32k3a(32, 128, seeds);
//
// cutResetTimer(hTimer);
// cutStartTimer(hTimer);
// populate_randUI_MRG32k3a_skip(array2, N);
// cutStopTimer(hTimer);
// time = cutGetTimerValue(hTimer);
// printf("skip time = %f\n", time);
//
// int k = 4096;
// for (int i = 0; i < 200; i++) {
// printf("%d: %u ", i, array1[k - 1 + i * k]);
// printf("%u\n", array2[i]);
// }
//
// free(array1);
// free(array2);
//
//}
void test_MRG() {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
unsigned long seeds[6] = { 12345UL, 12345UL, 12345UL, 12345UL, 12345UL, 12345UL };
int nb = 128;
int nt = 64;
int tt = nb * nt;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
seed_MRG(nb, nt, seeds);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("seed time = %f\n", time);
// int N = 16777216;
// int N = 1048576;
// int N = 2097152;
int N = 131072;
// int N = 65536;
// int N = 32768;
// int N = 8;
float* array1 = (float*) malloc(N * sizeof(float));
float* array2 = (float*) malloc(N * sizeof(float));
float* d_array2;
cudaMalloc((void**) &d_array2, N * sizeof(float));
cutResetTimer(hTimer);
cutStartTimer(hTimer);
for (int i = 0; i < 65; i++) {
populate_rand_MRG_REF(array1, N);
}
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("ref time = %f\n", time);
kill_MRG();
seed_MRG(nb, nt, seeds);
// seedXS();
cutResetTimer(hTimer);
cutStartTimer(hTimer);
populate_rand_MRG_d(d_array2, N);
// populate_rand_XS_d(d_array2,N);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("gpu time = %f\n", time);
cudaMemcpy(array2, d_array2, N * sizeof(float), cudaMemcpyDeviceToHost);
// int k = 4096;
for (int i = 0; i < 10; i++) {
// printf("%d: %f ", i, array1[1048576 + i]);
// printf("%f\n", array2[tt*i]);
printf("%d: %f ", i, array1[i]);
printf("%f\n", array2[tt * i + 1]);
}
cudaFree(d_array2);
free(array1);
free(array2);
}
void compilerSmarts();
void test_rng(int N) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
seed_rng();
float* d_array;
cudaMalloc((void**) &d_array, N * sizeof(float));
cutResetTimer(hTimer);
cutStartTimer(hTimer);
populate_rand_d(d_array, N);
// compilerSmarts();
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("time = %f\n", time);
float* array = (float*) malloc(N * sizeof(float));
cudaMemcpy(array, d_array, N * sizeof(float), cudaMemcpyDeviceToHost);
float sum = 0;
for (int i = 0; i < N; i++) {
sum += array[i] - 0.5f;
}
printf("%f\n", sum);
cudaFree(d_array);
free(array);
kill_rng();
}
void test_rng2(int N) {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
seed_rng(16777216, 32, 128);
unsigned int* d_array;
cudaMalloc((void**) &d_array, N * sizeof(unsigned int));
cutResetTimer(hTimer);
cutStartTimer(hTimer);
populate_randUI_d(d_array, N);
populate_randUI_d(d_array, N);
// compilerSmarts();
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("time = %f\n", time);
unsigned int* array = (unsigned int*) malloc(N * sizeof(unsigned int));
cudaMemcpy(array, d_array, N * sizeof(unsigned int), cudaMemcpyDeviceToHost);
unsigned int sum = 0;
for (int i = 0; i < N; i++) {
sum += array[i];
}
printf("%d\n", sum);
to_file(array, N - 4, "rngdata.txt");
cudaFree(d_array);
free(array);
kill_rng();
}
void compilerSmarts() {
unsigned long x;
unsigned long z = 4294967087UL;
for (int i = 0; i < 1000000; i++) {
x = 32;
if (x > z) {
x = x % z;
}
}
printf("%ld\n", x);
}
void test_matrix2() {
// float E[9] = { 0.9673, 0.4522, 0.8797, 0.4522, 0.2890, 0.4882, 0.8797, 0.4882, 1.3795 };
// float EI[9];
const int D = 5;
float E[D * D] = { 2.1487f, 0.8244f, 0.8244f, 0.3297f, 1.3190f, 0.8244f, 3.3718f, 1.6420f,
1.6406f, 2.3812f, 0.8244f, 1.6420f, 2.7485f, 1.2692f, 2.1311f, 0.3297f, 1.6406f,
1.2692f, 1.5613f, 1.4800f, 1.3190f, 2.3812f, 2.1311f, 1.4800f, 3.0657f };
float EI[D * D];
// unsigned int hTimer;
// double time;
// cutCreateTimer(&hTimer);
float d;
printf("%.10f\n", matrix_det(E, D));
// printf("%.10f\n", matrix_det2(E, D));
// printf("%.10f\n", matrix_det3(E, D));
// printf("%.10f\n", matrix_det4(E, D));
matrix_det_inv_pd(E, d, EI, D);
// printf("%.10f\n", d);
// cutResetTimer(hTimer);
// cutStartTimer(hTimer);
// for (int i = 0; i < 100000; i++) {
// matrix_inverse(E, EI, 3);
// }
// cutStopTimer(hTimer);
// time = cutGetTimerValue(hTimer);
// printf("time = %f\n", time);
// matrix_print(EI, 3, 3);
//
// cutResetTimer(hTimer);
// cutStartTimer(hTimer);
// for (int i = 0; i < 100000; i++) {
// matrix_inverse_pd(E, EI, 3);
// }
// cutStopTimer(hTimer);
// time = cutGetTimerValue(hTimer);
// printf("time = %f\n", time);
// matrix_print(EI, 3, 3);
}
void test_burn() {
unsigned int hTimer;
double time;
cutCreateTimer(&hTimer);
int N = 1073741824;
cutResetTimer(hTimer);
cutStartTimer(hTimer);
seed_XS_REF(N);
cutStopTimer(hTimer);
time = cutGetTimerValue(hTimer);
printf("time = %f\n", time);
}
int main(int argc, char **argv) {
// test_MRGseed();
// test_pdfh();
// test_MRG();
// int N = 65536;
// int N = 131072;
// int M = 512;
// testLogSpeed(N, M);
// test_rng(16777216);
// test_rng2(1048576);
// test_matrix2();
// test_burn();
// const int N = 4096;
// const int N = 65536;
const int N = 65536;
scan_init(N);
test_scan(N);
scan_destroy();
}
|
d5ea577e9b0f7abc13e2a8db92860b233479adca.hip | // !!! This is a file automatically generated by hipify!!!
#include "lab2.h"
#include "time.h"
#include <stdlib.h>
#include <cstdlib> // std::abs
#include <cmath>   // pow
static const unsigned W = 640;
static const unsigned H = 480;
static const unsigned NFRAME = 10800;
int sign(int x) {
return (x > 0) - (x < 0);
}
void init_board(int* sx, int* sy,int* x, int* y,int* lb, int* rb, int cei, int flr);
void show_boarder(uint8_t *yuv,int up, int down)
{
hipMemset(yuv+W*up,255,W);
hipMemset(yuv+W*down,255,W);
}
void show_ball(uint8_t *yuv,int x, int y)
{
hipMemset(yuv+W*(y-1)+x-1,255,3);
hipMemset(yuv+W*y+x-1,255,3);
hipMemset(yuv+W*(y+1)+x-1,255,3);
}
void show_bouncer(uint8_t *yuv,int left_bar_xc,int left_bar_yc,int right_bar_xc,int right_bar_yc,int bar_length_perside,int bar_thickness)
{
int i;
for (i=0;i<H;i++)
{
if (i>=left_bar_yc-bar_length_perside && i<=left_bar_yc+bar_length_perside)
hipMemset(yuv+W*i+left_bar_xc-bar_thickness,255,bar_thickness);
if (i>=right_bar_yc-bar_length_perside && i<=right_bar_yc+bar_length_perside)
hipMemset(yuv+W*i+right_bar_xc,255,bar_thickness);
}
}
void ball_speed_up(int* vx,int*vy)
{
int sgnx=sign(*vx);
int sgny=sign(*vy);
if (rand()%4>0)
(*vx)=(*vx)+1*sgnx;
if (sgny!=0)
{
if (rand()%4>2)
(*vy)=(*vy)+1*sgny;
}
else
{
if (rand()%4>2)
(*vy)=(*vy)+((rand()%2)*2-1);
}
}
void print_number(uint8_t* yuv,int print_locationx,int print_locationy,int digit) /*20x30 digit*/
{
int i;
switch (digit)
{
case 0:
for (i=0;i<30;i++)
{
if (i==0 || i==1 || i==28 || i==29)
hipMemset(yuv+W*(print_locationy+i)+print_locationx,255,20);
else
{
hipMemset(yuv+W*(print_locationy+i)+print_locationx,255,3);
hipMemset(yuv+W*(print_locationy+i)+print_locationx+17,255,3);
}
}
break;
case 1:
for (i=0;i<30;i++)
{
hipMemset(yuv+W*(print_locationy+i)+print_locationx+9,255,3);
}
break;
case 2:
for (i=0;i<30;i++)
{
if (i==0 || i==1 || i==28 || i==29 || i==14 || i==15)
hipMemset(yuv+W*(print_locationy+i)+print_locationx,255,20);
else if (i<14)
hipMemset(yuv+W*(print_locationy+i)+print_locationx+17,255,3);
else
hipMemset(yuv+W*(print_locationy+i)+print_locationx,255,3);
}
break;
case 3:
for (i=0;i<30;i++)
{
if (i==0 || i==1 || i==28 || i==29 || i==14 || i==15)
hipMemset(yuv+W*(print_locationy+i)+print_locationx,255,20);
else
hipMemset(yuv+W*(print_locationy+i)+print_locationx+17,255,3);
}
break;
case 4:
for (i=0;i<30;i++)
{
if (i==14 || i==15)
hipMemset(yuv+W*(print_locationy+i)+print_locationx,255,20);
else if (i<14)
{
hipMemset(yuv+W*(print_locationy+i)+print_locationx,255,3);
hipMemset(yuv+W*(print_locationy+i)+print_locationx+17,255,3);
}
else
hipMemset(yuv+W*(print_locationy+i)+print_locationx+17,255,3);
}
break;
case 5:
for (i=0;i<30;i++)
{
if (i==0 || i==1 || i==28 || i==29 || i==14 || i==15)
hipMemset(yuv+W*(print_locationy+i)+print_locationx,255,20);
else if (i<14)
hipMemset(yuv+W*(print_locationy+i)+print_locationx,255,3);
else
hipMemset(yuv+W*(print_locationy+i)+print_locationx+17,255,3);
}
break;
case 6:
for (i=0;i<30;i++)
{
if (i==0 || i==1 || i==28 || i==29 || i==14 || i==15)
hipMemset(yuv+W*(print_locationy+i)+print_locationx,255,20);
else if (i<14)
hipMemset(yuv+W*(print_locationy+i)+print_locationx,255,3);
else
{
hipMemset(yuv+W*(print_locationy+i)+print_locationx,255,3);
hipMemset(yuv+W*(print_locationy+i)+print_locationx+17,255,3);
}
}
break;
case 7:
for (i=0;i<30;i++)
{
if (i==0 || i==1)
hipMemset(yuv+W*(print_locationy+i)+print_locationx,255,20);
else
hipMemset(yuv+W*(print_locationy+i)+print_locationx+17,255,3);
}
break;
case 8:
for (i=0;i<30;i++)
{
if (i==0 || i==1 || i==28 || i==29|| i==14 || i==15)
hipMemset(yuv+W*(print_locationy+i)+print_locationx,255,20);
else
{
hipMemset(yuv+W*(print_locationy+i)+print_locationx,255,3);
hipMemset(yuv+W*(print_locationy+i)+print_locationx+17,255,3);
}
}
break;
case 9:
for (i=0;i<30;i++)
{
if (i==0 || i==1 || i==28 || i==29|| i==14 || i==15)
hipMemset(yuv+W*(print_locationy+i)+print_locationx,255,20);
else if (i<14)
{
hipMemset(yuv+W*(print_locationy+i)+print_locationx,255,3);
hipMemset(yuv+W*(print_locationy+i)+print_locationx+17,255,3);
}
else
hipMemset(yuv+W*(print_locationy+i)+print_locationx+17,255,3);
}
break;
default:
break;
}
}
void show_score(uint8_t *yuv,int cei, int left_score, int right_score)
{
int print_locationx=10;
int print_locationy=cei+5;
int temp=left_score;
int digit_count=1;
while(temp/10)
{
digit_count+=1;
temp/=10;
}
temp=left_score;
for (int i=digit_count;i>0;i--)
{
print_number(yuv,print_locationx,print_locationy,temp/(pow(10,i-1)));
temp=temp%((int)(pow(10,i-1)));
print_locationx+=22;
}
temp=right_score;
print_locationx=W-10-22;
digit_count=1;
while(temp/10)
{
digit_count+=1;
temp/=10;
}
temp=right_score;
for (int i=digit_count;i>0;i--)
{
print_number(yuv,print_locationx,print_locationy,temp%10);
temp=temp/10;
print_locationx-=22;
}
}
struct Lab2VideoGenerator::Impl {
int left_bar_xc=10;
int right_bar_xc=W-10;
int bar_length_perside=15;
int bar_thickness=2;
int left_score=0;
int right_score=0;
int ball_x=W/2;
int ball_xv=0;
int ball_yv=0;
int ceiling=10;
int flor=H-40;
int left_bar_yc=(flor+ceiling)/2;
int right_bar_yc=(flor+ceiling)/2;
int ball_y=(flor+ceiling)/2;
int t=0;
int bar_max_speed=3;
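    // phase: 'i' = serve a new ball, 'r' = rally in progress,
    // 'g' = goal scored (flash the screen, update the score, then re-serve)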
char phase='i';
};
Lab2VideoGenerator::Lab2VideoGenerator(): impl(new Impl) {
}
Lab2VideoGenerator::~Lab2VideoGenerator() {}
void Lab2VideoGenerator::get_info(Lab2VideoInfo &info) {
info.w = W;
info.h = H;
info.n_frame = NFRAME;
// fps = 60/1 = 60
info.fps_n = 60;
info.fps_d = 1;
};
void Lab2VideoGenerator::Generate(uint8_t *yuv) {
hipMemset(yuv, 0, W*H);
hipMemset(yuv+W*H, 128, W*H/2);
if ((impl->phase)=='i')
{
init_board(&(impl->ball_xv),&(impl->ball_yv),&(impl->ball_x),&(impl->ball_y),&(impl->left_bar_yc),&(impl->right_bar_yc),(impl->ceiling),(impl->flor));
(impl->phase)='r';
}
else if ((impl->phase)=='r')
{
if ((impl->ball_xv)>0)
{
int to_move=(impl->ball_y)-(impl->right_bar_yc);
if (std::abs(to_move)>(impl->bar_max_speed))
to_move=sign(to_move)*(impl->bar_max_speed);
if (to_move>0 && ((impl->right_bar_yc)+(impl->bar_length_perside)+to_move)>(impl->flor))
to_move=(impl->flor)-((impl->right_bar_yc)+(impl->bar_length_perside));
if (to_move<0 && ((impl->right_bar_yc)-(impl->bar_length_perside)+to_move)<(impl->ceiling))
to_move=(impl->ceiling)-((impl->right_bar_yc)-(impl->bar_length_perside));
(impl->right_bar_yc)+=to_move;
}
else
{
int to_move=(impl->ball_y)-(impl->left_bar_yc);
if (std::abs(to_move)>(impl->bar_max_speed))
to_move=sign(to_move)*(impl->bar_max_speed);
if (to_move>0 && ((impl->left_bar_yc)+(impl->bar_length_perside)+to_move)>(impl->flor))
to_move=(impl->flor)-((impl->left_bar_yc)+(impl->bar_length_perside));
if (to_move<0 && ((impl->left_bar_yc)-(impl->bar_length_perside)+to_move)<(impl->ceiling))
to_move=(impl->ceiling)-((impl->left_bar_yc)-(impl->bar_length_perside));
(impl->left_bar_yc)+=to_move;
}
(impl->ball_x)+=(impl->ball_xv);
(impl->ball_y)+=(impl->ball_yv);
if((impl->ball_y)<=(impl->ceiling))
{
(impl->ball_y)=(impl->ceiling)+((impl->ceiling)-(impl->ball_y))+1;
(impl->ball_yv)=(-1)*(impl->ball_yv);
}
if((impl->ball_y)>=(impl->flor))
{
(impl->ball_y)=(impl->flor)-((impl->ball_y)-(impl->flor))-1;
(impl->ball_yv)=(-1)*(impl->ball_yv);
}
if((impl->ball_x)<=(impl->left_bar_xc) && (impl->ball_y)>=(impl->left_bar_yc)-(impl->bar_length_perside) && (impl->ball_y)<=(impl->left_bar_yc)+(impl->bar_length_perside))
{
(impl->ball_x)=(impl->left_bar_xc)+((impl->left_bar_xc)-(impl->ball_x))+1;
(impl->ball_xv)=(-1)*(impl->ball_xv);
ball_speed_up(&(impl->ball_xv),&(impl->ball_yv));
}
if((impl->ball_x)>=(impl->right_bar_xc) && (impl->ball_y)>=(impl->right_bar_yc)-(impl->bar_length_perside) && (impl->ball_y)<=(impl->right_bar_yc)+(impl->bar_length_perside))
{
(impl->ball_x)=(impl->right_bar_xc)-((impl->ball_x)-(impl->right_bar_xc))-1;
(impl->ball_xv)=(-1)*(impl->ball_xv);
ball_speed_up(&(impl->ball_xv),&(impl->ball_yv));
}
if ((impl->ball_x)<=1)
{
(impl->ball_x)=1;
(impl->phase)='g';
(impl->t)=0;
}
if ((impl->ball_x)>=W-1)
{
(impl->ball_x)=W-1;
(impl->phase)='g';
(impl->t)=0;
}
}
else if ((impl->phase)=='g')
{
if ((impl->t)==0)
{
if ((impl->ball_x)==1)
(impl->right_score)+=1;
if ((impl->ball_x)==(W-1))
(impl->left_score)+=1;
}
if ((impl->t)%2)
{
hipMemset(yuv, 255, W*H);
hipMemset(yuv+W*H, 128, W*H/2);
}
if((impl->t)==10)
(impl->phase)='i';
(impl->t)++;
}
show_boarder(yuv,(impl->ceiling),(impl->flor));
show_score(yuv,(impl->flor),(impl->left_score),(impl->right_score));
show_ball(yuv,(impl->ball_x),(impl->ball_y));
show_bouncer(yuv,(impl->left_bar_xc),(impl->left_bar_yc),(impl->right_bar_xc),(impl->right_bar_yc),(impl->bar_length_perside),(impl->bar_thickness));
}
void init_board(int* sx, int* sy,int* x, int* y,int* lb, int* rb, int cei, int flr)
{
srand(time(NULL));
*sx=(rand()%2+1)*((rand()%2)*2-1);
*sy=rand()%3-1;
*x=W/2;
*y=(flr+cei)/2;
*lb=(flr+cei)/2;
*rb=(flr+cei)/2;
}
| d5ea577e9b0f7abc13e2a8db92860b233479adca.cu | #include "lab2.h"
#include "time.h"
#include <stdlib.h>
#include <cstdlib> // std::abs
#include <cmath>   // pow
static const unsigned W = 640;
static const unsigned H = 480;
static const unsigned NFRAME = 10800;
int sign(int x) {
return (x > 0) - (x < 0);
}
void init_board(int* sx, int* sy,int* x, int* y,int* lb, int* rb, int cei, int flr);
void show_boarder(uint8_t *yuv,int up, int down)
{
cudaMemset(yuv+W*up,255,W);
cudaMemset(yuv+W*down,255,W);
}
void show_ball(uint8_t *yuv,int x, int y)
{
cudaMemset(yuv+W*(y-1)+x-1,255,3);
cudaMemset(yuv+W*y+x-1,255,3);
cudaMemset(yuv+W*(y+1)+x-1,255,3);
}
void show_bouncer(uint8_t *yuv,int left_bar_xc,int left_bar_yc,int right_bar_xc,int right_bar_yc,int bar_length_perside,int bar_thickness)
{
int i;
for (i=0;i<H;i++)
{
if (i>=left_bar_yc-bar_length_perside && i<=left_bar_yc+bar_length_perside)
cudaMemset(yuv+W*i+left_bar_xc-bar_thickness,255,bar_thickness);
if (i>=right_bar_yc-bar_length_perside && i<=right_bar_yc+bar_length_perside)
cudaMemset(yuv+W*i+right_bar_xc,255,bar_thickness);
}
}
void ball_speed_up(int* vx,int*vy)
{
int sgnx=sign(*vx);
int sgny=sign(*vy);
if (rand()%4>0)
(*vx)=(*vx)+1*sgnx;
if (sgny!=0)
{
if (rand()%4>2)
(*vy)=(*vy)+1*sgny;
}
else
{
if (rand()%4>2)
(*vy)=(*vy)+((rand()%2)*2-1);
}
}
void print_number(uint8_t* yuv,int print_locationx,int print_locationy,int digit) /*20x30 digit*/
{
int i;
switch (digit)
{
case 0:
for (i=0;i<30;i++)
{
if (i==0 || i==1 || i==28 || i==29)
cudaMemset(yuv+W*(print_locationy+i)+print_locationx,255,20);
else
{
cudaMemset(yuv+W*(print_locationy+i)+print_locationx,255,3);
cudaMemset(yuv+W*(print_locationy+i)+print_locationx+17,255,3);
}
}
break;
case 1:
for (i=0;i<30;i++)
{
cudaMemset(yuv+W*(print_locationy+i)+print_locationx+9,255,3);
}
break;
case 2:
for (i=0;i<30;i++)
{
if (i==0 || i==1 || i==28 || i==29 || i==14 || i==15)
cudaMemset(yuv+W*(print_locationy+i)+print_locationx,255,20);
else if (i<14)
cudaMemset(yuv+W*(print_locationy+i)+print_locationx+17,255,3);
else
cudaMemset(yuv+W*(print_locationy+i)+print_locationx,255,3);
}
break;
case 3:
for (i=0;i<30;i++)
{
if (i==0 || i==1 || i==28 || i==29 || i==14 || i==15)
cudaMemset(yuv+W*(print_locationy+i)+print_locationx,255,20);
else
cudaMemset(yuv+W*(print_locationy+i)+print_locationx+17,255,3);
}
break;
case 4:
for (i=0;i<30;i++)
{
if (i==14 || i==15)
cudaMemset(yuv+W*(print_locationy+i)+print_locationx,255,20);
else if (i<14)
{
cudaMemset(yuv+W*(print_locationy+i)+print_locationx,255,3);
cudaMemset(yuv+W*(print_locationy+i)+print_locationx+17,255,3);
}
else
cudaMemset(yuv+W*(print_locationy+i)+print_locationx+17,255,3);
}
break;
case 5:
for (i=0;i<30;i++)
{
if (i==0 || i==1 || i==28 || i==29 || i==14 || i==15)
cudaMemset(yuv+W*(print_locationy+i)+print_locationx,255,20);
else if (i<14)
cudaMemset(yuv+W*(print_locationy+i)+print_locationx,255,3);
else
cudaMemset(yuv+W*(print_locationy+i)+print_locationx+17,255,3);
}
break;
case 6:
for (i=0;i<30;i++)
{
if (i==0 || i==1 || i==28 || i==29 || i==14 || i==15)
cudaMemset(yuv+W*(print_locationy+i)+print_locationx,255,20);
else if (i<14)
cudaMemset(yuv+W*(print_locationy+i)+print_locationx,255,3);
else
{
cudaMemset(yuv+W*(print_locationy+i)+print_locationx,255,3);
cudaMemset(yuv+W*(print_locationy+i)+print_locationx+17,255,3);
}
}
break;
case 7:
for (i=0;i<30;i++)
{
if (i==0 || i==1)
cudaMemset(yuv+W*(print_locationy+i)+print_locationx,255,20);
else
cudaMemset(yuv+W*(print_locationy+i)+print_locationx+17,255,3);
}
break;
case 8:
for (i=0;i<30;i++)
{
if (i==0 || i==1 || i==28 || i==29|| i==14 || i==15)
cudaMemset(yuv+W*(print_locationy+i)+print_locationx,255,20);
else
{
cudaMemset(yuv+W*(print_locationy+i)+print_locationx,255,3);
cudaMemset(yuv+W*(print_locationy+i)+print_locationx+17,255,3);
}
}
break;
case 9:
for (i=0;i<30;i++)
{
if (i==0 || i==1 || i==28 || i==29|| i==14 || i==15)
cudaMemset(yuv+W*(print_locationy+i)+print_locationx,255,20);
else if (i<14)
{
cudaMemset(yuv+W*(print_locationy+i)+print_locationx,255,3);
cudaMemset(yuv+W*(print_locationy+i)+print_locationx+17,255,3);
}
else
cudaMemset(yuv+W*(print_locationy+i)+print_locationx+17,255,3);
}
break;
default:
break;
}
}
void show_score(uint8_t *yuv,int cei, int left_score, int right_score)
{
int print_locationx=10;
int print_locationy=cei+5;
int temp=left_score;
int digit_count=1;
while(temp/10)
{
digit_count+=1;
temp/=10;
}
temp=left_score;
for (int i=digit_count;i>0;i--)
{
print_number(yuv,print_locationx,print_locationy,temp/(pow(10,i-1)));
temp=temp%((int)(pow(10,i-1)));
print_locationx+=22;
}
temp=right_score;
print_locationx=W-10-22;
digit_count=1;
while(temp/10)
{
digit_count+=1;
temp/=10;
}
temp=right_score;
for (int i=digit_count;i>0;i--)
{
print_number(yuv,print_locationx,print_locationy,temp%10);
temp=temp/10;
print_locationx-=22;
}
}
struct Lab2VideoGenerator::Impl {
int left_bar_xc=10;
int right_bar_xc=W-10;
int bar_length_perside=15;
int bar_thickness=2;
int left_score=0;
int right_score=0;
int ball_x=W/2;
int ball_xv=0;
int ball_yv=0;
int ceiling=10;
int flor=H-40;
int left_bar_yc=(flor+ceiling)/2;
int right_bar_yc=(flor+ceiling)/2;
int ball_y=(flor+ceiling)/2;
int t=0;
int bar_max_speed=3;
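    // phase: 'i' = serve a new ball, 'r' = rally in progress,
    // 'g' = goal scored (flash the screen, update the score, then re-serve)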
char phase='i';
};
Lab2VideoGenerator::Lab2VideoGenerator(): impl(new Impl) {
}
Lab2VideoGenerator::~Lab2VideoGenerator() {}
void Lab2VideoGenerator::get_info(Lab2VideoInfo &info) {
info.w = W;
info.h = H;
info.n_frame = NFRAME;
// fps = 60/1 = 60
info.fps_n = 60;
info.fps_d = 1;
};
void Lab2VideoGenerator::Generate(uint8_t *yuv) {
cudaMemset(yuv, 0, W*H);
cudaMemset(yuv+W*H, 128, W*H/2);
if ((impl->phase)=='i')
{
init_board(&(impl->ball_xv),&(impl->ball_yv),&(impl->ball_x),&(impl->ball_y),&(impl->left_bar_yc),&(impl->right_bar_yc),(impl->ceiling),(impl->flor));
(impl->phase)='r';
}
else if ((impl->phase)=='r')
{
if ((impl->ball_xv)>0)
{
int to_move=(impl->ball_y)-(impl->right_bar_yc);
if (std::abs(to_move)>(impl->bar_max_speed))
to_move=sign(to_move)*(impl->bar_max_speed);
if (to_move>0 && ((impl->right_bar_yc)+(impl->bar_length_perside)+to_move)>(impl->flor))
to_move=(impl->flor)-((impl->right_bar_yc)+(impl->bar_length_perside));
if (to_move<0 && ((impl->right_bar_yc)-(impl->bar_length_perside)+to_move)<(impl->ceiling))
to_move=(impl->ceiling)-((impl->right_bar_yc)-(impl->bar_length_perside));
(impl->right_bar_yc)+=to_move;
}
else
{
int to_move=(impl->ball_y)-(impl->left_bar_yc);
if (std::abs(to_move)>(impl->bar_max_speed))
to_move=sign(to_move)*(impl->bar_max_speed);
if (to_move>0 && ((impl->left_bar_yc)+(impl->bar_length_perside)+to_move)>(impl->flor))
to_move=(impl->flor)-((impl->left_bar_yc)+(impl->bar_length_perside));
if (to_move<0 && ((impl->left_bar_yc)-(impl->bar_length_perside)+to_move)<(impl->ceiling))
to_move=(impl->ceiling)-((impl->left_bar_yc)-(impl->bar_length_perside));
(impl->left_bar_yc)+=to_move;
}
(impl->ball_x)+=(impl->ball_xv);
(impl->ball_y)+=(impl->ball_yv);
if((impl->ball_y)<=(impl->ceiling))
{
(impl->ball_y)=(impl->ceiling)+((impl->ceiling)-(impl->ball_y))+1;
(impl->ball_yv)=(-1)*(impl->ball_yv);
}
if((impl->ball_y)>=(impl->flor))
{
(impl->ball_y)=(impl->flor)-((impl->ball_y)-(impl->flor))-1;
(impl->ball_yv)=(-1)*(impl->ball_yv);
}
if((impl->ball_x)<=(impl->left_bar_xc) && (impl->ball_y)>=(impl->left_bar_yc)-(impl->bar_length_perside) && (impl->ball_y)<=(impl->left_bar_yc)+(impl->bar_length_perside))
{
(impl->ball_x)=(impl->left_bar_xc)+((impl->left_bar_xc)-(impl->ball_x))+1;
(impl->ball_xv)=(-1)*(impl->ball_xv);
ball_speed_up(&(impl->ball_xv),&(impl->ball_yv));
}
if((impl->ball_x)>=(impl->right_bar_xc) && (impl->ball_y)>=(impl->right_bar_yc)-(impl->bar_length_perside) && (impl->ball_y)<=(impl->right_bar_yc)+(impl->bar_length_perside))
{
(impl->ball_x)=(impl->right_bar_xc)-((impl->ball_x)-(impl->right_bar_xc))-1;
(impl->ball_xv)=(-1)*(impl->ball_xv);
ball_speed_up(&(impl->ball_xv),&(impl->ball_yv));
}
if ((impl->ball_x)<=1)
{
(impl->ball_x)=1;
(impl->phase)='g';
(impl->t)=0;
}
if ((impl->ball_x)>=W-1)
{
(impl->ball_x)=W-1;
(impl->phase)='g';
(impl->t)=0;
}
}
else if ((impl->phase)=='g')
{
if ((impl->t)==0)
{
if ((impl->ball_x)==1)
(impl->right_score)+=1;
if ((impl->ball_x)==(W-1))
(impl->left_score)+=1;
}
if ((impl->t)%2)
{
cudaMemset(yuv, 255, W*H);
cudaMemset(yuv+W*H, 128, W*H/2);
}
if((impl->t)==10)
(impl->phase)='i';
(impl->t)++;
}
show_boarder(yuv,(impl->ceiling),(impl->flor));
show_score(yuv,(impl->flor),(impl->left_score),(impl->right_score));
show_ball(yuv,(impl->ball_x),(impl->ball_y));
show_bouncer(yuv,(impl->left_bar_xc),(impl->left_bar_yc),(impl->right_bar_xc),(impl->right_bar_yc),(impl->bar_length_perside),(impl->bar_thickness));
}
void init_board(int* sx, int* sy,int* x, int* y,int* lb, int* rb, int cei, int flr)
{
srand(time(NULL));
*sx=(rand()%2+1)*((rand()%2)*2-1);
*sy=rand()%3-1;
*x=W/2;
*y=(flr+cei)/2;
*lb=(flr+cei)/2;
*rb=(flr+cei)/2;
}
|
f1a6caa6c8641811be3abc42bc43af24a3fefd4a.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "NvtxMemoryPool.h"
#include <hip/hip_runtime_api.h>
#include <cassert>
#include <cstddef>
#include <cstdint>
#define checkCudaErrors(Code) assert((Code) == hipSuccess)
#define checkCudaLaunch(...) checkCudaErrors((__VA_ARGS__, hipPeekAtLastError()))
__global__ void Iota(uint8_t* v)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
v[i] = static_cast<uint8_t>(i);
}
int main(void)
{
constexpr size_t PoolSize = 4096 * sizeof(uint8_t);
constexpr size_t NumThreads = 63;
constexpr size_t AllocSize = NumThreads * sizeof(uint8_t);
auto nvtxDomain = nvtxDomainCreateA("my-domain");
void *pool;
checkCudaErrors(hipMalloc(&pool, PoolSize));
{
// Suballocator object creation (c.f. NvtxMemoryPool.h)
auto suballocator = NV::Suballocator(nvtxDomain, pool, PoolSize);
// Create a suballocation of size AllocSize at offset 16
auto alloc = (uint8_t*)pool + 16;
suballocator.Register(alloc, AllocSize);
// Success: allocation is valid
        hipLaunchKernelGGL(Iota, dim3(1), dim3(NumThreads), 0, 0, alloc);
        checkCudaErrors(hipPeekAtLastError());
checkCudaErrors(hipDeviceSynchronize());
// Violation: last byte out of bounds
        hipLaunchKernelGGL(Iota, dim3(1), dim3(NumThreads + 1), 0, 0, alloc);
        checkCudaErrors(hipPeekAtLastError());
checkCudaErrors(hipDeviceSynchronize());
// Success: resizing
suballocator.Resize(alloc, AllocSize + 1);
        hipLaunchKernelGGL(Iota, dim3(1), dim3(NumThreads + 1), 0, 0, alloc);
        checkCudaErrors(hipPeekAtLastError());
checkCudaErrors(hipDeviceSynchronize());
// Violation: access after free
suballocator.Unregister(alloc);
        hipLaunchKernelGGL(Iota, dim3(1), dim3(1), 0, 0, alloc);
        checkCudaErrors(hipPeekAtLastError());
checkCudaErrors(hipDeviceSynchronize());
// Violation: access after reset
suballocator.Register(alloc, AllocSize);
suballocator.Reset();
        hipLaunchKernelGGL(Iota, dim3(1), dim3(1), 0, 0, alloc);
        checkCudaErrors(hipPeekAtLastError());
checkCudaErrors(hipDeviceSynchronize());
}
checkCudaErrors(hipFree(pool));
}
| f1a6caa6c8641811be3abc42bc43af24a3fefd4a.cu | /* Copyright (c) 2021-2023, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "NvtxMemoryPool.h"
#include <cuda_runtime_api.h>
#include <cassert>
#include <cstddef>
#include <cstdint>
#define checkCudaErrors(Code) assert((Code) == cudaSuccess)
#define checkCudaLaunch(...) checkCudaErrors((__VA_ARGS__, cudaPeekAtLastError()))
__global__ void Iota(uint8_t* v)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
v[i] = static_cast<uint8_t>(i);
}
int main(void)
{
constexpr size_t PoolSize = 4096 * sizeof(uint8_t);
constexpr size_t NumThreads = 63;
constexpr size_t AllocSize = NumThreads * sizeof(uint8_t);
auto nvtxDomain = nvtxDomainCreateA("my-domain");
void *pool;
checkCudaErrors(cudaMalloc(&pool, PoolSize));
{
// Suballocator object creation (c.f. NvtxMemoryPool.h)
auto suballocator = NV::Suballocator(nvtxDomain, pool, PoolSize);
// Create a suballocation of size AllocSize at offset 16
auto alloc = (uint8_t*)pool + 16;
suballocator.Register(alloc, AllocSize);
// Success: allocation is valid
checkCudaLaunch(Iota<<<1, NumThreads>>>(alloc));
checkCudaErrors(cudaDeviceSynchronize());
// Violation: last byte out of bounds
checkCudaLaunch(Iota<<<1, NumThreads + 1>>>(alloc));
checkCudaErrors(cudaDeviceSynchronize());
// Success: resizing
suballocator.Resize(alloc, AllocSize + 1);
checkCudaLaunch(Iota<<<1, NumThreads + 1>>>(alloc));
checkCudaErrors(cudaDeviceSynchronize());
// Violation: access after free
suballocator.Unregister(alloc);
checkCudaLaunch(Iota<<<1, 1>>>(alloc));
checkCudaErrors(cudaDeviceSynchronize());
// Violation: access after reset
suballocator.Register(alloc, AllocSize);
suballocator.Reset();
checkCudaLaunch(Iota<<<1, 1>>>(alloc));
checkCudaErrors(cudaDeviceSynchronize());
}
checkCudaErrors(cudaFree(pool));
}
|
8bca1ddfacf529087a66d99ec62afb02424d07f9.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "freshman.h"
void sumArrays(float * a,float * b,float * res,const int size)
{
for(int i=0;i<size;i+=4)
{
res[i]=a[i]+b[i];
res[i+1]=a[i+1]+b[i+1];
res[i+2]=a[i+2]+b[i+2];
res[i+3]=a[i+3]+b[i+3];
}
}
__global__ void sumArraysGPU(float*a,float*b,float*res,int N)
{
int i=blockIdx.x*blockDim.x+threadIdx.x;
if(i < N)
res[i]=a[i]+b[i];
}
int main(int argc,char **argv)
{
// set up device
initDevice(0);
int nElem=1<<24;
printf("Vector size:%d\n",nElem);
int nByte=sizeof(float)*nElem;
float *a_h=(float*)malloc(nByte);
float *b_h=(float*)malloc(nByte);
float *res_h=(float*)malloc(nByte);
float *res_from_gpu_h=(float*)malloc(nByte);
memset(res_h,0,nByte);
memset(res_from_gpu_h,0,nByte);
float *a_d,*b_d,*res_d;
CHECK(hipMalloc((float**)&a_d,nByte));
CHECK(hipMalloc((float**)&b_d,nByte));
CHECK(hipMalloc((float**)&res_d,nByte));
initialData(a_h,nElem);
initialData(b_h,nElem);
CHECK(hipMemcpy(a_d,a_h,nByte,hipMemcpyHostToDevice));
CHECK(hipMemcpy(b_d,b_h,nByte,hipMemcpyHostToDevice));
dim3 block(1024);
dim3 grid((nElem-1)/block.x+1);
//timer
double iStart,iElaps;
iStart=cpuSecond();
hipLaunchKernelGGL(( sumArraysGPU), dim3(grid),dim3(block), 0, 0, a_d,b_d,res_d,nElem);
CHECK(hipMemcpy(res_from_gpu_h,res_d,nByte,hipMemcpyDeviceToHost));
iElaps=cpuSecond()-iStart;
printf("Execution configuration<<<%d,%d>>> Time elapsed %f sec\n",grid.x,block.x,iElaps);
sumArrays(a_h,b_h,res_h,nElem);
checkResult(res_h,res_from_gpu_h,nElem);
hipFree(a_d);
hipFree(b_d);
hipFree(res_d);
free(a_h);
free(b_h);
free(res_h);
free(res_from_gpu_h);
return 0;
}
| 8bca1ddfacf529087a66d99ec62afb02424d07f9.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include "freshman.h"
void sumArrays(float * a,float * b,float * res,const int size)
{
for(int i=0;i<size;i+=4)
{
res[i]=a[i]+b[i];
res[i+1]=a[i+1]+b[i+1];
res[i+2]=a[i+2]+b[i+2];
res[i+3]=a[i+3]+b[i+3];
}
}
__global__ void sumArraysGPU(float*a,float*b,float*res,int N)
{
int i=blockIdx.x*blockDim.x+threadIdx.x;
if(i < N)
res[i]=a[i]+b[i];
}
int main(int argc,char **argv)
{
// set up device
initDevice(0);
int nElem=1<<24;
printf("Vector size:%d\n",nElem);
int nByte=sizeof(float)*nElem;
float *a_h=(float*)malloc(nByte);
float *b_h=(float*)malloc(nByte);
float *res_h=(float*)malloc(nByte);
float *res_from_gpu_h=(float*)malloc(nByte);
memset(res_h,0,nByte);
memset(res_from_gpu_h,0,nByte);
float *a_d,*b_d,*res_d;
CHECK(cudaMalloc((float**)&a_d,nByte));
CHECK(cudaMalloc((float**)&b_d,nByte));
CHECK(cudaMalloc((float**)&res_d,nByte));
initialData(a_h,nElem);
initialData(b_h,nElem);
CHECK(cudaMemcpy(a_d,a_h,nByte,cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(b_d,b_h,nByte,cudaMemcpyHostToDevice));
dim3 block(1024);
dim3 grid((nElem-1)/block.x+1);
//timer
double iStart,iElaps;
iStart=cpuSecond();
sumArraysGPU<<<grid,block>>>(a_d,b_d,res_d,nElem);
CHECK(cudaMemcpy(res_from_gpu_h,res_d,nByte,cudaMemcpyDeviceToHost));
iElaps=cpuSecond()-iStart;
printf("Execution configuration<<<%d,%d>>> Time elapsed %f sec\n",grid.x,block.x,iElaps);
sumArrays(a_h,b_h,res_h,nElem);
checkResult(res_h,res_from_gpu_h,nElem);
cudaFree(a_d);
cudaFree(b_d);
cudaFree(res_d);
free(a_h);
free(b_h);
free(res_h);
free(res_from_gpu_h);
return 0;
}
|
f991e7187fba984b0236dab6bf9a704e61b3636d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Grean and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
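//A quick check on these weights: they sum to 1.0f, so a neutral grey pixel with
//R = G = B = 128 keeps intensity 128, while pure green (0, 255, 0) maps to
//.587f * 255 ~= 150 and pure blue (0, 0, 255) to only .114f * 255 ~= 29.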
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int index = i + j * numCols;
if (i < numCols && j < numRows)
{
uchar4 rgb = rgbaImage[index];
float result = .299f * rgb.x + .587f * rgb.y + .114f * rgb.z;
greyImage[index] = result;
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
size_t K = 16;
const dim3 blockSize(K, K, 1); //TODO
const dim3 gridSize((numCols + K - 1) / K, (numRows + K - 1) / K, 1); //TODO
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| f991e7187fba984b0236dab6bf9a704e61b3636d.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Grean and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
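//A quick check on these weights: they sum to 1.0f, so a neutral grey pixel with
//R = G = B = 128 keeps intensity 128, while pure green (0, 255, 0) maps to
//.587f * 255 ~= 150 and pure blue (0, 0, 255) to only .114f * 255 ~= 29.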
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int index = i + j * numCols;
if (i < numCols && j < numRows)
{
uchar4 rgb = rgbaImage[index];
float result = .299f * rgb.x + .587f * rgb.y + .114f * rgb.z;
greyImage[index] = result;
}
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
size_t K = 16;
const dim3 blockSize(K, K, 1); //TODO
const dim3 gridSize((numCols + K - 1) / K, (numRows + K - 1) / K, 1); //TODO
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
c935867735ea23bde8ad91b81dbe96a8ee7dec72.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by zhangjian on 19-6-17.
//
#include "../common/book.h"
#include "../common/image.h"
#include <iostream>
#define rnd(x) (x * rand() / RAND_MAX)
#define INF 2e10f
#define SPHERES 20
const int DIM = 1024;
struct Sphere{
float r, b, g;
float radius;
float x, y, z;
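    // Returns the z-coordinate at which a ray fired from pixel (ox, oy) parallel to
    // the z-axis hits this sphere, or -INF if it misses; *n receives dz/radius
    // (0..1), which the kernel below uses as a simple shading factor.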
__device__ float hit(float ox, float oy, float *n){
float dx = ox - x;
float dy = oy - y;
if (dx * dx + dy * dy < radius * radius){
float dz = sqrtf(radius * radius - dx * dx - dy * dy);
*n = dz / sqrtf(radius * radius);
return dz + z;
}
return -INF;
}
};
__constant__ Sphere s[SPHERES];
__global__ void kernel(unsigned char *ptr){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float ox = (x - DIM / 2);
float oy = (y - DIM / 2);
float r(0), g(0), b(0);
float maxz = -INF;
for(int i = 0; i < SPHERES; i++){
float n;
float t = s[i].hit(ox, oy, &n);
if(t > maxz){
float fscale = n;
r = s[i].r * fscale;
g = s[i].g * fscale;
b = s[i].b * fscale;
maxz = t;
}
}
ptr[offset*4 + 0] = (int)(r * 255);
ptr[offset*4 + 1] = (int)(g * 255);
ptr[offset*4 + 2] = (int)(b * 255);
ptr[offset*4 + 3] = 255;
}
int main(void){
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
IMAGE bitmap(DIM, DIM);
unsigned char * dev_bitmap;
// Sphere *s;
hipMalloc((void**)&dev_bitmap, bitmap.image_size());
// hipMalloc((void**)&s, sizeof(Sphere) * SPHERES);
Sphere *temp_s = (Sphere*)malloc(sizeof(Sphere) * SPHERES);
for(int i = 0; i < SPHERES; i++){
temp_s[i].r = rnd(1.0f);
temp_s[i].g = rnd(1.0f);
temp_s[i].b = rnd(1.0f);
temp_s[i].x = rnd(1000.0f) - 500;
temp_s[i].y = rnd(1000.0f) - 500;
temp_s[i].z = rnd(1000.0f) - 500;
temp_s[i].radius = rnd(100.0f) + 20;
}
// hipMemcpy(s, temp_s, sizeof(Sphere)* SPHERES, hipMemcpyHostToDevice);
hipMemcpyToSymbol(s, temp_s, sizeof(Sphere) * SPHERES);
free(temp_s);
dim3 grids(DIM / 16, DIM / 16);
dim3 threads(16, 16);
hipLaunchKernelGGL(( kernel), dim3(grids), dim3(threads) , 0, 0, dev_bitmap);
hipMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
std::cout << "Time to generate: %3.1f ms\n" << elapsedTime << std::endl;
hipEventDestroy(start);
hipEventDestroy(stop);
hipFree(dev_bitmap);
// hipFree(s);
bitmap.show_image();
}
| c935867735ea23bde8ad91b81dbe96a8ee7dec72.cu | //
// Created by zhangjian on 19-6-17.
//
#include "../common/book.h"
#include "../common/image.h"
#include <iostream>
#define rnd(x) (x * rand() / RAND_MAX)
#define INF 2e10f
#define SPHERES 20
const int DIM = 1024;
struct Sphere{
float r, b, g;
float radius;
float x, y, z;
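    // Returns the z-coordinate at which a ray fired from pixel (ox, oy) parallel to
    // the z-axis hits this sphere, or -INF if it misses; *n receives dz/radius
    // (0..1), which the kernel below uses as a simple shading factor.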
__device__ float hit(float ox, float oy, float *n){
float dx = ox - x;
float dy = oy - y;
if (dx * dx + dy * dy < radius * radius){
float dz = sqrtf(radius * radius - dx * dx - dy * dy);
*n = dz / sqrtf(radius * radius);
return dz + z;
}
return -INF;
}
};
__constant__ Sphere s[SPHERES];
__global__ void kernel(unsigned char *ptr){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float ox = (x - DIM / 2);
float oy = (y - DIM / 2);
float r(0), g(0), b(0);
float maxz = -INF;
for(int i = 0; i < SPHERES; i++){
float n;
float t = s[i].hit(ox, oy, &n);
if(t > maxz){
float fscale = n;
r = s[i].r * fscale;
g = s[i].g * fscale;
b = s[i].b * fscale;
maxz = t;
}
}
ptr[offset*4 + 0] = (int)(r * 255);
ptr[offset*4 + 1] = (int)(g * 255);
ptr[offset*4 + 2] = (int)(b * 255);
ptr[offset*4 + 3] = 255;
}
int main(void){
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
IMAGE bitmap(DIM, DIM);
unsigned char * dev_bitmap;
// Sphere *s;
cudaMalloc((void**)&dev_bitmap, bitmap.image_size());
// cudaMalloc((void**)&s, sizeof(Sphere) * SPHERES);
Sphere *temp_s = (Sphere*)malloc(sizeof(Sphere) * SPHERES);
for(int i = 0; i < SPHERES; i++){
temp_s[i].r = rnd(1.0f);
temp_s[i].g = rnd(1.0f);
temp_s[i].b = rnd(1.0f);
temp_s[i].x = rnd(1000.0f) - 500;
temp_s[i].y = rnd(1000.0f) - 500;
temp_s[i].z = rnd(1000.0f) - 500;
temp_s[i].radius = rnd(100.0f) + 20;
}
// cudaMemcpy(s, temp_s, sizeof(Sphere)* SPHERES, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(s, temp_s, sizeof(Sphere) * SPHERES);
free(temp_s);
dim3 grids(DIM / 16, DIM / 16);
dim3 threads(16, 16);
kernel<<<grids, threads >>>(dev_bitmap);
cudaMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
std::cout << "Time to generate: %3.1f ms\n" << elapsedTime << std::endl;
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(dev_bitmap);
// cudaFree(s);
bitmap.show_image();
}
|
492e28fe7bc3957c4637b450ba68edcc57ab7fc9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void add( int a, int b, int *c ) {
*c = a + b;
}
int main( void ) {
int c;
int *dev_c;
hipMalloc( (void**)&dev_c, sizeof(int) );
hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, 2, 3, dev_c );
hipMemcpy( &c, dev_c, sizeof(int), hipMemcpyDeviceToHost );
printf( "2 + 3 = %d\n", c );
hipFree( dev_c );
return 0;
}
| 492e28fe7bc3957c4637b450ba68edcc57ab7fc9.cu | #include <stdio.h>
__global__ void add( int a, int b, int *c ) {
*c = a + b;
}
int main( void ) {
int c;
int *dev_c;
cudaMalloc( (void**)&dev_c, sizeof(int) );
add<<<1,1>>>( 2, 3, dev_c );
cudaMemcpy( &c, dev_c, sizeof(int), cudaMemcpyDeviceToHost );
printf( "2 + 3 = %d\n", c );
cudaFree( dev_c );
return 0;
}
|
c7afe91a21dc4ff75dd461811b0f7125a372fe64.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h>
#include <stdint.h>
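// Element-wise ReLU forward pass: out = max(x, 0), computed branch-free as x * (x > 0).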
__global__ void activate_rect_fwd_kernel(
const float *in_act,
uint32_t dim,
float *out_act)
{
uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
float x = in_act[idx];
out_act[idx] = x * (x > 0.0f);
}
}
extern "C" void neuralops_cuda_activate_rect_fwd(
const float *in_act,
size_t dim,
float *out_act,
hipStream_t stream)
{
hipLaunchKernelGGL(( activate_rect_fwd_kernel), dim3((dim+1024-1)/1024), dim3(1024), 0, stream,
in_act, dim, out_act);
}
__global__ void activate_rect_bwd_kernel(
const float *in_act,
uint32_t dim,
const float *out_delta,
float *in_delta)
{
uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
float x = in_act[idx];
in_delta[idx] = out_delta[idx] * (x > 0.0f);
}
}
extern "C" void neuralops_cuda_activate_rect_bwd(
const float *in_act,
size_t dim,
const float *out_delta,
float *in_delta,
hipStream_t stream)
{
hipLaunchKernelGGL(( activate_rect_bwd_kernel), dim3((dim+1024-1)/1024), dim3(1024), 0, stream,
in_act, dim, out_delta, in_delta);
}
__global__ void activate_rect_bwd2_kernel(
const float *in_act,
uint32_t dim,
const float *out_delta2,
float *in_delta2)
{
uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
float x = in_act[idx];
in_delta2[idx] = out_delta2[idx] * (x > 0.0f);
}
}
extern "C" void neuralops_cuda_activate_rect_bwd2(
const float *in_act,
size_t dim,
const float *out_delta2,
float *in_delta2,
hipStream_t stream)
{
hipLaunchKernelGGL(( activate_rect_bwd2_kernel), dim3((dim+1024-1)/1024), dim3(1024), 0, stream,
in_act, dim, out_delta2, in_delta2);
}
__global__ void activate_rect_rfwd_kernel(
const float *in_val,
uint32_t dim,
const float *in_r_val,
float *out_r_val)
{
uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
float x = in_val[idx];
out_r_val[idx] = in_r_val[idx] * (x > 0.0f);
}
}
extern "C" void neuralops_cuda_activate_rect_rfwd(
const float *in_val,
size_t dim,
const float *in_r_val,
float *out_r_val,
hipStream_t stream)
{
hipLaunchKernelGGL(( activate_rect_rfwd_kernel), dim3((dim+1024-1)/1024), dim3(1024), 0, stream,
in_val, dim, in_r_val, out_r_val);
}
__global__ void activate_rect_rbwd_kernel(
const float *in_val,
uint32_t dim,
const float *out_r_grad,
float *in_r_grad)
{
uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
float x = in_val[idx];
in_r_grad[idx] = out_r_grad[idx] * (x > 0.0f);
}
}
extern "C" void neuralops_cuda_activate_rect_rbwd(
const float *in_val,
size_t dim,
const float *out_r_grad,
float *in_r_grad,
hipStream_t stream)
{
hipLaunchKernelGGL(( activate_rect_rbwd_kernel), dim3((dim+1024-1)/1024), dim3(1024), 0, stream,
in_val, dim, out_r_grad, in_r_grad);
}
__global__ void activate_leakrect_fwd_kernel(
const float *in_act,
uint32_t dim,
float *out_act,
float neg_slope)
{
uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
float x = in_act[idx];
int mask = x > 0.0f;
out_act[idx] = x * (neg_slope * (1 - mask) + mask);
}
}
extern "C" void neuralops_cuda_activate_leakrect_fwd(
const float *in_act,
size_t dim,
float *out_act,
float neg_slope,
hipStream_t stream)
{
hipLaunchKernelGGL(( activate_leakrect_fwd_kernel), dim3((dim+1024-1)/1024), dim3(1024), 0, stream,
in_act, dim, out_act, neg_slope);
}
__global__ void activate_leakrect_bwd_kernel(
const float *in_act,
uint32_t dim,
const float *out_delta,
float *in_delta,
float neg_slope)
{
uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
float x = in_act[idx];
int mask = x > 0.0f;
float dy = out_delta[idx];
in_delta[idx] = dy * (neg_slope * (1 - mask) + mask);
}
}
extern "C" void neuralops_cuda_activate_leakrect_bwd(
const float *in_act,
size_t dim,
const float *out_delta,
float *in_delta,
float neg_slope,
hipStream_t stream)
{
hipLaunchKernelGGL(( activate_leakrect_bwd_kernel), dim3((dim+1024-1)/1024), dim3(1024), 0, stream,
in_act, dim, out_delta, in_delta, neg_slope);
}
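// Logistic (sigmoid) forward: out = 1 / (1 + exp(-x)).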
__global__ void activate_logistic_fwd_kernel(
const float *in_buf,
uint32_t dim,
float *out_buf)
{
uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
float x = in_buf[idx];
out_buf[idx] = 1.0f / (1.0f + expf(-x));
}
}
extern "C" void neuralops_cuda_activate_logistic_fwd(
const float *in_buf,
size_t dim,
float *out_buf,
hipStream_t stream)
{
hipLaunchKernelGGL(( activate_logistic_fwd_kernel), dim3((dim+1024-1)/1024), dim3(1024), 0, stream,
in_buf, dim, out_buf);
}
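// Logistic backward: in_delta = sigma(x) * (1 - sigma(x)) * out_delta,
// recomputing sigma(x) from the stored input.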
__global__ void activate_logistic_bwd_kernel(
const float *in_buf,
uint32_t dim,
const float *out_delta,
float *in_delta)
{
uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
float x = in_buf[idx];
float y = 1.0f / (1.0f + expf(-x));
in_delta[idx] = y * (1.0f - y) * out_delta[idx];
}
}
extern "C" void neuralops_cuda_activate_logistic_bwd(
const float *in_buf,
size_t dim,
const float *out_delta,
float *in_delta,
hipStream_t stream)
{
hipLaunchKernelGGL(( activate_logistic_bwd_kernel), dim3((dim+1024-1)/1024), dim3(1024), 0, stream,
in_buf, dim, out_delta, in_delta);
}
| c7afe91a21dc4ff75dd461811b0f7125a372fe64.cu | #include <cuda_runtime_api.h>
#include <stdint.h>
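// ReLU forward pass: out = x * 1[x > 0], one element per thread.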
__global__ void activate_rect_fwd_kernel(
const float *in_act,
uint32_t dim,
float *out_act)
{
uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
float x = in_act[idx];
out_act[idx] = x * (x > 0.0f);
}
}
extern "C" void neuralops_cuda_activate_rect_fwd(
const float *in_act,
size_t dim,
float *out_act,
cudaStream_t stream)
{
activate_rect_fwd_kernel<<<(dim+1024-1)/1024, 1024, 0, stream>>>(
in_act, dim, out_act);
}
__global__ void activate_rect_bwd_kernel(
const float *in_act,
uint32_t dim,
const float *out_delta,
float *in_delta)
{
uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
float x = in_act[idx];
in_delta[idx] = out_delta[idx] * (x > 0.0f);
}
}
extern "C" void neuralops_cuda_activate_rect_bwd(
const float *in_act,
size_t dim,
const float *out_delta,
float *in_delta,
cudaStream_t stream)
{
activate_rect_bwd_kernel<<<(dim+1024-1)/1024, 1024, 0, stream>>>(
in_act, dim, out_delta, in_delta);
}
__global__ void activate_rect_bwd2_kernel(
const float *in_act,
uint32_t dim,
const float *out_delta2,
float *in_delta2)
{
uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
float x = in_act[idx];
in_delta2[idx] = out_delta2[idx] * (x > 0.0f);
}
}
extern "C" void neuralops_cuda_activate_rect_bwd2(
const float *in_act,
size_t dim,
const float *out_delta2,
float *in_delta2,
cudaStream_t stream)
{
activate_rect_bwd2_kernel<<<(dim+1024-1)/1024, 1024, 0, stream>>>(
in_act, dim, out_delta2, in_delta2);
}
__global__ void activate_rect_rfwd_kernel(
const float *in_val,
uint32_t dim,
const float *in_r_val,
float *out_r_val)
{
uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
float x = in_val[idx];
out_r_val[idx] = in_r_val[idx] * (x > 0.0f);
}
}
extern "C" void neuralops_cuda_activate_rect_rfwd(
const float *in_val,
size_t dim,
const float *in_r_val,
float *out_r_val,
cudaStream_t stream)
{
activate_rect_rfwd_kernel<<<(dim+1024-1)/1024, 1024, 0, stream>>>(
in_val, dim, in_r_val, out_r_val);
}
__global__ void activate_rect_rbwd_kernel(
const float *in_val,
uint32_t dim,
const float *out_r_grad,
float *in_r_grad)
{
uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
float x = in_val[idx];
in_r_grad[idx] = out_r_grad[idx] * (x > 0.0f);
}
}
extern "C" void neuralops_cuda_activate_rect_rbwd(
const float *in_val,
size_t dim,
const float *out_r_grad,
float *in_r_grad,
cudaStream_t stream)
{
activate_rect_rbwd_kernel<<<(dim+1024-1)/1024, 1024, 0, stream>>>(
in_val, dim, out_r_grad, in_r_grad);
}
__global__ void activate_leakrect_fwd_kernel(
const float *in_act,
uint32_t dim,
float *out_act,
float neg_slope)
{
uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
float x = in_act[idx];
int mask = x > 0.0f;
out_act[idx] = x * (neg_slope * (1 - mask) + mask);
}
}
extern "C" void neuralops_cuda_activate_leakrect_fwd(
const float *in_act,
size_t dim,
float *out_act,
float neg_slope,
cudaStream_t stream)
{
activate_leakrect_fwd_kernel<<<(dim+1024-1)/1024, 1024, 0, stream>>>(
in_act, dim, out_act, neg_slope);
}
__global__ void activate_leakrect_bwd_kernel(
const float *in_act,
uint32_t dim,
const float *out_delta,
float *in_delta,
float neg_slope)
{
uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
float x = in_act[idx];
int mask = x > 0.0f;
float dy = out_delta[idx];
in_delta[idx] = dy * (neg_slope * (1 - mask) + mask);
}
}
extern "C" void neuralops_cuda_activate_leakrect_bwd(
const float *in_act,
size_t dim,
const float *out_delta,
float *in_delta,
float neg_slope,
cudaStream_t stream)
{
activate_leakrect_bwd_kernel<<<(dim+1024-1)/1024, 1024, 0, stream>>>(
in_act, dim, out_delta, in_delta, neg_slope);
}
__global__ void activate_logistic_fwd_kernel(
const float *in_buf,
uint32_t dim,
float *out_buf)
{
uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
float x = in_buf[idx];
out_buf[idx] = 1.0f / (1.0f + expf(-x));
}
}
extern "C" void neuralops_cuda_activate_logistic_fwd(
const float *in_buf,
size_t dim,
float *out_buf,
cudaStream_t stream)
{
activate_logistic_fwd_kernel<<<(dim+1024-1)/1024, 1024, 0, stream>>>(
in_buf, dim, out_buf);
}
__global__ void activate_logistic_bwd_kernel(
const float *in_buf,
uint32_t dim,
const float *out_delta,
float *in_delta)
{
uint32_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < dim) {
float x = in_buf[idx];
float y = 1.0f / (1.0f + expf(-x));
in_delta[idx] = y * (1.0f - y) * out_delta[idx];
}
}
extern "C" void neuralops_cuda_activate_logistic_bwd(
const float *in_buf,
size_t dim,
const float *out_delta,
float *in_delta,
cudaStream_t stream)
{
activate_logistic_bwd_kernel<<<(dim+1024-1)/1024, 1024, 0, stream>>>(
in_buf, dim, out_delta, in_delta);
}
|
d3887886db0b0401208595398d9369e2d0339ba9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/NativeFunctions.h>
#include <ATen/NumericUtils.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorCompare.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/hip/HIPApplyUtils.cuh>
namespace at { namespace native {
namespace {
void where_kernel_impl(TensorIterator &iter, ScalarType condition_type) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBFloat16, kBool, iter.dtype(), "where_cuda", [&] {
if (condition_type == at::ScalarType::Byte) {
gpu_kernel(
iter,
[=] GPU_LAMBDA (uint8_t cond_val, scalar_t self_val, scalar_t other_val) -> scalar_t {
return cond_val ? self_val : other_val;
});
} else {
gpu_kernel(
iter,
[=] GPU_LAMBDA (bool cond_val, scalar_t self_val, scalar_t other_val) -> scalar_t {
return cond_val ? self_val : other_val;
});
}
});
}
void isposinf_kernel_impl(TensorIterator &iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.input_dtype(), "isposinf_cuda", [&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA (scalar_t a) -> bool { return a == std::numeric_limits<scalar_t>::infinity(); }
);
});
}
void isneginf_kernel_impl(TensorIterator &iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.input_dtype(), "isneginf_cuda", [&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA (scalar_t a) -> bool { return a == -std::numeric_limits<scalar_t>::infinity(); }
);
});
}
void clamp_kernel_impl(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_cuda", [&] {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t v, scalar_t lower, scalar_t upper) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (at::_isnan(v)) {
return v;
} else {
return ::min(::max(v, lower), upper);
}
});
});
}
void clamp_scalar_kernel_impl(TensorIterator& iter, Scalar min, Scalar max) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_scalar_cuda", [&] {
const auto lower = min.to<scalar_t>();
const auto upper = max.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (at::_isnan(v)) {
return v;
} else {
return ::min(::max(v, lower), upper);
}
});
});
}
void clamp_min_kernel_impl(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_min_cuda", [&] {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t v, scalar_t lower) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (_isnan(v)) {
return v;
} else {
return ::max(v, lower);
}
});
});
}
void clamp_min_scalar_kernel_impl(TensorIterator& iter, Scalar min) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_min_scalar_cuda", [&] {
auto lower = min.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (_isnan(v)) {
return v;
} else {
return ::max(v, lower);
}
});
});
}
void clamp_max_kernel_impl(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_max_cuda", [&] {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t v, scalar_t upper) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (_isnan(v)) {
return v;
} else {
return ::min(v, upper);
}
});
});
}
void clamp_max_scalar_kernel_impl(TensorIterator& iter, Scalar max) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_max_scalar_cuda", [&] {
const auto upper = max.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (_isnan(v)) {
return v;
} else {
return ::min(v, upper);
}
});
});
}
} // anonymous namespace
REGISTER_DISPATCH(where_kernel, &where_kernel_impl);
REGISTER_DISPATCH(isposinf_stub, &isposinf_kernel_impl);
REGISTER_DISPATCH(isneginf_stub, &isneginf_kernel_impl);
REGISTER_DISPATCH(clamp_stub, &clamp_kernel_impl);
REGISTER_DISPATCH(clamp_min_stub, &clamp_min_kernel_impl);
REGISTER_DISPATCH(clamp_max_stub, &clamp_max_kernel_impl);
REGISTER_DISPATCH(clamp_scalar_stub, &clamp_scalar_kernel_impl);
REGISTER_DISPATCH(clamp_min_scalar_stub, &clamp_min_scalar_kernel_impl);
REGISTER_DISPATCH(clamp_max_scalar_stub, &clamp_max_scalar_kernel_impl);
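// Device-side check behind _assert_async: trap (CUDA_KERNEL_ASSERT) if the single
// element is zero; the complex overloads compare against complex zero.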
template <typename scalar_t>
__global__ void _assert_async_cuda_kernel(scalar_t* input) {
CUDA_KERNEL_ASSERT(input[0] != 0);
}
__global__ void _assert_async_cuda_kernel(c10::complex<float>* input) {
CUDA_KERNEL_ASSERT(input[0] != c10::complex<float>(0, 0));
}
__global__ void _assert_async_cuda_kernel(c10::complex<double>* input) {
CUDA_KERNEL_ASSERT(input[0] != c10::complex<double>(0, 0));
}
void _assert_async_cuda(const Tensor& self) {
auto n = self.numel();
TORCH_CHECK(n != 0, "Boolean value of Tensor with no values is ambiguous");
TORCH_CHECK(n < 2, "Boolean value of Tensor with more than one value is ambiguous");
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, self.scalar_type(), "_assert_async_cuda", [&] {
hipLaunchKernelGGL(( _assert_async_cuda_kernel), dim3(1), dim3(1), 0, stream, self.data_ptr<scalar_t>());
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
}} // namespace at::native
| d3887886db0b0401208595398d9369e2d0339ba9.cu | #include <ATen/NativeFunctions.h>
#include <ATen/NumericUtils.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorCompare.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/cuda/CUDAApplyUtils.cuh>
namespace at { namespace native {
namespace {
void where_kernel_impl(TensorIterator &iter, ScalarType condition_type) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBFloat16, kBool, iter.dtype(), "where_cuda", [&] {
if (condition_type == at::ScalarType::Byte) {
gpu_kernel(
iter,
[=] GPU_LAMBDA (uint8_t cond_val, scalar_t self_val, scalar_t other_val) -> scalar_t {
return cond_val ? self_val : other_val;
});
} else {
gpu_kernel(
iter,
[=] GPU_LAMBDA (bool cond_val, scalar_t self_val, scalar_t other_val) -> scalar_t {
return cond_val ? self_val : other_val;
});
}
});
}
void isposinf_kernel_impl(TensorIterator &iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.input_dtype(), "isposinf_cuda", [&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA (scalar_t a) -> bool { return a == std::numeric_limits<scalar_t>::infinity(); }
);
});
}
void isneginf_kernel_impl(TensorIterator &iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.input_dtype(), "isneginf_cuda", [&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA (scalar_t a) -> bool { return a == -std::numeric_limits<scalar_t>::infinity(); }
);
});
}
void clamp_kernel_impl(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_cuda", [&] {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t v, scalar_t lower, scalar_t upper) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (at::_isnan(v)) {
return v;
} else {
return ::min(::max(v, lower), upper);
}
});
});
}
void clamp_scalar_kernel_impl(TensorIterator& iter, Scalar min, Scalar max) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_scalar_cuda", [&] {
const auto lower = min.to<scalar_t>();
const auto upper = max.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (at::_isnan(v)) {
return v;
} else {
return ::min(::max(v, lower), upper);
}
});
});
}
void clamp_min_kernel_impl(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_min_cuda", [&] {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t v, scalar_t lower) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (_isnan(v)) {
return v;
} else {
return ::max(v, lower);
}
});
});
}
void clamp_min_scalar_kernel_impl(TensorIterator& iter, Scalar min) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_min_scalar_cuda", [&] {
auto lower = min.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (_isnan(v)) {
return v;
} else {
return ::max(v, lower);
}
});
});
}
void clamp_max_kernel_impl(TensorIterator& iter) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_max_cuda", [&] {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t v, scalar_t upper) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (_isnan(v)) {
return v;
} else {
return ::min(v, upper);
}
});
});
}
void clamp_max_scalar_kernel_impl(TensorIterator& iter, Scalar max) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_max_scalar_cuda", [&] {
const auto upper = max.to<scalar_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (_isnan(v)) {
return v;
} else {
return ::min(v, upper);
}
});
});
}
} // anonymous namespace
REGISTER_DISPATCH(where_kernel, &where_kernel_impl);
REGISTER_DISPATCH(isposinf_stub, &isposinf_kernel_impl);
REGISTER_DISPATCH(isneginf_stub, &isneginf_kernel_impl);
REGISTER_DISPATCH(clamp_stub, &clamp_kernel_impl);
REGISTER_DISPATCH(clamp_min_stub, &clamp_min_kernel_impl);
REGISTER_DISPATCH(clamp_max_stub, &clamp_max_kernel_impl);
REGISTER_DISPATCH(clamp_scalar_stub, &clamp_scalar_kernel_impl);
REGISTER_DISPATCH(clamp_min_scalar_stub, &clamp_min_scalar_kernel_impl);
REGISTER_DISPATCH(clamp_max_scalar_stub, &clamp_max_scalar_kernel_impl);
template <typename scalar_t>
__global__ void _assert_async_cuda_kernel(scalar_t* input) {
CUDA_KERNEL_ASSERT(input[0] != 0);
}
__global__ void _assert_async_cuda_kernel(c10::complex<float>* input) {
CUDA_KERNEL_ASSERT(input[0] != c10::complex<float>(0, 0));
}
__global__ void _assert_async_cuda_kernel(c10::complex<double>* input) {
CUDA_KERNEL_ASSERT(input[0] != c10::complex<double>(0, 0));
}
void _assert_async_cuda(const Tensor& self) {
auto n = self.numel();
TORCH_CHECK(n != 0, "Boolean value of Tensor with no values is ambiguous");
TORCH_CHECK(n < 2, "Boolean value of Tensor with more than one value is ambiguous");
auto stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, self.scalar_type(), "_assert_async_cuda", [&] {
_assert_async_cuda_kernel<<<1, 1, 0, stream>>>(self.data_ptr<scalar_t>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
}} // namespace at::native
|
a7018908e3c26969662f6d0d78c95510827ea4aa.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2019-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/test/device_test.h"
#include "dali/core/util.h"
#include "dali/core/dev_array.h"
#include "dali/core/small_vector.h"
#include "dali/core/span.h"
namespace dali {
DEVICE_TEST(CoreUtilsDev, Volume, 1, 1) {
int a0[] = { 42 };
DEV_EXPECT_EQ(volume(a0), 42);
int a1[] = { 2, 3, 4 };
DEV_EXPECT_EQ(volume(a1), 2*3*4);
DeviceArray<int, 2> b = { 10000000, 10000000 };
DEV_EXPECT_EQ(volume(b), 100000000000000LL);
}
DEVICE_TEST(CoreUtilsDev, Size, 1, 1) {
int a0[] = { 42 };
DEV_EXPECT_EQ(size(a0), 1u);
int a1[] = { 2, 3, 4 };
DEV_EXPECT_EQ(size(a1), 3u);
SmallVector<int, 5> v;
v.resize(10);
DEV_EXPECT_EQ(v.size(), 10u);
DEV_EXPECT_EQ(size(v), 10u);
}
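// Device-side span test: each thread writes data[x] = x + 5, then re-reads its
// block's subspan to verify the values.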
DEFINE_TEST_KERNEL(CoreUtilsDev, Span, span<float> data) {
DEV_ASSERT_EQ(data.size(), 1000);
int x = threadIdx.x + blockDim.x*blockIdx.x;
if (x < data.size())
data[x] = x + 5;
__syncthreads();
auto blk = make_span(data.data() + blockDim.x*blockIdx.x, blockDim.x);
x = blockDim.x*blockIdx.x;
for (auto v : blk) {
if (x < data.size()) {
DEV_EXPECT_EQ(v, x + 5);
}
x++;
}
}
TEST(CoreUtilsDev, Span) {
using T = float;
const int N = 1000;
T *dev_data;
CUDA_CALL(hipMalloc(&dev_data, sizeof(T)*N));
DEVICE_TEST_CASE_BODY(CoreUtilsDev, Span, div_ceil(N, 256), 256, make_span(dev_data, N));
T host_data[N];
CUDA_CALL(hipMemcpy(host_data, dev_data, sizeof(host_data), hipMemcpyDeviceToHost));
for (int i = 0; i < N; i++)
EXPECT_EQ(host_data[i], i + 5);
CUDA_CALL(hipFree(dev_data));
}
TEST(CoreUtils, SpanFlatten) {
std::array<std::array<std::array<int, 4>, 3>, 2> arr;
for (int i = 0, n = 1; i < 2; i++) {
for (int j = 0; j < 3; j++) {
for (int k = 0; k < 4; k++, n++) {
arr[i][j][k] = n;
}
}
}
span<int, 24> flat = flatten(make_span(arr));
for (int i = 0; i < 24; i++)
EXPECT_EQ(flat[i], i+1);
span<const int> cflat = flatten(make_cspan(&arr[0], 2));
for (int i = 0; i < 24; i++)
EXPECT_EQ(cflat[i], i+1);
}
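// ctz = count trailing zero bits; by convention here ctz(0) returns the full
// bit width (32 or 64).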
TEST(CoreUtils, CTZ) {
int32_t i32 = 0;
int64_t i64 = 0;
EXPECT_EQ(ctz(i32), 32);
EXPECT_EQ(ctz(i64), 64);
i32 = 1;
i64 = 1;
EXPECT_EQ(ctz(i32), 0);
EXPECT_EQ(ctz(i64), 0);
i32 = 0b11010010101;
i64 = 0b10010110111;
EXPECT_EQ(ctz(i32), 0);
EXPECT_EQ(ctz(i64), 0);
i32 = 0b110100101010;
i64 = 0b100101101110;
EXPECT_EQ(ctz(i32), 1);
EXPECT_EQ(ctz(i64), 1);
i32 = 0b11010010101000;
i64 = 0b10010110111000;
EXPECT_EQ(ctz(i32), 3);
EXPECT_EQ(ctz(i64), 3);
i32 = -1;
i64 = -1;
uint32_t u32 = i32;
uint64_t u64 = i64;
for (int s = 0; s <= 32; s++) {
EXPECT_EQ(ctz(i32), s);
EXPECT_EQ(ctz(u32), s);
u32 <<= 1;
i32 = u32;
}
for (int s = 0; s <= 64; s++) {
EXPECT_EQ(ctz(i64), s);
EXPECT_EQ(ctz(u64), s);
u64 <<= 1;
i64 = u64;
}
}
} // namespace dali
| a7018908e3c26969662f6d0d78c95510827ea4aa.cu | // Copyright (c) 2019-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "dali/test/device_test.h"
#include "dali/core/util.h"
#include "dali/core/dev_array.h"
#include "dali/core/small_vector.h"
#include "dali/core/span.h"
namespace dali {
DEVICE_TEST(CoreUtilsDev, Volume, 1, 1) {
int a0[] = { 42 };
DEV_EXPECT_EQ(volume(a0), 42);
int a1[] = { 2, 3, 4 };
DEV_EXPECT_EQ(volume(a1), 2*3*4);
DeviceArray<int, 2> b = { 10000000, 10000000 };
DEV_EXPECT_EQ(volume(b), 100000000000000LL);
}
DEVICE_TEST(CoreUtilsDev, Size, 1, 1) {
int a0[] = { 42 };
DEV_EXPECT_EQ(size(a0), 1u);
int a1[] = { 2, 3, 4 };
DEV_EXPECT_EQ(size(a1), 3u);
SmallVector<int, 5> v;
v.resize(10);
DEV_EXPECT_EQ(v.size(), 10u);
DEV_EXPECT_EQ(size(v), 10u);
}
DEFINE_TEST_KERNEL(CoreUtilsDev, Span, span<float> data) {
DEV_ASSERT_EQ(data.size(), 1000);
int x = threadIdx.x + blockDim.x*blockIdx.x;
if (x < data.size())
data[x] = x + 5;
__syncthreads();
auto blk = make_span(data.data() + blockDim.x*blockIdx.x, blockDim.x);
x = blockDim.x*blockIdx.x;
for (auto v : blk) {
if (x < data.size()) {
DEV_EXPECT_EQ(v, x + 5);
}
x++;
}
}
TEST(CoreUtilsDev, Span) {
using T = float;
const int N = 1000;
T *dev_data;
CUDA_CALL(cudaMalloc(&dev_data, sizeof(T)*N));
DEVICE_TEST_CASE_BODY(CoreUtilsDev, Span, div_ceil(N, 256), 256, make_span(dev_data, N));
T host_data[N];
CUDA_CALL(cudaMemcpy(host_data, dev_data, sizeof(host_data), cudaMemcpyDeviceToHost));
for (int i = 0; i < N; i++)
EXPECT_EQ(host_data[i], i + 5);
CUDA_CALL(cudaFree(dev_data));
}
TEST(CoreUtils, SpanFlatten) {
std::array<std::array<std::array<int, 4>, 3>, 2> arr;
for (int i = 0, n = 1; i < 2; i++) {
for (int j = 0; j < 3; j++) {
for (int k = 0; k < 4; k++, n++) {
arr[i][j][k] = n;
}
}
}
span<int, 24> flat = flatten(make_span(arr));
for (int i = 0; i < 24; i++)
EXPECT_EQ(flat[i], i+1);
span<const int> cflat = flatten(make_cspan(&arr[0], 2));
for (int i = 0; i < 24; i++)
EXPECT_EQ(cflat[i], i+1);
}
TEST(CoreUtils, CTZ) {
int32_t i32 = 0;
int64_t i64 = 0;
EXPECT_EQ(ctz(i32), 32);
EXPECT_EQ(ctz(i64), 64);
i32 = 1;
i64 = 1;
EXPECT_EQ(ctz(i32), 0);
EXPECT_EQ(ctz(i64), 0);
i32 = 0b11010010101;
i64 = 0b10010110111;
EXPECT_EQ(ctz(i32), 0);
EXPECT_EQ(ctz(i64), 0);
i32 = 0b110100101010;
i64 = 0b100101101110;
EXPECT_EQ(ctz(i32), 1);
EXPECT_EQ(ctz(i64), 1);
i32 = 0b11010010101000;
i64 = 0b10010110111000;
EXPECT_EQ(ctz(i32), 3);
EXPECT_EQ(ctz(i64), 3);
i32 = -1;
i64 = -1;
uint32_t u32 = i32;
uint64_t u64 = i64;
for (int s = 0; s <= 32; s++) {
EXPECT_EQ(ctz(i32), s);
EXPECT_EQ(ctz(u32), s);
u32 <<= 1;
i32 = u32;
}
for (int s = 0; s <= 64; s++) {
EXPECT_EQ(ctz(i64), s);
EXPECT_EQ(ctz(u64), s);
u64 <<= 1;
i64 = u64;
}
}
} // namespace dali
|
466b01567e317c29b591b41c2de03a2d7e07a71d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Author: Arman Pazouki, Wei Hu
// =============================================================================
#include <thrust/extrema.h>
#include <thrust/sort.h>
#include "chrono_fsi/physics/ChFsiForceExplicitSPH.cuh"
#include "chrono_fsi/physics/ChSphGeneral.cuh"
//================================================================================================================================
namespace chrono {
namespace fsi {
//--------------------------------------------------------------------------------------------------------------------------------
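// Builds the 3x3 kernel-gradient correction (renormalization) matrix for marker i:
// accumulates the moment matrix sum_j -(r_ij outer grad W_ij) * V_j over neighbors
// and stores its inverse in G_i, falling back to the identity when the matrix is
// near-singular (|det| <= 0.01).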
__device__ __inline__ void calc_G_Matrix(Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real* G_i,
uint* cellStart,
uint* cellEnd,
const size_t numAllMarkers) {
uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx >= numAllMarkers) {
return;
}
Real3 posRadA = mR3(sortedPosRad[i_idx]);
Real h_i = sortedPosRad[i_idx].w;
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
// get address in grid
int3 gridPos = calcGridPos(posRadA);
// This is the elements of inverse of G
Real mGi[9] = {0.0};
// examine neighbouring cells
for (int z = -1; z <= 1; z++)
for (int y = -1; y <= 1; y++)
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
                // get start of bucket for this cell
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) { // cell is not empty
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 rij = Distance(posRadA, posRadB);
Real d = length(rij);
if (d > SuppRadii || sortedRhoPreMu[j].w <= -2)
continue;
Real3 grad_i_wij = GradWh(rij, h_i);
Real3 grw_vj = grad_i_wij * paramsD.volume0;
mGi[0] -= rij.x * grw_vj.x;
mGi[1] -= rij.x * grw_vj.y;
mGi[2] -= rij.x * grw_vj.z;
mGi[3] -= rij.y * grw_vj.x;
mGi[4] -= rij.y * grw_vj.y;
mGi[5] -= rij.y * grw_vj.z;
mGi[6] -= rij.z * grw_vj.x;
mGi[7] -= rij.z * grw_vj.y;
mGi[8] -= rij.z * grw_vj.z;
}
}
}
Real Det = (mGi[0] * mGi[4] * mGi[8] -
mGi[0] * mGi[5] * mGi[7] -
mGi[1] * mGi[3] * mGi[8] +
mGi[1] * mGi[5] * mGi[6] +
mGi[2] * mGi[3] * mGi[7] -
mGi[2] * mGi[4] * mGi[6]);
if (abs(Det) > 0.01) {
Real OneOverDet = 1.0/Det;
G_i[0] = (mGi[4] * mGi[8] - mGi[5] * mGi[7]) * OneOverDet;
G_i[1] = -(mGi[1] * mGi[8] - mGi[2] * mGi[7]) * OneOverDet;
G_i[2] = (mGi[1] * mGi[5] - mGi[2] * mGi[4]) * OneOverDet;
G_i[3] = -(mGi[3] * mGi[8] - mGi[5] * mGi[6]) * OneOverDet;
G_i[4] = (mGi[0] * mGi[8] - mGi[2] * mGi[6]) * OneOverDet;
G_i[5] = -(mGi[0] * mGi[5] - mGi[2] * mGi[3]) * OneOverDet;
G_i[6] = (mGi[3] * mGi[7] - mGi[4] * mGi[6]) * OneOverDet;
G_i[7] = -(mGi[0] * mGi[7] - mGi[1] * mGi[6]) * OneOverDet;
G_i[8] = (mGi[0] * mGi[4] - mGi[1] * mGi[3]) * OneOverDet;
} else {
for (int i = 0; i < 9; i++) {
G_i[i] = 0.0;
}
G_i[0] = 1;
G_i[4] = 1;
G_i[8] = 1;
}
}
//--------------------------------------------------------------------------------------------------------------------------------
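// Accumulates the 27-entry third-order moment tensor A_i from the G-corrected
// kernel gradients; consumed by calc_L_Matrix when building the corrected
// Laplacian operator.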
__device__ __inline__ void calc_A_Matrix(Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real* A_i,
Real* G_i,
uint* cellStart,
uint* cellEnd,
const size_t numAllMarkers) {
uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx >= numAllMarkers) {
return;
}
Real3 posRadA = mR3(sortedPosRad[i_idx]);
Real h_i = sortedPosRad[i_idx].w;
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
// get address in grid
int3 gridPos = calcGridPos(posRadA);
// examine neighbouring cells
for (int z = -1; z <= 1; z++)
for (int y = -1; y <= 1; y++)
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
                // get start of bucket for this cell
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) { // cell is not empty
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 rij = Distance(posRadA, posRadB);
Real d = length(rij);
if (d > SuppRadii || sortedRhoPreMu[j].w <= -2)
continue;
Real h_j = sortedPosRad[j].w;
Real h_ij = 0.5 * (h_j + h_i);
Real3 grad_ij = GradWh(rij, h_ij);
Real V_j = paramsD.markerMass/paramsD.rho0;
Real com_part = 0;
com_part = (G_i[0] * grad_ij.x + G_i[1] * grad_ij.y + G_i[2] * grad_ij.z) * V_j;
A_i[0] += rij.x * rij.x * com_part; // 111
A_i[1] += rij.x * rij.y * com_part; // 112
A_i[2] += rij.x * rij.z * com_part; // 113
A_i[3] += rij.y * rij.x * com_part; // 121
A_i[4] += rij.y * rij.y * com_part; // 122
A_i[5] += rij.y * rij.z * com_part; // 123
A_i[6] += rij.z * rij.x * com_part; // 131
A_i[7] += rij.z * rij.y * com_part; // 132
A_i[8] += rij.z * rij.z * com_part; // 133
com_part = (G_i[3] * grad_ij.x + G_i[4] * grad_ij.y + G_i[5] * grad_ij.z) * V_j;
A_i[9] += rij.x * rij.x * com_part; // 211
A_i[10] += rij.x * rij.y * com_part; // 212
A_i[11] += rij.x * rij.z * com_part; // 213
A_i[12] += rij.y * rij.x * com_part; // 221
A_i[13] += rij.y * rij.y * com_part; // 222
A_i[14] += rij.y * rij.z * com_part; // 223
A_i[15] += rij.z * rij.x * com_part; // 231
A_i[16] += rij.z * rij.y * com_part; // 232
A_i[17] += rij.z * rij.z * com_part; // 233
com_part = (G_i[6] * grad_ij.x + G_i[7] * grad_ij.y + G_i[8] * grad_ij.z) * V_j;
A_i[18] += rij.x * rij.x * com_part; // 311
A_i[19] += rij.x * rij.y * com_part; // 312
A_i[20] += rij.x * rij.z * com_part; // 313
A_i[21] += rij.y * rij.x * com_part; // 321
A_i[22] += rij.y * rij.y * com_part; // 322
A_i[23] += rij.y * rij.z * com_part; // 323
A_i[24] += rij.z * rij.x * com_part; // 331
A_i[25] += rij.z * rij.y * com_part; // 332
A_i[26] += rij.z * rij.z * com_part; // 333
}
}
}
}
//--------------------------------------------------------------------------------------------------------------------------------
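// Assembles a 6x6 system from A_i and the corrected kernel gradients and solves it
// (inv6xdelta_mn) for the symmetric 3x3 correction matrix L_i used by the
// consistent SPH Laplacian.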
__device__ __inline__ void calc_L_Matrix(Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real* A_i,
Real* L_i,
Real* G_i,
uint* cellStart,
uint* cellEnd,
const size_t numAllMarkers) {
uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx >= numAllMarkers) {
return;
}
Real3 posRadA = mR3(sortedPosRad[i_idx]);
Real h_i = sortedPosRad[i_idx].w;
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
Real B[36] = {0.0};
Real L[6] = {0.0};
// get address in grid
int3 gridPos = calcGridPos(posRadA);
// examine neighbouring cells
for (int z = -1; z <= 1; z++)
for (int y = -1; y <= 1; y++)
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
                // get start of bucket for this cell
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) { // cell is not empty
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 rij = Distance(posRadA, posRadB);
Real d = length(rij);
if (d > SuppRadii || sortedRhoPreMu[j].w <= -2)
continue;
Real3 eij = rij / d;
Real h_j = sortedPosRad[j].w;
// Real m_j = paramsD.markerMass;
Real h_ij = 0.5 * (h_j + h_i);
Real3 grad_ij = GradWh(rij, h_ij);
Real V_j = paramsD.markerMass/paramsD.rho0;
Real com_part = 0;
// mn=11
Real XX = (eij.x * grad_ij.x);
Real XY = (eij.x * grad_ij.y + eij.y * grad_ij.x);
Real XZ = (eij.x * grad_ij.z + eij.z * grad_ij.x);
Real YY = (eij.y * grad_ij.y);
Real YZ = (eij.y * grad_ij.z + eij.z * grad_ij.y);
Real ZZ = (eij.z * grad_ij.z);
com_part = (A_i[0] * eij.x + A_i[9] * eij.y + A_i[18] * eij.z + rij.x * eij.x) * V_j;
B[6 * 0 + 0] += com_part * XX; // 11
B[6 * 0 + 1] += com_part * XY; // 12
B[6 * 0 + 2] += com_part * XZ; // 13
B[6 * 0 + 3] += com_part * YY; // 14
B[6 * 0 + 4] += com_part * YZ; // 15
                        B[6 * 0 + 5] += com_part * ZZ;  // 16
// mn=12
com_part = (A_i[1] * eij.x + A_i[10] * eij.y + A_i[19] * eij.z + rij.x * eij.y) * V_j;
B[6 * 1 + 0] += com_part * XX; // 21
B[6 * 1 + 1] += com_part * XY; // 22
B[6 * 1 + 2] += com_part * XZ; // 23
B[6 * 1 + 3] += com_part * YY; // 24
B[6 * 1 + 4] += com_part * YZ; // 25
                        B[6 * 1 + 5] += com_part * ZZ;  // 26
// mn=13
com_part = (A_i[2] * eij.x + A_i[11] * eij.y + A_i[20] * eij.z + rij.x * eij.z) * V_j;
B[6 * 2 + 0] += com_part * XX; // 31
B[6 * 2 + 1] += com_part * XY; // 32
B[6 * 2 + 2] += com_part * XZ; // 33
B[6 * 2 + 3] += com_part * YY; // 34
B[6 * 2 + 4] += com_part * YZ; // 35
B[6 * 2 + 5] += com_part * ZZ; // 36
// Note that we skip mn=21 since it is similar to mn=12
// mn=22
com_part = (A_i[4] * eij.x + A_i[13] * eij.y + A_i[22] * eij.z + rij.y * eij.y) * V_j;
B[6 * 3 + 0] += com_part * XX; // 41
B[6 * 3 + 1] += com_part * XY; // 42
B[6 * 3 + 2] += com_part * XZ; // 43
B[6 * 3 + 3] += com_part * YY; // 44
B[6 * 3 + 4] += com_part * YZ; // 45
B[6 * 3 + 5] += com_part * ZZ; // 46
// mn=23
com_part = (A_i[5] * eij.x + A_i[14] * eij.y + A_i[23] * eij.z + rij.y * eij.z) * V_j;
B[6 * 4 + 0] += com_part * XX; // 51
B[6 * 4 + 1] += com_part * XY; // 52
B[6 * 4 + 2] += com_part * XZ; // 53
B[6 * 4 + 3] += com_part * YY; // 54
B[6 * 4 + 4] += com_part * YZ; // 55
B[6 * 4 + 5] += com_part * ZZ; // 56
// mn=33
com_part = (A_i[8] * eij.x + A_i[17] * eij.y + A_i[26] * eij.z + rij.z * eij.z) * V_j;
B[6 * 5 + 0] += com_part * XX; // 61
B[6 * 5 + 1] += com_part * XY; // 62
B[6 * 5 + 2] += com_part * XZ; // 63
B[6 * 5 + 3] += com_part * YY; // 64
B[6 * 5 + 4] += com_part * YZ; // 65
B[6 * 5 + 5] += com_part * ZZ; // 66
}
}
}
inv6xdelta_mn(B, L);
L_i[0] = L[0];
L_i[1] = L[1];
L_i[2] = L[2];
L_i[3] = L[1];
L_i[4] = L[3];
L_i[5] = L[4];
L_i[6] = L[2];
L_i[7] = L[4];
L_i[8] = L[5];
// Real Det = (L_i[0] * L_i[4] * L_i[8] - L_i[0] * L_i[5] * L_i[7] - L_i[1] * L_i[3] * L_i[8] +
// L_i[1] * L_i[5] * L_i[6] + L_i[2] * L_i[3] * L_i[7] - L_i[2] * L_i[4] * L_i[6]);
// if (abs(Det) < 0.01) {
// for (int i = 0; i < 9; i++) {
// L_i[0 * 9 + i] = 0.0;
// L_i[0 * 9 + 0] = 1;
// L_i[0 * 9 + 4] = 1;
// L_i[0 * 9 + 8] = 1;
// }
// }
// printf("L Det %f\n", Det);
}
//--------------------------------------------------------------------------------------------------------------------------------
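// Rate of the stress tensor for the elastic/granular SPH model: strain-rate and
// spin tensors are built from G-corrected kernel gradients, then a hypoelastic
// (Jaumann-type) update combines the deviatoric part (shear modulus G_shear),
// the volumetric part (bulk modulus K_bulk) and the spin rotation terms.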
__global__ void Shear_Stress_Rate(Real4* sortedPosRad,
Real4* sortedRhoPreMu,
Real3* sortedVelMas,
Real3* velMas_ModifiedBCE,
Real4* rhoPreMu_ModifiedBCE,
Real3* sortedTauXxYyZz,
Real3* sortedTauXyXzYz,
Real3* sortedDerivTauXxYyZz,
Real3* sortedDerivTauXyXzYz,
uint* gridMarkerIndex,
uint* cellStart,
uint* cellEnd,
const size_t numAllMarkers) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numAllMarkers) {
return;
}
if (sortedRhoPreMu[index].w > -0.5) {
return;
}
Real3 posRadA = mR3(sortedPosRad[index]);
Real3 velMasA = sortedVelMas[index];
Real hA = sortedPosRad[index].w;
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
Real tauxx = sortedTauXxYyZz[index].x;
Real tauyy = sortedTauXxYyZz[index].y;
Real tauzz = sortedTauXxYyZz[index].z;
Real tauxy = sortedTauXyXzYz[index].x;
Real tauxz = sortedTauXyXzYz[index].y;
Real tauyz = sortedTauXyXzYz[index].z;
Real tauzx = tauxz;
Real tauzy = tauyz;
Real tauyx = tauxy;
Real dTauxx = 0.0;
Real dTauyy = 0.0;
Real dTauzz = 0.0;
Real dTauxy = 0.0;
Real dTauxz = 0.0;
Real dTauyz = 0.0;
Real G_i[9] = {0.0};
calc_G_Matrix(sortedPosRad,sortedVelMas,sortedRhoPreMu,G_i,cellStart,cellEnd,numAllMarkers);
// get address in grid
int3 gridPos = calcGridPos(posRadA);
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
uint startIndex = cellStart[gridHash];
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
if (j != index) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(posRadA, posRadB);
Real d = length(dist3);
if (d > SuppRadii)
continue;
Real3 velMasB = sortedVelMas[j];
Real4 rhoPresMuB = sortedRhoPreMu[j];
if (rhoPresMuB.w > -1.0) {
int bceIndexB = gridMarkerIndex[j] - (numObjectsD.numFluidMarkers);
if (!(bceIndexB >= 0 &&
bceIndexB < numObjectsD.numBoundaryMarkers + numObjectsD.numRigid_SphMarkers)) {
printf("Error! bceIndex out of bound, collideCell !\n");
}
rhoPresMuB = rhoPreMu_ModifiedBCE[bceIndexB]; // to check
velMasB = velMas_ModifiedBCE[bceIndexB]; // to check
velMasB = 2.0*velMasB - velMasA; // noslip BC
}
Real rhoB = rhoPresMuB.x;
Real hB = sortedPosRad[j].w;
Real mB = paramsD.markerMass;
Real3 gradW = GradWh(dist3, (hA + hB) * 0.5);
Real3 gradW_new;
gradW_new.x = G_i[0]*gradW.x + G_i[1]*gradW.y + G_i[2]*gradW.z;
gradW_new.y = G_i[3]*gradW.x + G_i[4]*gradW.y + G_i[5]*gradW.z;
gradW_new.z = G_i[6]*gradW.x + G_i[7]*gradW.y + G_i[8]*gradW.z;
gradW = gradW_new;
// start to calculate the rate
Real Gm = paramsD.G_shear; // shear modulus of the material
Real half_mB_over_rhoB = 0.5 * (mB / rhoB);
Real3 vAB = velMasA - velMasB;
Real3 vAB_h = (velMasA - velMasB) * half_mB_over_rhoB;
// entries of strain rate tensor
Real exx = -2.0 * vAB_h.x * gradW.x;
Real eyy = -2.0 * vAB_h.y * gradW.y;
Real ezz = -2.0 * vAB_h.z * gradW.z;
Real exy = -vAB_h.x * gradW.y - vAB_h.y * gradW.x;
Real exz = -vAB_h.x * gradW.z - vAB_h.z * gradW.x;
Real eyz = -vAB_h.y * gradW.z - vAB_h.z * gradW.y;
// entries of rotation rate (spin) tensor
// Real wxx = 0.0;
// Real wyy = 0.0;
// Real wzz = 0.0;
Real wxy = -vAB_h.x * gradW.y + vAB_h.y * gradW.x;
Real wxz = -vAB_h.x * gradW.z + vAB_h.z * gradW.x;
Real wyz = -vAB_h.y * gradW.z + vAB_h.z * gradW.y;
Real wyx = -wxy;
// Real wzx = -wxz;
Real wzy = -wyz;
Real edia = 1.0 / 3.0 * (exx + eyy + ezz);
Real twoGm = 2.0 * Gm;
Real K_edia = paramsD.K_bulk*1.0*edia;
dTauxx += twoGm * (exx - edia) + 2.0 * (tauxy * wxy + tauxz * wxz) + K_edia;
dTauyy += twoGm * (eyy - edia) - 2.0 * (tauyx * wxy - tauyz * wyz) + K_edia;
dTauzz += twoGm * (ezz - edia) - 2.0 * (tauzx * wxz + tauzy * wyz) + K_edia;
dTauxy += twoGm * exy - (tauxx * wxy + tauxz * wzy) + (wxy * tauyy + wxz * tauzy);
dTauxz += twoGm * exz - (tauxx * wxz + tauxy * wyz) + (wxy * tauyz + wxz * tauzz);
dTauyz += twoGm * eyz - (tauyx * wxz + tauyy * wyz) + (wyx * tauxz + wyz * tauzz);
}
}
}
}
}
sortedDerivTauXxYyZz[index] = mR3(dTauxx, dTauyy, dTauzz);
sortedDerivTauXyXzYz[index] = mR3(dTauxy, dTauxz, dTauyz);
}
//--------------------------------------------------------------------------------------------------------------------------------
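// Density re-initialization: for fluid markers (when density_reinit == 0) the
// density is replaced by the Shepard-filtered estimate
// sum_j m_j W_ij / sum_j (m_j W_ij / rho_j); markers whose density drifts far
// from rho0 are reported.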
__global__ void calcRho_kernel(Real4* sortedPosRad,
Real4* sortedRhoPreMu,
Real4* sortedRhoPreMu_old,
Real* _sumWij_rhoi,
uint* cellStart,
uint* cellEnd,
const size_t numAllMarkers,
int density_reinit,
volatile bool* isErrorD) {
uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx >= numAllMarkers) {
return;
}
sortedRhoPreMu_old[i_idx].y = Eos(sortedRhoPreMu_old[i_idx].x, sortedRhoPreMu_old[i_idx].w);
Real3 posRadA = mR3(sortedPosRad[i_idx]);
Real h_i = sortedPosRad[i_idx].w;
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
Real sum_mW = 0;
Real sum_mW_rho = 0.0000001;
Real sum_W = 0.0;
// get address in grid
int3 gridPos = calcGridPos(posRadA);
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) {
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(posRadA, posRadB);
Real d = length(dist3);
if (d > SuppRadii)
continue;
if (sortedRhoPreMu_old[j].w == -1) {
Real h_j = sortedPosRad[j].w;
Real m_j = paramsD.markerMass;
Real W3 = W3h(d, 0.5 * (h_j + h_i));
sum_mW += m_j * W3;
sum_W += W3;
sum_mW_rho += m_j * W3 / sortedRhoPreMu_old[j].x;
}
}
}
}
}
}
// sortedRhoPreMu[i_idx].x = sum_mW;
if ((density_reinit == 0) && (sortedRhoPreMu[i_idx].w == -1))
sortedRhoPreMu[i_idx].x = sum_mW / sum_mW_rho;
if ((sortedRhoPreMu[i_idx].x > 3 * paramsD.rho0 || sortedRhoPreMu[i_idx].x < 0.01 * paramsD.rho0) &&
sortedRhoPreMu[i_idx].w == -1)
printf("(calcRho_kernel)density marker %d, sum_mW=%f, sum_W=%f, h_i=%f\n", i_idx, sum_mW, sum_W, h_i);
}
//--------------------------------------------------------------------------------------------------------------------------------
__device__ __inline__ void modifyPressure(Real4& rhoPresMuB, const Real3& dist3Alpha) {
// body force in x direction
rhoPresMuB.y = (dist3Alpha.x > 0.5 * paramsD.boxDims.x) ? (rhoPresMuB.y - paramsD.deltaPress.x) : rhoPresMuB.y;
rhoPresMuB.y = (dist3Alpha.x < -0.5 * paramsD.boxDims.x) ? (rhoPresMuB.y + paramsD.deltaPress.x) : rhoPresMuB.y;
    // body force in y direction
rhoPresMuB.y = (dist3Alpha.y > 0.5 * paramsD.boxDims.y) ? (rhoPresMuB.y - paramsD.deltaPress.y) : rhoPresMuB.y;
rhoPresMuB.y = (dist3Alpha.y < -0.5 * paramsD.boxDims.y) ? (rhoPresMuB.y + paramsD.deltaPress.y) : rhoPresMuB.y;
    // body force in z direction
rhoPresMuB.y = (dist3Alpha.z > 0.5 * paramsD.boxDims.z) ? (rhoPresMuB.y - paramsD.deltaPress.z) : rhoPresMuB.y;
rhoPresMuB.y = (dist3Alpha.z < -0.5 * paramsD.boxDims.z) ? (rhoPresMuB.y + paramsD.deltaPress.z) : rhoPresMuB.y;
}
//--------------------------------------------------------------------------------------------------------------------------------
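// Real roots of a*x^3 + b*x^2 + c*x + d = 0 via the trigonometric method.
// Degenerate coefficients and complex-root cases return (0, 0, 0).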
__device__ inline Real3 CubicSolve(Real aa, Real bb, Real cc, Real dd) {
Real disc, q, r, dum1, dum2, term1, r13;
    if (aa == 0) {
        return mR3(0, 0, 0);
    }
    bb /= aa;
    cc /= aa;
    dd /= aa;
if (abs(bb) < 1e-9) {
return mR3(0, 0, 0);
}
if (abs(cc) < 1e-9) {
return mR3(0, 0, 0);
}
if (abs(dd) < 1e-9) {
return mR3(0, 0, 0);
}
q = (3.0 * cc - (bb * bb)) / 9.0;
r = -(27.0 * dd) + bb * (9.0 * cc - 2.0 * (bb * bb));
r /= 54.0;
disc = q * q * q + r * r;
term1 = (bb / 3.0);
/* dataForm.x1Im.value = 0; //The first root is always real.
if (disc > 0) { // one root real, two are complex
s = r + Math.sqrt(disc);
s = ((s < 0) ? -Math.pow(-s, (1.0/3.0)) : Math.pow(s, (1.0/3.0)));
t = r - Math.sqrt(disc);
t = ((t < 0) ? -Math.pow(-t, (1.0/3.0)) : Math.pow(t, (1.0/3.0)));
dataForm.x1Re.value = -term1 + s + t;
term1 += (s + t)/2.0;
dataForm.x3Re.value = dataForm.x2Re.value = -term1;
term1 = Math.sqrt(3.0)*(-t + s)/2;
dataForm.x2Im.value = term1;
dataForm.x3Im.value = -term1;
return;
}
// End if (disc > 0)
// The remaining options are all real
dataForm.x3Im.value = dataForm.x2Im.value = 0;
if (disc == 0){ // All roots real, at least two are equal.
r13 = ((r < 0) ? -Math.pow(-r,(1.0/3.0)) : Math.pow(r,(1.0/3.0)));
dataForm.x1Re.value = -term1 + 2.0*r13;
dataForm.x3Re.value = dataForm.x2Re.value = -(r13 + term1);
return;
} // End if (disc == 0)
*/
Real xRex, xRey, xRez;
// have complex root
if (disc > 0) {
xRex = 0.0;
xRey = 0.0;
xRez = 0.0;
return mR3(xRex, xRey, xRez);
}
// All roots real, at least two are equal.
if (disc == 0) {
if (r < 0) {
r13 = pow(-r, (1.0 / 3.0));
} else {
r13 = pow(r, (1.0 / 3.0));
}
xRex = -term1 + 2.0 * r13;
xRey = -(r13 + term1);
xRez = xRey;
return mR3(xRex, xRey, xRez);
}
// All roots are real and unequal (to get here, q < 0)
q = -q;
dum1 = q * q * q;
dum2 = r / (sqrt(dum1 + 1.0e-9));
if ((dum2 >= 0) && (dum2 <= 1)) {
dum1 = acos(dum2);
} else {
xRex = 0.0;
xRey = 0.0;
xRez = 0.0;
return mR3(xRex, xRey, xRez);
}
r13 = 2.0 * sqrt(q);
xRex = -term1 + r13 * cos(dum1 / 3.0);
xRey = -term1 + r13 * cos((dum1 + 2.0 * 3.1415926) / 3.0);
xRez = -term1 + r13 * cos((dum1 + 4.0 * 3.1415926) / 3.0);
return mR3(xRex, xRey, xRez);
}
//--------------------------------------------------------------------------------------------------------------------------------
__device__ inline Real3 CubicEigen(Real4 c1, Real4 c2, Real4 c3) {
Real a = c1.x;
Real b = c1.y;
Real c = c1.z;
Real d = c1.w;
Real l = c2.x;
Real m = c2.y;
Real n = c2.z;
Real k = c2.w;
Real p = c3.x;
Real q = c3.y;
Real r = c3.z;
Real s = c3.w;
Real D = (a * m * r + b * p * n + c * l * q) - (a * n * q + b * l * r + c * m * p) + 1.0e-9;
Real x = ((b * r * k + c * m * s + d * n * q) - (b * n * s + c * q * k + d * m * r)) / D;
Real y = ((a * n * s + c * p * k + d * l * r) - (a * r * k + c * l * s + d * n * p)) / D;
Real z = ((a * q * k + b * l * s + d * m * p) - (a * m * s + b * p * k + d * l * q)) / D;
b = b + 1.0e-9;
x = 1.0e0;
z = (-l + a * m / b) / (n - c * m / b);
y = (-a - c * z) / b;
Real R = sqrt(x * x + y * y + z * z);
x = x / R;
y = y / R;
z = z / R;
// if(abs(D) < 1){
// return mR3(0,0,0);
// }
// if(abs(m) < 0.1){
// x=0;
// y=1;
// z=0;
// return mR3(x,y,z);
// }
// else{
// y=0;
// if(abs(c) > 0.1){
// x=1;
// z=-a/c;
// return mR3(x,y,z);
// }
// if(abs(a) > 0.1){
// z=1;
// x=-c/a;
// return mR3(x,y,z);
// }
// }
return mR3(x, y, z);
}
//--------------------------------------------------------------------------------------------------------------------------------
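// Pairwise WCSPH contribution to d(velocity)/dt and d(rho)/dt from neighbor B:
// symmetric pressure-gradient term, laminar viscosity term, and (currently
// disabled) artificial viscosity; the continuity term uses the XSPH velocities.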
__device__ inline Real4 DifVelocityRho(float G_i[9],
Real3 dist3,
Real d,
Real4 posRadA,
Real4 posRadB,
Real3 velMasA,
Real3 vel_XSPH_A,
Real3 velMasB,
Real3 vel_XSPH_B,
Real4 rhoPresMuA,
Real4 rhoPresMuB,
Real multViscosity) {
if (rhoPresMuA.w > -1 && rhoPresMuB.w > -1)
return mR4(0.0);
Real3 gradW = GradWh(dist3, (posRadA.w + posRadB.w) * 0.5);
    // Continuity equation
Real derivRho = paramsD.markerMass * dot(vel_XSPH_A - vel_XSPH_B, gradW);
// Viscosity
Real rAB_Dot_GradWh = dot(dist3, gradW);
Real rAB_Dot_GradWh_OverDist = rAB_Dot_GradWh / (d * d + paramsD.epsMinMarkersDis * paramsD.HSML * paramsD.HSML);
Real3 derivV = - paramsD.markerMass * (rhoPresMuA.y / (rhoPresMuA.x * rhoPresMuA.x) + rhoPresMuB.y / (rhoPresMuB.x * rhoPresMuB.x)) * gradW
+ paramsD.markerMass * (8.0f * multViscosity) * paramsD.mu0
* rAB_Dot_GradWh_OverDist * (velMasA - velMasB) / square(rhoPresMuA.x + rhoPresMuB.x);
// Artificial viscosity
Real vAB_Dot_rAB = dot(velMasA - velMasB, dist3);
    if ((vAB_Dot_rAB < 0.0) && (1 == 0)) {  // change to 1==1 to enable artificial viscosity
Real alpha = paramsD.Ar_vis_alpha;
Real c_ab = paramsD.Cs;
        Real rho = 0.5f * (rhoPresMuA.x + rhoPresMuB.x);  // mean density of the pair
Real nu = -alpha * paramsD.HSML * c_ab / rho;
Real derivM1 = -paramsD.markerMass * (nu * vAB_Dot_rAB / (d * d + paramsD.epsMinMarkersDis * paramsD.HSML * paramsD.HSML));
derivV.x += derivM1 * gradW.x;
derivV.y += derivM1 * gradW.y;
derivV.z += derivM1 * gradW.z;
}
return mR4(derivV, derivRho);
}
//--------------------------------------------------------------------------------------------------------------------------------
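// Pairwise momentum contribution for the elastic SPH path: accelerations come
// from the full stress tensors of both markers (scaled by 1/rho0^2) plus an
// artificial-viscosity term; the density derivative is returned as zero.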
__device__ inline Real4 DifVelocityRho_ElasticSPH(Real3 gradW,
Real3 dist3,
Real d,
Real invd,
Real4 posRadA,
Real4 posRadB,
Real3 velMasA_in,
Real3 vel_XSPH_A_in,
Real3 velMasB_in,
Real3 vel_XSPH_B_in,
Real4 rhoPresMuA,
Real4 rhoPresMuB,
Real multViscosity,
Real3 tauXxYyZz_A_in,
Real3 tauXyXzYz_A_in,
Real3 tauXxYyZz_B_in,
Real3 tauXyXzYz_B_in) {
Real3 velMasA = velMasA_in;
Real3 velMasB = velMasB_in;
Real3 vel_XSPH_A = vel_XSPH_A_in;
Real3 vel_XSPH_B = vel_XSPH_B_in;
Real3 tauXxYyZz_A = tauXxYyZz_A_in;
Real3 tauXxYyZz_B = tauXxYyZz_B_in;
Real3 tauXyXzYz_A = tauXyXzYz_A_in;
Real3 tauXyXzYz_B = tauXyXzYz_B_in;
if (rhoPresMuA.w > -0.5 && rhoPresMuB.w > -0.5)
return mR4(0.0);
if (rhoPresMuA.w < -0.5 && rhoPresMuB.w > -0.5){
tauXxYyZz_B = tauXxYyZz_A;
tauXyXzYz_B = tauXyXzYz_A;
vel_XSPH_B = 2.0*vel_XSPH_B - vel_XSPH_A; // noslip BC
// velMasB = 2.0*velMasB - velMasA; // noslip BC
}
if (rhoPresMuA.w > -0.5 && rhoPresMuB.w < -0.5){
tauXxYyZz_A = tauXxYyZz_B;
tauXyXzYz_A = tauXyXzYz_B;
vel_XSPH_A = 2.0*vel_XSPH_A - vel_XSPH_B;
}
Real txxA = tauXxYyZz_A.x;
Real tyyA = tauXxYyZz_A.y;
Real tzzA = tauXxYyZz_A.z;
Real txyA = tauXyXzYz_A.x;
Real txzA = tauXyXzYz_A.y;
Real tyzA = tauXyXzYz_A.z;
Real txxB = tauXxYyZz_B.x;
Real tyyB = tauXxYyZz_B.y;
Real tzzB = tauXxYyZz_B.z;
Real txyB = tauXyXzYz_B.x;
Real txzB = tauXyXzYz_B.y;
Real tyzB = tauXyXzYz_B.z;
// Real rhoA = rhoPresMuA.x;
// Real rhoB = rhoPresMuB.x;
// Real rhoA2 = rhoA * rhoA;
// Real rhoB2 = rhoB * rhoB;
Real Mass = paramsD.markerMass;
Real MassOverRhoA2 = Mass * paramsD.invrho0 * paramsD.invrho0;
Real MassOverRhoB2 = MassOverRhoA2;
Real3 MA_gradW = gradW * MassOverRhoA2;
Real3 MB_gradW = gradW * MassOverRhoB2;
Real derivVx = (txxA * MA_gradW.x + txyA * MA_gradW.y + txzA * MA_gradW.z) +
(txxB * MB_gradW.x + txyB * MB_gradW.y + txzB * MB_gradW.z) ;
Real derivVy = (txyA * MA_gradW.x + tyyA * MA_gradW.y + tyzA * MA_gradW.z) +
(txyB * MB_gradW.x + tyyB * MB_gradW.y + tyzB * MB_gradW.z) ;
Real derivVz = (txzA * MA_gradW.x + tyzA * MA_gradW.y + tzzA * MA_gradW.z) +
(txzB * MB_gradW.x + tyzB * MB_gradW.y + tzzB * MB_gradW.z) ;
// TODO: Visco-plastic model
// Real vel = length(velMasA);
// if(vel > 0.3){
// Real rAB_Dot_GradWh = dot(dist3, gradW);
// Real rAB_Dot_GradWh_OverDist = rAB_Dot_GradWh / (d * d + paramsD.epsMinMarkersDis * paramsD.HSML * paramsD.HSML);
// Real3 derivV = - paramsD.markerMass *(rhoPresMuA.y / (rhoPresMuA.x * rhoPresMuA.x) + rhoPresMuB.y / (rhoPresMuB.x * rhoPresMuB.x)) * gradW
// + paramsD.markerMass * (8.0f * multViscosity) * paramsD.mu_fric_s
// * pow(rhoPresMuA.x + rhoPresMuB.x, Real(-2)) * rAB_Dot_GradWh_OverDist * (velMasA - velMasB);
// derivVx = derivV.x;
// derivVy = derivV.y;
// derivVz = derivV.z;
// }
// Artificial viscosity
Real vAB_Dot_rAB = dot(velMasA - velMasB, dist3);
// if (vAB_Dot_rAB < 0.0) {
Real alpha = paramsD.Ar_vis_alpha;
Real c_ab = paramsD.Cs;
// Real rho = 0.5f * (rhoA + rhoB);
Real nu = -alpha * paramsD.HSML * c_ab * paramsD.invrho0;
Real derivM1 = -Mass * (nu * vAB_Dot_rAB * (invd * invd));//+ paramsD.epsMinMarkersDis * paramsD.HSML * paramsD.HSML
derivVx += derivM1 * gradW.x;
derivVy += derivM1 * gradW.y;
derivVz += derivM1 * gradW.z;
// }
    // TODO: Damping force
// if (1 == 0) {
// Real xi0 = paramsD.Vis_Dam;
// Real E0 = paramsD.E_young;
// Real h0 = paramsD.HSML;
// Real Cd = xi0 * sqrt(E0 / (rhoA * h0 * h0));
// derivVx -= Cd * velMasA.x;
// derivVy -= Cd * velMasA.y;
// derivVz -= Cd * velMasA.z;
// }
// Real derivRho = Mass * dot(vel_XSPH_A - vel_XSPH_B, gradW);
return mR4(derivVx, derivVy, derivVz, 0.0);
}
//--------------------------------------------------------------------------------------------------------------------------------
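// Contribution of neighbor B to the corrected (G-renormalized) SPH gradient of a
// scalar field f at marker A: (fB - fA) * (G * grad W) * V_B.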
__device__ inline Real3 GradientOperator( float G_i[9],
Real3 dist3,
Real4 posRadA,
Real4 posRadB,
Real fA,
Real fB,
Real4 rhoPresMuA,
Real4 rhoPresMuB) {
Real3 gradW = GradWh(dist3, (posRadA.w + posRadB.w) * 0.5);
Real3 gradW_new;
gradW_new.x = G_i[0]*gradW.x + G_i[1]*gradW.y + G_i[2]*gradW.z;
gradW_new.y = G_i[3]*gradW.x + G_i[4]*gradW.y + G_i[5]*gradW.z;
gradW_new.z = G_i[6]*gradW.x + G_i[7]*gradW.y + G_i[8]*gradW.z;
Real Vol = paramsD.markerMass/rhoPresMuB.x;
Real fji = fB - fA;
Real Gra_ij_x = fji*gradW_new.x * Vol;
Real Gra_ij_y = fji*gradW_new.y * Vol;
Real Gra_ij_z = fji*gradW_new.z * Vol;
return mR3(Gra_ij_x, Gra_ij_y, Gra_ij_z);
}
//--------------------------------------------------------------------------------------------------------------------------------
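// Contribution of neighbor B to the corrected SPH Laplacian of a scalar field:
// the .x component carries the main Laplacian term (via L_i) and .y/.z/.w carry
// the auxiliary gradient-coupling terms combined later in Navier_Stokes.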
__device__ inline Real4 LaplacianOperator( float G_i[9],
float L_i[9],
Real3 dist3,
Real4 posRadA,
Real4 posRadB,
Real fA,
Real fB,
Real4 rhoPresMuA,
Real4 rhoPresMuB) {
Real3 gradW = GradWh(dist3, (posRadA.w + posRadB.w) * 0.5);
Real d = length(dist3);
Real3 eij = dist3/d;
Real Vol = paramsD.markerMass/rhoPresMuB.x;
Real fij = fA - fB;
Real ex_Gwx = eij.x*gradW.x;
Real ex_Gwy = eij.x*gradW.y;
Real ex_Gwz = eij.x*gradW.z;
Real ey_Gwx = eij.y*gradW.x;
Real ey_Gwy = eij.y*gradW.y;
Real ey_Gwz = eij.y*gradW.z;
Real ez_Gwx = eij.z*gradW.x;
Real ez_Gwy = eij.z*gradW.y;
Real ez_Gwz = eij.z*gradW.z;
Real Part1 = L_i[0]*ex_Gwx + L_i[1]*ex_Gwy + L_i[2]*ex_Gwz
+ L_i[3]*ey_Gwx + L_i[4]*ey_Gwy + L_i[5]*ey_Gwz
+ L_i[6]*ez_Gwx + L_i[7]*ez_Gwy + L_i[8]*ez_Gwz;
Real Part2 = fij/d * Vol;
Real3 Part3 = mR3(-eij.x, -eij.y, -eij.z) * Vol;
return mR4(2.0*Part1*Part2, Part3.x*(2.0*Part1), Part3.y*(2.0*Part1), Part3.z*(2.0*Part1));
}
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void EOS(Real4* sortedRhoPreMu, uint numAllMarkers, volatile bool* isErrorD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numAllMarkers)
return;
sortedRhoPreMu[index].y = Eos(sortedRhoPreMu[index].x, sortedRhoPreMu[index].w);
}
//--------------------------------------------------------------------------------------------------------------------------------
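// Main per-marker force kernel: loops over neighboring cells and, depending on
// paramsD.elastic_SPH, either evaluates the elastic-SPH pairwise forces or the
// consistent (G/L-corrected) pressure-gradient and viscous-Laplacian operators.
// Also accumulates the capped particle-shifting vector shift_r.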
__global__ void Navier_Stokes(Real4* sortedDerivVelRho,
Real3* shift_r,
Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real3* velMas_ModifiedBCE,
Real4* rhoPreMu_ModifiedBCE,
Real3* sortedTauXxYyZz,
Real3* sortedTauXyXzYz,
uint* gridMarkerIndex,
uint* cellStart,
uint* cellEnd,
const size_t numAllMarkers,
Real MaxVel,
volatile bool* isErrorD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numAllMarkers)
return;
if (sortedRhoPreMu[index].w > -0.5 && sortedRhoPreMu[index].w < 0.5){
sortedDerivVelRho[index] = mR4(0.0);
return;
}
Real3 posRadA = mR3(sortedPosRad[index]);
Real3 velMasA = sortedVelMas[index];
Real4 rhoPresMuA = sortedRhoPreMu[index];
Real4 derivVelRho = mR4(0.0);
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
Real G_i[9] = {0.0};
Real A_i[27] = {0.0};
Real L_i[9] = {0.0};
calc_G_Matrix(sortedPosRad,sortedVelMas,sortedRhoPreMu,G_i,cellStart,cellEnd,numAllMarkers);
if(!paramsD.elastic_SPH){
calc_A_Matrix(sortedPosRad,sortedVelMas,sortedRhoPreMu,A_i,G_i,cellStart,cellEnd,numAllMarkers);
calc_L_Matrix(sortedPosRad,sortedVelMas,sortedRhoPreMu,A_i,L_i,G_i,cellStart,cellEnd,numAllMarkers);
}
float Gi[9] = {1.0,0.0,0.0, 0.0,1.0,0.0, 0.0,0.0,1.0};
float Li[9] = {1.0,0.0,0.0, 0.0,1.0,0.0, 0.0,0.0,1.0};
Gi[0] = G_i[0];
Gi[1] = G_i[1];
Gi[2] = G_i[2];
Gi[3] = G_i[3];
Gi[4] = G_i[4];
Gi[5] = G_i[5];
Gi[6] = G_i[6];
Gi[7] = G_i[7];
Gi[8] = G_i[8];
Li[0] = L_i[0];
Li[1] = L_i[1];
Li[2] = L_i[2];
Li[3] = L_i[3];
Li[4] = L_i[4];
Li[5] = L_i[5];
Li[6] = L_i[6];
Li[7] = L_i[7];
Li[8] = L_i[8];
Real3 preGra = mR3(0.0);
Real3 velxGra = mR3(0.0);
Real3 velyGra = mR3(0.0);
Real3 velzGra = mR3(0.0);
Real4 velxLap = mR4(0.0);
Real4 velyLap = mR4(0.0);
Real4 velzLap = mR4(0.0);
Real radii = paramsD.MULT_INITSPACE * paramsD.HSML*1.241;
Real invRadii = 1.0/radii;
Real3 v_ab = (velMasA + velMasA)*0.5;
Real v_ab_m = length(v_ab);
Real bsvdT = paramsD.beta_shifting * v_ab_m * paramsD.dT ;
// get address in grid
int3 gridPos = calcGridPos(posRadA);
Real3 inner_sum = mR3(0.0);
// Real mi_bar = 0.0, r0 = 0.0;
Real sum_w_i = W3h(0.0, sortedPosRad[index].w) * cube(sortedPosRad[index].w * paramsD.MULT_INITSPACE);
int N_ = 1;
int N_s = 0;
for (int x = -1; x <= 1; x++) {
for (int y = -1; y <= 1; y++) {
for (int z = -1; z <= 1; z++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
uint startIndex = cellStart[gridHash];
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
if (j != index) {
Real3 posRadB = mR3(sortedPosRad[j]);
// Real3 dist3Alpha = posRadA - posRadB;
Real3 dist3 = Distance(posRadA, posRadB); // change from B-A to A-B
Real d = length(dist3);
if (d > SuppRadii)
continue;
Real4 rhoPresMuB = sortedRhoPreMu[j];
if (rhoPresMuA.w > -.1 && rhoPresMuB.w > -.1) { // no rigid-rigid force
continue;
}
Real invd = 1.0 / d;
// modifyPressure(rhoPresMuB, dist3Alpha);
// if (!(isfinite(rhoPresMuB.x) && isfinite(rhoPresMuB.y) && isfinite(rhoPresMuB.z))) {
// printf("Error! particle rhoPresMuB is NAN: thrown from modifyPressure !\n");
// }
Real3 velMasB = sortedVelMas[j];
if (rhoPresMuB.w > -1.0) {
int bceIndexB = gridMarkerIndex[j] - (numObjectsD.numFluidMarkers);
// if (!(bceIndexB >= 0 &&
// bceIndexB < numObjectsD.numBoundaryMarkers + numObjectsD.numRigid_SphMarkers)) {
// printf("Error! bceIndex out of bound, collideCell !\n");
// }
rhoPresMuB = rhoPreMu_ModifiedBCE[bceIndexB];
velMasB = velMas_ModifiedBCE[bceIndexB];
}
Real multViscosit = 1;
// if (!(isfinite(rhoPresMuB.x) && isfinite(rhoPresMuB.y) && isfinite(rhoPresMuB.z))) {
// printf("Error! particle rhoPresMuB is NAN: thrown from collideCell ! type=%f\n",
// rhoPresMuB.w);
// }
if(paramsD.elastic_SPH){
Real3 gradW = GradWh(dist3, paramsD.HSML);
Real3 gradW_new;
gradW_new.x = G_i[0]*gradW.x + G_i[1]*gradW.y + G_i[2]*gradW.z;
gradW_new.y = G_i[3]*gradW.x + G_i[4]*gradW.y + G_i[5]*gradW.z;
gradW_new.z = G_i[6]*gradW.x + G_i[7]*gradW.y + G_i[8]*gradW.z;
gradW = gradW_new;
derivVelRho += DifVelocityRho_ElasticSPH(gradW, dist3, d, invd,
sortedPosRad[index], sortedPosRad[j], velMasA, velMasA,
velMasB, velMasB, rhoPresMuA, rhoPresMuB, multViscosit,
sortedTauXxYyZz[index], sortedTauXyXzYz[index],
sortedTauXxYyZz[j], sortedTauXyXzYz[j]);
}
else{
derivVelRho += DifVelocityRho(Gi, dist3, d, sortedPosRad[index], sortedPosRad[j], velMasA, velMasA,
velMasB, velMasB, rhoPresMuA, rhoPresMuB, multViscosit);
preGra += GradientOperator(Gi, dist3, sortedPosRad[index], sortedPosRad[j],
-rhoPresMuA.y, rhoPresMuB.y, rhoPresMuA, rhoPresMuB);
velxGra += GradientOperator(Gi, dist3, sortedPosRad[index], sortedPosRad[j],
velMasA.x, velMasB.x, rhoPresMuA, rhoPresMuB);
velyGra += GradientOperator(Gi, dist3, sortedPosRad[index], sortedPosRad[j],
velMasA.y, velMasB.y, rhoPresMuA, rhoPresMuB);
velzGra += GradientOperator(Gi, dist3, sortedPosRad[index], sortedPosRad[j],
velMasA.z, velMasB.z, rhoPresMuA, rhoPresMuB);
velxLap += LaplacianOperator(Gi, Li, dist3, sortedPosRad[index], sortedPosRad[j],
velMasA.x, velMasB.x, rhoPresMuA, rhoPresMuB);
velyLap += LaplacianOperator(Gi, Li, dist3, sortedPosRad[index], sortedPosRad[j],
velMasA.y, velMasB.y, rhoPresMuA, rhoPresMuB);
velzLap += LaplacianOperator(Gi, Li, dist3, sortedPosRad[index], sortedPosRad[j],
velMasA.z, velMasB.z, rhoPresMuA, rhoPresMuB);
}
if (d > paramsD.HSML*1.0e-9 && sum_w_i < paramsD.C_Wi) {
sum_w_i = sum_w_i + W3h(d, sortedPosRad[index].w) * cube(sortedPosRad[index].w * paramsD.MULT_INITSPACE);
N_ = N_ + 1;
}
// find particles that have contact with this particle
if(N_s < 12 && d < 2.0*radii){
Real Pen = (radii - d) * invRadii;
Real3 r_0 = bsvdT * invd * dist3 ;
Real3 r_s = r_0 * Pen;
if (d < 1.0*radii) {
inner_sum += 3.0*r_s;
N_s = N_s + 1;
}
else if (d < 1.1*radii) {
inner_sum += 1.0*r_s;
N_s = N_s + 1;
}
else {
inner_sum += 0.1 * 1.0 * (-r_0);
N_s = N_s + 1;
}
}
}
}
}
}
}
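    // For granular (elastic SPH) markers, flag incomplete kernel support (e.g. near a free surface) through derivVelRho.w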
if(paramsD.elastic_SPH){
if(sum_w_i < paramsD.C_Wi){
derivVelRho.w = -1.0;
}
else{
derivVelRho.w = 1.0;
}
}
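    // Fluid branch: assemble the acceleration from the corrected pressure gradient and viscous Laplacian, applied only when the correction matrices G and L are well conditioned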
if(!paramsD.elastic_SPH){
Real nu = paramsD.mu0/paramsD.rho0;
Real dvxdt = -preGra.x/rhoPresMuA.x + (velxLap.x + velxGra.x*velxLap.y + velxGra.y*velxLap.z + velxGra.z*velxLap.w) * nu;
Real dvydt = -preGra.y/rhoPresMuA.x + (velyLap.x + velyGra.x*velyLap.y + velyGra.y*velyLap.z + velyGra.z*velyLap.w) * nu;
Real dvzdt = -preGra.z/rhoPresMuA.x + (velzLap.x + velzGra.x*velzLap.y + velzGra.y*velzLap.z + velzGra.z*velzLap.w) * nu;
Real drhodt = -paramsD.rho0*(velxGra.x + velyGra.y + velzGra.z);
Real Det_G = (Gi[0] * Gi[4] * Gi[8] - Gi[0] * Gi[5] * Gi[7] - Gi[1] * Gi[3] * Gi[8] +
Gi[1] * Gi[5] * Gi[6] + Gi[2] * Gi[3] * Gi[7] - Gi[2] * Gi[4] * Gi[6]);
Real Det_L = (Li[0] * Li[4] * Li[8] - Li[0] * Li[5] * Li[7] - Li[1] * Li[3] * Li[8] +
Li[1] * Li[5] * Li[6] + Li[2] * Li[3] * Li[7] - Li[2] * Li[4] * Li[6]);
if(rhoPresMuA.w == -1){
if( Det_G > 0.9 && Det_G < 1.1 && Det_L > 0.9 && Det_L < 1.1 && sum_w_i > 0.9){
derivVelRho = mR4(dvxdt, dvydt, dvzdt, drhodt);
}
}
}
if (!(isfinite(derivVelRho.x) && isfinite(derivVelRho.y) && isfinite(derivVelRho.z))) {
printf("Error! particle derivVel is NAN: thrown from ChFsiForceExplicitSPH.cu, collideD !\n");
*isErrorD = true;
}
if (!(isfinite(derivVelRho.w))) {
printf("Error! particle derivRho is NAN: thrown from ChFsiForceExplicitSPH.cu, collideD !\n");
*isErrorD = true;
}
// add gravity and other body force to fluid markers
if (rhoPresMuA.w > -1.5 && rhoPresMuA.w < -0.5){
Real3 totalFluidBodyForce3 = paramsD.bodyForce3 + paramsD.gravity;
derivVelRho += mR4(totalFluidBodyForce3);
}
sortedDerivVelRho[index] = derivVelRho;
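    // Cap the shifting vector at 5% of the distance the marker travels in one time step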
Real det_r_max = length(0.05*velMasA*paramsD.dT);
Real det_r_A = length(inner_sum);
if(det_r_A < det_r_max){
shift_r[index] = inner_sum;
}
else{
shift_r[index] = inner_sum * det_r_max/(det_r_A + 1e-9);
}
}
//--------------------------------------------------------------------------------------------------------------------------------
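// Combined Navier-Stokes and shear-stress-rate kernel for granular (elastic SPH) markers: computes the momentum derivative, stress rates, and shifting vector in a single pass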
__global__ void NS_SSR( Real4* sortedDerivVelRho,
Real3* sortedDerivTauXxYyZz,
Real3* sortedDerivTauXyXzYz,
Real3* shift_r,
Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real3* velMas_ModifiedBCE,
Real4* rhoPreMu_ModifiedBCE,
Real3* sortedTauXxYyZz,
Real3* sortedTauXyXzYz,
uint* gridMarkerIndex,
uint* cellStart,
uint* cellEnd,
const size_t numAllMarkers,
volatile bool* isErrorD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numAllMarkers)
return;
if (sortedRhoPreMu[index].w > -0.5 && sortedRhoPreMu[index].w < 0.5){
sortedDerivVelRho[index] = mR4(0.0);
sortedDerivTauXxYyZz[index] = mR3(0.0);
sortedDerivTauXyXzYz[index] = mR3(0.0);
return;
}
Real3 posRadA = mR3(sortedPosRad[index]);
Real3 velMasA = sortedVelMas[index];
Real4 rhoPresMuA = sortedRhoPreMu[index];
Real hA = sortedPosRad[index].w;
Real4 derivVelRho = mR4(0.0);
Real3 deltaV = mR3(0);
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
Real SqRadii = SuppRadii * SuppRadii;
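    // Fixed-capacity neighbor cache; assumes no more than 150 neighbors fall within the support radius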
uint j_list[150];
uint j_num = 0;
// Get address in grid
int3 gridPos = calcGridPos(posRadA);
// Find the neighbor particle list
for (int x = -1; x <= 1; x++) {
for (int y = -1; y <= 1; y++) {
for (int z = -1; z <= 1; z++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
uint startIndex = cellStart[gridHash];
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
if (j != index) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(posRadA, posRadB);
// Real d = length(dist3);
Real dd = dist3.x*dist3.x + dist3.y*dist3.y + dist3.z*dist3.z;
if (dd < SqRadii){
j_list[j_num] = j;
j_num++;
}
}
}
}
}
}
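    // Load this marker's stress tensor (symmetric; stored as diagonal and off-diagonal Real3 triplets)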
Real tauxx = sortedTauXxYyZz[index].x;
Real tauyy = sortedTauXxYyZz[index].y;
Real tauzz = sortedTauXxYyZz[index].z;
Real tauxy = sortedTauXyXzYz[index].x;
Real tauxz = sortedTauXyXzYz[index].y;
Real tauyz = sortedTauXyXzYz[index].z;
Real tauzx = tauxz;
Real tauzy = tauyz;
Real tauyx = tauxy;
Real dTauxx = 0.0;
Real dTauyy = 0.0;
Real dTauzz = 0.0;
Real dTauxy = 0.0;
Real dTauxz = 0.0;
Real dTauyz = 0.0;
// Calculate the correction matrix for gradient operator
Real G_i[9] = {1.0,0.0,0.0, 0.0,1.0,0.0, 0.0,0.0,1.0};
{
Real mGi[9] = {0.0};
for(uint n = 0; n < j_num; n++){
uint j = j_list[n];
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 rij = Distance(posRadA, posRadB);
// Real d = length(rij);
Real dd = rij.x*rij.x + rij.y*rij.y + rij.z*rij.z;
if (dd > SqRadii || sortedRhoPreMu[j].w <= -2)
continue;
Real3 grad_i_wij = GradWh(rij, hA);
Real3 grw_vj = grad_i_wij * paramsD.volume0;
mGi[0] -= rij.x * grw_vj.x;
mGi[1] -= rij.x * grw_vj.y;
mGi[2] -= rij.x * grw_vj.z;
mGi[3] -= rij.y * grw_vj.x;
mGi[4] -= rij.y * grw_vj.y;
mGi[5] -= rij.y * grw_vj.z;
mGi[6] -= rij.z * grw_vj.x;
mGi[7] -= rij.z * grw_vj.y;
mGi[8] -= rij.z * grw_vj.z;
}
Real Det = (mGi[0] * mGi[4] * mGi[8] -
mGi[0] * mGi[5] * mGi[7] -
mGi[1] * mGi[3] * mGi[8] +
mGi[1] * mGi[5] * mGi[6] +
mGi[2] * mGi[3] * mGi[7] -
mGi[2] * mGi[4] * mGi[6]);
if (abs(Det) > 0.01) {
Real OneOverDet = 1.0 / Det;
G_i[0] = (mGi[4] * mGi[8] - mGi[5] * mGi[7]) * OneOverDet;
G_i[1] = -(mGi[1] * mGi[8] - mGi[2] * mGi[7]) * OneOverDet;
G_i[2] = (mGi[1] * mGi[5] - mGi[2] * mGi[4]) * OneOverDet;
G_i[3] = -(mGi[3] * mGi[8] - mGi[5] * mGi[6]) * OneOverDet;
G_i[4] = (mGi[0] * mGi[8] - mGi[2] * mGi[6]) * OneOverDet;
G_i[5] = -(mGi[0] * mGi[5] - mGi[2] * mGi[3]) * OneOverDet;
G_i[6] = (mGi[3] * mGi[7] - mGi[4] * mGi[6]) * OneOverDet;
G_i[7] = -(mGi[0] * mGi[7] - mGi[1] * mGi[6]) * OneOverDet;
G_i[8] = (mGi[0] * mGi[4] - mGi[1] * mGi[3]) * OneOverDet;
}
}
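    // Contact radius and shifting step size used by the penalty-based particle shifting below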
Real radii = paramsD.MULT_INITSPACE * paramsD.HSML*1.241;//1.129;//1.241
Real invRadii = 1.0/radii;
    Real3 v_ab = (velMasA + velMasA)*0.5;  // note: averages velMasA with itself, so v_ab equals velMasA
Real v_ab_m = length(v_ab);
Real bsvdT = paramsD.beta_shifting * v_ab_m * paramsD.dT ;
Real3 inner_sum = mR3(0.0);
Real sum_w_i = W3h(0.0, sortedPosRad[index].w) * cube(sortedPosRad[index].w * paramsD.MULT_INITSPACE);
int N_ = 1;
int N_s = 0;
// Get the interaction from neighbor particles
for(uint n = 0; n < j_num; n++){
uint j = j_list[n];
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(posRadA, posRadB);
Real d = length(dist3);
if (d > SuppRadii)
continue;
Real4 rhoPresMuB = sortedRhoPreMu[j];
if (rhoPresMuA.w > -.1 && rhoPresMuB.w > -.1) {
continue; // No BCE-BCE interaction
}
Real invd = 1.0 / d;
Real3 velMasB = sortedVelMas[j];
if (rhoPresMuB.w > -1.0) {
int bceIndexB = gridMarkerIndex[j] - (numObjectsD.numFluidMarkers);
rhoPresMuB = rhoPreMu_ModifiedBCE[bceIndexB];
velMasB = velMas_ModifiedBCE[bceIndexB];
}
Real multViscosit = 1;
// For granular material dynamics
// Real rhoB = rhoPresMuB.x;
Real hB = sortedPosRad[j].w;
// Real mB = paramsD.markerMass;
Real3 gradW = GradWh(dist3, (hA + hB) * 0.5);
Real3 gradW_new;
gradW_new.x = G_i[0]*gradW.x + G_i[1]*gradW.y + G_i[2]*gradW.z;
gradW_new.y = G_i[3]*gradW.x + G_i[4]*gradW.y + G_i[5]*gradW.z;
gradW_new.z = G_i[6]*gradW.x + G_i[7]*gradW.y + G_i[8]*gradW.z;
gradW = gradW_new;
derivVelRho += DifVelocityRho_ElasticSPH(gradW, dist3, d, invd,
sortedPosRad[index], sortedPosRad[j], velMasA, velMasA,
velMasB, velMasB, rhoPresMuA, rhoPresMuB, multViscosit,
sortedTauXxYyZz[index], sortedTauXyXzYz[index],
sortedTauXxYyZz[j], sortedTauXyXzYz[j]);
if(sortedRhoPreMu[index].w < -0.5){
// start to calculate the stress rate
Real Gm = paramsD.G_shear; // shear modulus of the material
Real half_mB_over_rhoB = 0.5 * paramsD.volume0; //(mB / rhoB);
Real3 velMasB_new = velMasB;
if (rhoPresMuB.w > -1.0)
velMasB_new = 2.0*velMasB - velMasA; // noslip BC
Real3 vAB = velMasA - velMasB_new;
Real3 vAB_h = vAB * half_mB_over_rhoB;
// entries of strain rate tensor
Real exx = -2.0 * vAB_h.x * gradW.x;
Real eyy = -2.0 * vAB_h.y * gradW.y;
Real ezz = -2.0 * vAB_h.z * gradW.z;
Real exy = -vAB_h.x * gradW.y - vAB_h.y * gradW.x;
Real exz = -vAB_h.x * gradW.z - vAB_h.z * gradW.x;
Real eyz = -vAB_h.y * gradW.z - vAB_h.z * gradW.y;
// entries of rotation rate (spin) tensor
// Real wxx = 0.0;
// Real wyy = 0.0;
// Real wzz = 0.0;
Real wxy = -vAB_h.x * gradW.y + vAB_h.y * gradW.x;
Real wxz = -vAB_h.x * gradW.z + vAB_h.z * gradW.x;
Real wyz = -vAB_h.y * gradW.z + vAB_h.z * gradW.y;
Real wyx = -wxy;
// Real wzx = -wxz;
Real wzy = -wyz;
Real edia = 1.0 / 3.0 * (exx + eyy + ezz);
Real twoGm = 2.0 * Gm;
Real K_edia = paramsD.K_bulk*1.0*edia;
dTauxx += twoGm * (exx - edia) + 2.0 * (tauxy * wxy + tauxz * wxz) + K_edia;
dTauyy += twoGm * (eyy - edia) - 2.0 * (tauyx * wxy - tauyz * wyz) + K_edia;
dTauzz += twoGm * (ezz - edia) - 2.0 * (tauzx * wxz + tauzy * wyz) + K_edia;
dTauxy += twoGm * exy - (tauxx * wxy + tauxz * wzy) + (wxy * tauyy + wxz * tauzy);
dTauxz += twoGm * exz - (tauxx * wxz + tauxy * wyz) + (wxy * tauyz + wxz * tauzz);
dTauyz += twoGm * eyz - (tauyx * wxz + tauyy * wyz) + (wyx * tauxz + wyz * tauzz);
}
// Do integration for the kernel function
if (d > paramsD.HSML*1.0e-9) {
Real Wab = W3h(d, sortedPosRad[index].w);
sum_w_i = sum_w_i + Wab * cube(sortedPosRad[index].w * paramsD.MULT_INITSPACE);
// XSPH
if (rhoPresMuB.w > -1.5 && rhoPresMuB.w < -0.5){
deltaV += paramsD.volume0 * (velMasB - velMasA) * Wab;
}
N_ = N_ + 1;
}
// Find particles that have contact with this particle
if(d < 1.25*radii && rhoPresMuB.w <-0.5){
Real Pen = (radii - d) * invRadii;
Real3 r_0 = bsvdT * invd * dist3 ;
Real3 r_s = r_0 * Pen;
if (d < 1.0*radii) {
inner_sum += 3.0*r_s;
N_s = N_s + 1;
}
else if (d < 1.1*radii) {
inner_sum += 1.0*r_s;
N_s = N_s + 1;
}
else {
inner_sum += 0.1 * 1.0 * (-r_0);
N_s = N_s + 1;
}
}
}
    // Check for particles that do not have enough neighbors (currently only for granular material)
if(sum_w_i < paramsD.C_Wi){
derivVelRho.w = -1.0;
}
else{
derivVelRho.w = 1.0;
}
// Calculate the shifting vector
Real det_r_max = length(0.05*velMasA*paramsD.dT);
Real det_r_A = length(inner_sum);
if(det_r_A < det_r_max){
shift_r[index] = inner_sum;
}
else{
shift_r[index] = inner_sum * det_r_max/(det_r_A + 1e-9);
}
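    // Fold the XSPH correction into the shifting vector and convert the result to a velocity (divide by dT)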
shift_r[index] += paramsD.EPS_XSPH * deltaV * paramsD.dT;
shift_r[index] = shift_r[index] * (1.0 / paramsD.dT);
// Add gravity and other body force to fluid markers
if (rhoPresMuA.w > -1.5 && rhoPresMuA.w < -0.5){
Real3 totalFluidBodyForce3 = paramsD.bodyForce3 + paramsD.gravity;
derivVelRho += mR4(totalFluidBodyForce3);
}
sortedDerivVelRho[index] = derivVelRho;
sortedDerivTauXxYyZz[index] = mR3(dTauxx, dTauyy, dTauzz);
sortedDerivTauXyXzYz[index] = mR3(dTauxy, dTauxz, dTauyz);
}
//--------------------------------------------------------------------------------------------------------------------------------
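// Compute the XSPH velocity correction for fluid markers and add the particle-shifting contribution (converted to a velocity)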
__global__ void CalcVel_XSPH_D(Real3* vel_XSPH_Sorted_D, // output: new velocity
Real4* sortedPosRad_old, // input: sorted positions
Real4* sortedPosRad, // input: sorted positions
Real3* sortedVelMas, // input: sorted velocities
Real4* sortedRhoPreMu,
Real3* shift_r,
uint* gridMarkerIndex, // input: sorted particle indices
uint* cellStart,
uint* cellEnd,
const size_t numAllMarkers,
volatile bool* isErrorD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numAllMarkers)
return;
Real4 rhoPreMuA = sortedRhoPreMu[index];
Real3 velMasA = sortedVelMas[index];
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
Real3 posRadA = mR3(sortedPosRad_old[index]);
Real3 deltaV = mR3(0);
// get address in grid
int3 gridPos = calcGridPos(posRadA);
Real3 inner_sum = mR3(0.0);
// Real mi_bar = 0.0, r0 = 0.0;
Real3 dV = mR3(0.0f);
// examine neighbouring cells
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
uint startIndex = cellStart[gridHash];
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
if (j != index) { // check not colliding with self
Real3 posRadB = mR3(sortedPosRad_old[j]);
Real3 dist3 = Distance(posRadA, posRadB);
Real d = length(dist3);
if (d > SuppRadii)
continue;
Real4 rhoPresMuB = sortedRhoPreMu[j];
if (rhoPresMuB.w != -1.0)
continue;
Real3 velMasB = sortedVelMas[j];
Real rho_bar = 0.5 * (rhoPreMuA.x + rhoPresMuB.x);
deltaV += paramsD.markerMass * (velMasB - velMasA) *
W3h(d, (sortedPosRad_old[index].w + sortedPosRad_old[j].w) * 0.5) / rho_bar;
}
}
}
}
}
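    // XSPH velocity correction plus the shifting vector converted to a velocity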
vel_XSPH_Sorted_D[index] = paramsD.EPS_XSPH * deltaV + shift_r[index]*(1.0/paramsD.dT);
if (!(isfinite(vel_XSPH_Sorted_D[index].x) && isfinite(vel_XSPH_Sorted_D[index].y) &&
isfinite(vel_XSPH_Sorted_D[index].z))) {
        printf("Error! particle vXSPH is NAN: thrown from ChFsiForceExplicitSPH.cu, CalcVel_XSPH_D !\n");
*isErrorD = true;
}
}
//--------------------------------------------------------------------------------------------------------------------------------
ChFsiForceExplicitSPH::ChFsiForceExplicitSPH(std::shared_ptr<ChBce> otherBceWorker,
std::shared_ptr<SphMarkerDataD> otherSortedSphMarkersD,
std::shared_ptr<ProximityDataD> otherMarkersProximityD,
std::shared_ptr<FsiGeneralData> otherFsiGeneralData,
std::shared_ptr<SimParams> otherParamsH,
std::shared_ptr<NumberOfObjects> otherNumObjects)
: ChFsiForce(otherBceWorker,
otherSortedSphMarkersD,
otherMarkersProximityD,
otherFsiGeneralData,
otherParamsH,
otherNumObjects) {
CopyParams_NumberOfObjects(paramsH, numObjectsH);
density_initialization = 0;
}
//--------------------------------------------------------------------------------------------------------------------------------
ChFsiForceExplicitSPH::~ChFsiForceExplicitSPH() {}
//--------------------------------------------------------------------------------------------------------------------------------
void ChFsiForceExplicitSPH::Finalize() {
ChFsiForce::Finalize();
hipMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams));
hipMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(NumberOfObjects));
hipMemcpyFromSymbol(paramsH.get(), paramsD, sizeof(SimParams));
hipDeviceSynchronize();
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChFsiForceExplicitSPH::ForceSPH(std::shared_ptr<SphMarkerDataD> otherSphMarkersD,
std::shared_ptr<FsiBodiesDataD> otherFsiBodiesD,
std::shared_ptr<FsiMeshDataD> fsiMeshD) {
sphMarkersD = otherSphMarkersD;
fsiCollisionSystem->ArrangeData(sphMarkersD);
bceWorker->ModifyBceVelocity(sphMarkersD, otherFsiBodiesD);
CollideWrapper();
CalculateXSPH_velocity();
// AddGravityToFluid();
}
//--------------------------------------------------------------------------------------------------------------------------------
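// Compute per-marker derivatives: launch NS_SSR for granular (elastic SPH) material or Navier_Stokes for fluid, after an optional density re-initialization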
void ChFsiForceExplicitSPH::CollideWrapper() {
bool *isErrorH, *isErrorD;
isErrorH = (bool*)malloc(sizeof(bool));
hipMalloc((void**)&isErrorD, sizeof(bool));
*isErrorH = false;
hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
//------------------------------------------------------------------------
// thread per particle
uint numThreads, numBlocks;
computeGridSize((int)numObjectsH->numAllMarkers, 128, numBlocks, numThreads);
/* Execute the kernel */
// thrust::device_vector<Real> _sumWij_rhoi(numObjectsH->numAllMarkers);
thrust::device_vector<Real4> sortedDerivVelRho(numObjectsH->numAllMarkers);
thrust::device_vector<Real3> sortedDerivTauXxYyZz(numObjectsH->numAllMarkers);
thrust::device_vector<Real3> sortedDerivTauXyXzYz(numObjectsH->numAllMarkers);
shift_r.resize(numObjectsH->numAllMarkers);
// thrust::fill(_sumWij_rhoi.begin(), _sumWij_rhoi.end(), 0.);
// thrust::fill(shift_r.begin(), shift_r.end(), mR3(0.0));
// thrust::fill(sortedDerivVelRho.begin(), sortedDerivVelRho.end(), mR4(0.0));
// thrust::fill(sortedDerivTauXxYyZz.begin(), sortedDerivTauXxYyZz.end(), mR3(0.0));
// thrust::fill(sortedDerivTauXyXzYz.begin(), sortedDerivTauXyXzYz.end(), mR3(0.0));
// thrust::device_vector<Real4> rhoPresMuD_old = sortedSphMarkersD->rhoPresMuD;
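    // Periodically re-initialize the density field from a normalized kernel sum to limit density drift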
if (density_initialization == 0){
thrust::device_vector<Real> _sumWij_rhoi(numObjectsH->numAllMarkers);
thrust::device_vector<Real4> rhoPresMuD_old = sortedSphMarkersD->rhoPresMuD;
printf("Re-initializing density after %d steps.\n", paramsH->densityReinit);
hipLaunchKernelGGL(( calcRho_kernel), dim3(numBlocks), dim3(numThreads), 0, 0,
mR4CAST(sortedSphMarkersD->posRadD), mR4CAST(sortedSphMarkersD->rhoPresMuD), mR4CAST(rhoPresMuD_old),
R1CAST(_sumWij_rhoi), U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD),
numObjectsH->numAllMarkers, density_initialization, isErrorD);
ChUtilsDevice::Sync_CheckError(isErrorH, isErrorD, "calcRho_kernel");
}
if(paramsH->elastic_SPH){ // For granular material
// execute the kernel Navier_Stokes and Shear_Stress_Rate in one kernel
*isErrorH = false;
hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
// execute the kernel
hipLaunchKernelGGL(( NS_SSR), dim3(numBlocks), dim3(numThreads), 0, 0,
mR4CAST(sortedDerivVelRho),mR3CAST(sortedDerivTauXxYyZz), mR3CAST(sortedDerivTauXyXzYz),
mR3CAST(shift_r), mR4CAST(sortedSphMarkersD->posRadD),
mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD),
mR3CAST(bceWorker->velMas_ModifiedBCE), mR4CAST(bceWorker->rhoPreMu_ModifiedBCE),
mR3CAST(sortedSphMarkersD->tauXxYyZzD), mR3CAST(sortedSphMarkersD->tauXyXzYzD),
U1CAST(markersProximityD->gridMarkerIndexD), U1CAST(markersProximityD->cellStartD),
U1CAST(markersProximityD->cellEndD), numObjectsH->numAllMarkers, isErrorD);
ChUtilsDevice::Sync_CheckError(isErrorH, isErrorD, "Navier_Stokes and Shear_Stress_Rate");
}
else{ // For fluid
// EOS<<<numBlocks, numThreads>>>(mR4CAST(sortedSphMarkersD->rhoPresMuD),
// numObjectsH->numAllMarkers, isErrorD);
// ChUtilsDevice::Sync_CheckError(isErrorH, isErrorD, "EOS");
thrust::device_vector<Real3>::iterator iter =
thrust::max_element(sortedSphMarkersD->velMasD.begin(),
sortedSphMarkersD->velMasD.end(), compare_Real3_mag());
Real MaxVel = length(*iter);
*isErrorH = false;
hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
// execute the kernel
hipLaunchKernelGGL(( Navier_Stokes), dim3(numBlocks), dim3(numThreads), 0, 0,
mR4CAST(sortedDerivVelRho), mR3CAST(shift_r), mR4CAST(sortedSphMarkersD->posRadD),
mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD),
mR3CAST(bceWorker->velMas_ModifiedBCE), mR4CAST(bceWorker->rhoPreMu_ModifiedBCE),
mR3CAST(sortedSphMarkersD->tauXxYyZzD), mR3CAST(sortedSphMarkersD->tauXyXzYzD),
U1CAST(markersProximityD->gridMarkerIndexD), U1CAST(markersProximityD->cellStartD),
U1CAST(markersProximityD->cellEndD), numObjectsH->numAllMarkers, MaxVel, isErrorD);
ChUtilsDevice::Sync_CheckError(isErrorH, isErrorD, "Navier_Stokes");
}
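    // Scatter the sorted derivatives back to the original marker ordering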
CopySortedToOriginal_Invasive_R4(fsiGeneralData->derivVelRhoD_old,
sortedDerivVelRho, markersProximityD->gridMarkerIndexD);
if(paramsH->elastic_SPH){
CopySortedToOriginal_Invasive_R3(fsiGeneralData->derivTauXxYyZzD,
sortedDerivTauXxYyZz, markersProximityD->gridMarkerIndexD);
CopySortedToOriginal_Invasive_R3(fsiGeneralData->derivTauXyXzYzD,
sortedDerivTauXyXzYz, markersProximityD->gridMarkerIndexD);
}
sortedDerivVelRho.clear();
sortedDerivTauXxYyZz.clear();
sortedDerivTauXyXzYz.clear();
hipFree(isErrorD);
free(isErrorH);
density_initialization++;
if (density_initialization >= paramsH->densityReinit)
density_initialization = 0;
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChFsiForceExplicitSPH::CalculateXSPH_velocity() {
/* Calculate vel_XSPH */
if (vel_XSPH_Sorted_D.size() != numObjectsH->numAllMarkers) {
        printf("vel_XSPH_Sorted_D.size() %zu numObjectsH->numAllMarkers %zu \n",
vel_XSPH_Sorted_D.size(), numObjectsH->numAllMarkers);
throw std::runtime_error(
"Error! size error vel_XSPH_Sorted_D Thrown from "
"CalculateXSPH_velocity!\n");
}
bool *isErrorH, *isErrorD;
isErrorH = (bool*)malloc(sizeof(bool));
hipMalloc((void**)&isErrorD, sizeof(bool));
*isErrorH = false;
hipMemcpy(isErrorD, isErrorH, sizeof(bool), hipMemcpyHostToDevice);
//------------------------------------------------------------------------
if(paramsH->elastic_SPH){
// The XSPH vector already included in the shifting vector
CopySortedToOriginal_Invasive_R3(fsiGeneralData->vel_XSPH_D, shift_r, markersProximityD->gridMarkerIndexD);
}
else{
/* thread per particle */
uint numThreads, numBlocks;
computeGridSize((uint)numObjectsH->numAllMarkers, 128, numBlocks, numThreads);
thrust::device_vector<Real4> sortedPosRad_old = sortedSphMarkersD->posRadD;
thrust::fill(vel_XSPH_Sorted_D.begin(), vel_XSPH_Sorted_D.end(), mR3(0.0));
/* Execute the kernel */
hipLaunchKernelGGL(( CalcVel_XSPH_D), dim3(numBlocks), dim3(numThreads), 0, 0,
mR3CAST(vel_XSPH_Sorted_D), mR4CAST(sortedPosRad_old), mR4CAST(sortedSphMarkersD->posRadD),
mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD), mR3CAST(shift_r),
U1CAST(markersProximityD->gridMarkerIndexD), U1CAST(markersProximityD->cellStartD),
U1CAST(markersProximityD->cellEndD), numObjectsH->numAllMarkers, isErrorD);
ChUtilsDevice::Sync_CheckError(isErrorH, isErrorD, "CalcVel_XSPH_D");
CopySortedToOriginal_NonInvasive_R3(fsiGeneralData->vel_XSPH_D, vel_XSPH_Sorted_D,
markersProximityD->gridMarkerIndexD);
// CopySortedToOriginal_NonInvasive_R4(sphMarkersD->posRadD, sortedSphMarkersD->posRadD,
// markersProximityD->gridMarkerIndexD);
}
if (density_initialization % paramsH->densityReinit == 0)
CopySortedToOriginal_NonInvasive_R4(sphMarkersD->rhoPresMuD,
sortedSphMarkersD->rhoPresMuD, markersProximityD->gridMarkerIndexD);
hipFree(isErrorD);
free(isErrorH);
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChFsiForceExplicitSPH::AddGravityToFluid() {
// add gravity to fluid markers
    /* Add outside forces. Don't add gravity to rigids, BCE, and boundaries; it is
     * added in ChSystem */
Real3 totalFluidBodyForce3 = paramsH->bodyForce3 + paramsH->gravity;
thrust::device_vector<Real4> bodyForceD(numObjectsH->numAllMarkers);
thrust::fill(bodyForceD.begin(), bodyForceD.end(), mR4(totalFluidBodyForce3));
thrust::transform(
fsiGeneralData->derivVelRhoD_old.begin() + fsiGeneralData->referenceArray[0].x,
fsiGeneralData->derivVelRhoD_old.begin() + fsiGeneralData->referenceArray[0].y, bodyForceD.begin(),
fsiGeneralData->derivVelRhoD_old.begin() + fsiGeneralData->referenceArray[0].x, thrust::plus<Real4>());
bodyForceD.clear();
}
} // namespace fsi
} // namespace chrono
//================================================================================================================================
| 466b01567e317c29b591b41c2de03a2d7e07a71d.cu | // =============================================================================
// PROJECT CHRONO - http://projectchrono.org
//
// Copyright (c) 2014 projectchrono.org
// All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file at the top level of the distribution and at
// http://projectchrono.org/license-chrono.txt.
//
// =============================================================================
// Author: Arman Pazouki, Wei Hu
// =============================================================================
#include <thrust/extrema.h>
#include <thrust/sort.h>
#include "chrono_fsi/physics/ChFsiForceExplicitSPH.cuh"
#include "chrono_fsi/physics/ChSphGeneral.cuh"
//================================================================================================================================
namespace chrono {
namespace fsi {
//--------------------------------------------------------------------------------------------------------------------------------
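// Compute the kernel-gradient renormalization matrix G for one marker; falls back to the identity when the moment matrix is nearly singular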
__device__ __inline__ void calc_G_Matrix(Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real* G_i,
uint* cellStart,
uint* cellEnd,
const size_t numAllMarkers) {
uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx >= numAllMarkers) {
return;
}
Real3 posRadA = mR3(sortedPosRad[i_idx]);
Real h_i = sortedPosRad[i_idx].w;
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
// get address in grid
int3 gridPos = calcGridPos(posRadA);
    // These are the elements of the inverse of G
Real mGi[9] = {0.0};
// examine neighbouring cells
for (int z = -1; z <= 1; z++)
for (int y = -1; y <= 1; y++)
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
                // get start of bucket for this cell
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) { // cell is not empty
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 rij = Distance(posRadA, posRadB);
Real d = length(rij);
if (d > SuppRadii || sortedRhoPreMu[j].w <= -2)
continue;
Real3 grad_i_wij = GradWh(rij, h_i);
Real3 grw_vj = grad_i_wij * paramsD.volume0;
mGi[0] -= rij.x * grw_vj.x;
mGi[1] -= rij.x * grw_vj.y;
mGi[2] -= rij.x * grw_vj.z;
mGi[3] -= rij.y * grw_vj.x;
mGi[4] -= rij.y * grw_vj.y;
mGi[5] -= rij.y * grw_vj.z;
mGi[6] -= rij.z * grw_vj.x;
mGi[7] -= rij.z * grw_vj.y;
mGi[8] -= rij.z * grw_vj.z;
}
}
}
Real Det = (mGi[0] * mGi[4] * mGi[8] -
mGi[0] * mGi[5] * mGi[7] -
mGi[1] * mGi[3] * mGi[8] +
mGi[1] * mGi[5] * mGi[6] +
mGi[2] * mGi[3] * mGi[7] -
mGi[2] * mGi[4] * mGi[6]);
if (abs(Det) > 0.01) {
Real OneOverDet = 1.0/Det;
G_i[0] = (mGi[4] * mGi[8] - mGi[5] * mGi[7]) * OneOverDet;
G_i[1] = -(mGi[1] * mGi[8] - mGi[2] * mGi[7]) * OneOverDet;
G_i[2] = (mGi[1] * mGi[5] - mGi[2] * mGi[4]) * OneOverDet;
G_i[3] = -(mGi[3] * mGi[8] - mGi[5] * mGi[6]) * OneOverDet;
G_i[4] = (mGi[0] * mGi[8] - mGi[2] * mGi[6]) * OneOverDet;
G_i[5] = -(mGi[0] * mGi[5] - mGi[2] * mGi[3]) * OneOverDet;
G_i[6] = (mGi[3] * mGi[7] - mGi[4] * mGi[6]) * OneOverDet;
G_i[7] = -(mGi[0] * mGi[7] - mGi[1] * mGi[6]) * OneOverDet;
G_i[8] = (mGi[0] * mGi[4] - mGi[1] * mGi[3]) * OneOverDet;
} else {
for (int i = 0; i < 9; i++) {
G_i[i] = 0.0;
}
G_i[0] = 1;
G_i[4] = 1;
G_i[8] = 1;
}
}
//--------------------------------------------------------------------------------------------------------------------------------
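// Assemble the 27-entry third-order moment tensor A (rij outer rij weighted by the corrected kernel gradient), used to build the Laplacian correction matrix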
__device__ __inline__ void calc_A_Matrix(Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real* A_i,
Real* G_i,
uint* cellStart,
uint* cellEnd,
const size_t numAllMarkers) {
uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx >= numAllMarkers) {
return;
}
Real3 posRadA = mR3(sortedPosRad[i_idx]);
Real h_i = sortedPosRad[i_idx].w;
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
// get address in grid
int3 gridPos = calcGridPos(posRadA);
// examine neighbouring cells
for (int z = -1; z <= 1; z++)
for (int y = -1; y <= 1; y++)
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
                // get start of bucket for this cell
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) { // cell is not empty
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 rij = Distance(posRadA, posRadB);
Real d = length(rij);
if (d > SuppRadii || sortedRhoPreMu[j].w <= -2)
continue;
Real h_j = sortedPosRad[j].w;
Real h_ij = 0.5 * (h_j + h_i);
Real3 grad_ij = GradWh(rij, h_ij);
Real V_j = paramsD.markerMass/paramsD.rho0;
Real com_part = 0;
com_part = (G_i[0] * grad_ij.x + G_i[1] * grad_ij.y + G_i[2] * grad_ij.z) * V_j;
A_i[0] += rij.x * rij.x * com_part; // 111
A_i[1] += rij.x * rij.y * com_part; // 112
A_i[2] += rij.x * rij.z * com_part; // 113
A_i[3] += rij.y * rij.x * com_part; // 121
A_i[4] += rij.y * rij.y * com_part; // 122
A_i[5] += rij.y * rij.z * com_part; // 123
A_i[6] += rij.z * rij.x * com_part; // 131
A_i[7] += rij.z * rij.y * com_part; // 132
A_i[8] += rij.z * rij.z * com_part; // 133
com_part = (G_i[3] * grad_ij.x + G_i[4] * grad_ij.y + G_i[5] * grad_ij.z) * V_j;
A_i[9] += rij.x * rij.x * com_part; // 211
A_i[10] += rij.x * rij.y * com_part; // 212
A_i[11] += rij.x * rij.z * com_part; // 213
A_i[12] += rij.y * rij.x * com_part; // 221
A_i[13] += rij.y * rij.y * com_part; // 222
A_i[14] += rij.y * rij.z * com_part; // 223
A_i[15] += rij.z * rij.x * com_part; // 231
A_i[16] += rij.z * rij.y * com_part; // 232
A_i[17] += rij.z * rij.z * com_part; // 233
com_part = (G_i[6] * grad_ij.x + G_i[7] * grad_ij.y + G_i[8] * grad_ij.z) * V_j;
A_i[18] += rij.x * rij.x * com_part; // 311
A_i[19] += rij.x * rij.y * com_part; // 312
A_i[20] += rij.x * rij.z * com_part; // 313
A_i[21] += rij.y * rij.x * com_part; // 321
A_i[22] += rij.y * rij.y * com_part; // 322
A_i[23] += rij.y * rij.z * com_part; // 323
A_i[24] += rij.z * rij.x * com_part; // 331
A_i[25] += rij.z * rij.y * com_part; // 332
A_i[26] += rij.z * rij.z * com_part; // 333
}
}
}
}
//--------------------------------------------------------------------------------------------------------------------------------
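// Build and solve the 6x6 system for the second-derivative correction; the solution is stored as the symmetric 3x3 matrix L_i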
__device__ __inline__ void calc_L_Matrix(Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real* A_i,
Real* L_i,
Real* G_i,
uint* cellStart,
uint* cellEnd,
const size_t numAllMarkers) {
uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx >= numAllMarkers) {
return;
}
Real3 posRadA = mR3(sortedPosRad[i_idx]);
Real h_i = sortedPosRad[i_idx].w;
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
Real B[36] = {0.0};
Real L[6] = {0.0};
// get address in grid
int3 gridPos = calcGridPos(posRadA);
// examine neighbouring cells
for (int z = -1; z <= 1; z++)
for (int y = -1; y <= 1; y++)
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
                // get start of bucket for this cell
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) { // cell is not empty
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 rij = Distance(posRadA, posRadB);
Real d = length(rij);
if (d > SuppRadii || sortedRhoPreMu[j].w <= -2)
continue;
Real3 eij = rij / d;
Real h_j = sortedPosRad[j].w;
// Real m_j = paramsD.markerMass;
Real h_ij = 0.5 * (h_j + h_i);
Real3 grad_ij = GradWh(rij, h_ij);
Real V_j = paramsD.markerMass/paramsD.rho0;
Real com_part = 0;
// mn=11
Real XX = (eij.x * grad_ij.x);
Real XY = (eij.x * grad_ij.y + eij.y * grad_ij.x);
Real XZ = (eij.x * grad_ij.z + eij.z * grad_ij.x);
Real YY = (eij.y * grad_ij.y);
Real YZ = (eij.y * grad_ij.z + eij.z * grad_ij.y);
Real ZZ = (eij.z * grad_ij.z);
com_part = (A_i[0] * eij.x + A_i[9] * eij.y + A_i[18] * eij.z + rij.x * eij.x) * V_j;
B[6 * 0 + 0] += com_part * XX; // 11
B[6 * 0 + 1] += com_part * XY; // 12
B[6 * 0 + 2] += com_part * XZ; // 13
B[6 * 0 + 3] += com_part * YY; // 14
B[6 * 0 + 4] += com_part * YZ; // 15
                        B[6 * 0 + 5] += com_part * ZZ; // 16
// mn=12
com_part = (A_i[1] * eij.x + A_i[10] * eij.y + A_i[19] * eij.z + rij.x * eij.y) * V_j;
B[6 * 1 + 0] += com_part * XX; // 21
B[6 * 1 + 1] += com_part * XY; // 22
B[6 * 1 + 2] += com_part * XZ; // 23
B[6 * 1 + 3] += com_part * YY; // 24
B[6 * 1 + 4] += com_part * YZ; // 25
                        B[6 * 1 + 5] += com_part * ZZ; // 26
// mn=13
com_part = (A_i[2] * eij.x + A_i[11] * eij.y + A_i[20] * eij.z + rij.x * eij.z) * V_j;
B[6 * 2 + 0] += com_part * XX; // 31
B[6 * 2 + 1] += com_part * XY; // 32
B[6 * 2 + 2] += com_part * XZ; // 33
B[6 * 2 + 3] += com_part * YY; // 34
B[6 * 2 + 4] += com_part * YZ; // 35
B[6 * 2 + 5] += com_part * ZZ; // 36
// Note that we skip mn=21 since it is similar to mn=12
// mn=22
com_part = (A_i[4] * eij.x + A_i[13] * eij.y + A_i[22] * eij.z + rij.y * eij.y) * V_j;
B[6 * 3 + 0] += com_part * XX; // 41
B[6 * 3 + 1] += com_part * XY; // 42
B[6 * 3 + 2] += com_part * XZ; // 43
B[6 * 3 + 3] += com_part * YY; // 44
B[6 * 3 + 4] += com_part * YZ; // 45
B[6 * 3 + 5] += com_part * ZZ; // 46
// mn=23
com_part = (A_i[5] * eij.x + A_i[14] * eij.y + A_i[23] * eij.z + rij.y * eij.z) * V_j;
B[6 * 4 + 0] += com_part * XX; // 51
B[6 * 4 + 1] += com_part * XY; // 52
B[6 * 4 + 2] += com_part * XZ; // 53
B[6 * 4 + 3] += com_part * YY; // 54
B[6 * 4 + 4] += com_part * YZ; // 55
B[6 * 4 + 5] += com_part * ZZ; // 56
// mn=33
com_part = (A_i[8] * eij.x + A_i[17] * eij.y + A_i[26] * eij.z + rij.z * eij.z) * V_j;
B[6 * 5 + 0] += com_part * XX; // 61
B[6 * 5 + 1] += com_part * XY; // 62
B[6 * 5 + 2] += com_part * XZ; // 63
B[6 * 5 + 3] += com_part * YY; // 64
B[6 * 5 + 4] += com_part * YZ; // 65
B[6 * 5 + 5] += com_part * ZZ; // 66
}
}
}
inv6xdelta_mn(B, L);
L_i[0] = L[0];
L_i[1] = L[1];
L_i[2] = L[2];
L_i[3] = L[1];
L_i[4] = L[3];
L_i[5] = L[4];
L_i[6] = L[2];
L_i[7] = L[4];
L_i[8] = L[5];
// Real Det = (L_i[0] * L_i[4] * L_i[8] - L_i[0] * L_i[5] * L_i[7] - L_i[1] * L_i[3] * L_i[8] +
// L_i[1] * L_i[5] * L_i[6] + L_i[2] * L_i[3] * L_i[7] - L_i[2] * L_i[4] * L_i[6]);
// if (abs(Det) < 0.01) {
// for (int i = 0; i < 9; i++) {
// L_i[0 * 9 + i] = 0.0;
// L_i[0 * 9 + 0] = 1;
// L_i[0 * 9 + 4] = 1;
// L_i[0 * 9 + 8] = 1;
// }
// }
// printf("L Det %f\n", Det);
}
//--------------------------------------------------------------------------------------------------------------------------------
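// Rate of change of the stress tensor for granular markers: shear (2G) and bulk (K) terms from the strain rate plus spin-tensor (rotation) corrections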
__global__ void Shear_Stress_Rate(Real4* sortedPosRad,
Real4* sortedRhoPreMu,
Real3* sortedVelMas,
Real3* velMas_ModifiedBCE,
Real4* rhoPreMu_ModifiedBCE,
Real3* sortedTauXxYyZz,
Real3* sortedTauXyXzYz,
Real3* sortedDerivTauXxYyZz,
Real3* sortedDerivTauXyXzYz,
uint* gridMarkerIndex,
uint* cellStart,
uint* cellEnd,
const size_t numAllMarkers) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numAllMarkers) {
return;
}
if (sortedRhoPreMu[index].w > -0.5) {
return;
}
Real3 posRadA = mR3(sortedPosRad[index]);
Real3 velMasA = sortedVelMas[index];
Real hA = sortedPosRad[index].w;
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
Real tauxx = sortedTauXxYyZz[index].x;
Real tauyy = sortedTauXxYyZz[index].y;
Real tauzz = sortedTauXxYyZz[index].z;
Real tauxy = sortedTauXyXzYz[index].x;
Real tauxz = sortedTauXyXzYz[index].y;
Real tauyz = sortedTauXyXzYz[index].z;
Real tauzx = tauxz;
Real tauzy = tauyz;
Real tauyx = tauxy;
Real dTauxx = 0.0;
Real dTauyy = 0.0;
Real dTauzz = 0.0;
Real dTauxy = 0.0;
Real dTauxz = 0.0;
Real dTauyz = 0.0;
Real G_i[9] = {0.0};
calc_G_Matrix(sortedPosRad,sortedVelMas,sortedRhoPreMu,G_i,cellStart,cellEnd,numAllMarkers);
// get address in grid
int3 gridPos = calcGridPos(posRadA);
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
uint startIndex = cellStart[gridHash];
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
if (j != index) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(posRadA, posRadB);
Real d = length(dist3);
if (d > SuppRadii)
continue;
Real3 velMasB = sortedVelMas[j];
Real4 rhoPresMuB = sortedRhoPreMu[j];
if (rhoPresMuB.w > -1.0) {
int bceIndexB = gridMarkerIndex[j] - (numObjectsD.numFluidMarkers);
if (!(bceIndexB >= 0 &&
bceIndexB < numObjectsD.numBoundaryMarkers + numObjectsD.numRigid_SphMarkers)) {
printf("Error! bceIndex out of bound, collideCell !\n");
}
rhoPresMuB = rhoPreMu_ModifiedBCE[bceIndexB]; // to check
velMasB = velMas_ModifiedBCE[bceIndexB]; // to check
velMasB = 2.0*velMasB - velMasA; // noslip BC
}
Real rhoB = rhoPresMuB.x;
Real hB = sortedPosRad[j].w;
Real mB = paramsD.markerMass;
Real3 gradW = GradWh(dist3, (hA + hB) * 0.5);
Real3 gradW_new;
gradW_new.x = G_i[0]*gradW.x + G_i[1]*gradW.y + G_i[2]*gradW.z;
gradW_new.y = G_i[3]*gradW.x + G_i[4]*gradW.y + G_i[5]*gradW.z;
gradW_new.z = G_i[6]*gradW.x + G_i[7]*gradW.y + G_i[8]*gradW.z;
gradW = gradW_new;
// start to calculate the rate
Real Gm = paramsD.G_shear; // shear modulus of the material
Real half_mB_over_rhoB = 0.5 * (mB / rhoB);
Real3 vAB = velMasA - velMasB;
Real3 vAB_h = (velMasA - velMasB) * half_mB_over_rhoB;
// entries of strain rate tensor
Real exx = -2.0 * vAB_h.x * gradW.x;
Real eyy = -2.0 * vAB_h.y * gradW.y;
Real ezz = -2.0 * vAB_h.z * gradW.z;
Real exy = -vAB_h.x * gradW.y - vAB_h.y * gradW.x;
Real exz = -vAB_h.x * gradW.z - vAB_h.z * gradW.x;
Real eyz = -vAB_h.y * gradW.z - vAB_h.z * gradW.y;
// entries of rotation rate (spin) tensor
// Real wxx = 0.0;
// Real wyy = 0.0;
// Real wzz = 0.0;
Real wxy = -vAB_h.x * gradW.y + vAB_h.y * gradW.x;
Real wxz = -vAB_h.x * gradW.z + vAB_h.z * gradW.x;
Real wyz = -vAB_h.y * gradW.z + vAB_h.z * gradW.y;
Real wyx = -wxy;
// Real wzx = -wxz;
Real wzy = -wyz;
Real edia = 1.0 / 3.0 * (exx + eyy + ezz);
Real twoGm = 2.0 * Gm;
Real K_edia = paramsD.K_bulk*1.0*edia;
dTauxx += twoGm * (exx - edia) + 2.0 * (tauxy * wxy + tauxz * wxz) + K_edia;
dTauyy += twoGm * (eyy - edia) - 2.0 * (tauyx * wxy - tauyz * wyz) + K_edia;
dTauzz += twoGm * (ezz - edia) - 2.0 * (tauzx * wxz + tauzy * wyz) + K_edia;
dTauxy += twoGm * exy - (tauxx * wxy + tauxz * wzy) + (wxy * tauyy + wxz * tauzy);
dTauxz += twoGm * exz - (tauxx * wxz + tauxy * wyz) + (wxy * tauyz + wxz * tauzz);
dTauyz += twoGm * eyz - (tauyx * wxz + tauyy * wyz) + (wyx * tauxz + wyz * tauzz);
}
}
}
}
}
sortedDerivTauXxYyZz[index] = mR3(dTauxx, dTauyy, dTauzz);
sortedDerivTauXyXzYz[index] = mR3(dTauxy, dTauxz, dTauyz);
}
//--------------------------------------------------------------------------------------------------------------------------------
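// Density re-initialization: reset the density of fluid markers to a normalized (Shepard-type) kernel sum over fluid neighbors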
__global__ void calcRho_kernel(Real4* sortedPosRad,
Real4* sortedRhoPreMu,
Real4* sortedRhoPreMu_old,
Real* _sumWij_rhoi,
uint* cellStart,
uint* cellEnd,
const size_t numAllMarkers,
int density_reinit,
volatile bool* isErrorD) {
uint i_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (i_idx >= numAllMarkers) {
return;
}
sortedRhoPreMu_old[i_idx].y = Eos(sortedRhoPreMu_old[i_idx].x, sortedRhoPreMu_old[i_idx].w);
Real3 posRadA = mR3(sortedPosRad[i_idx]);
Real h_i = sortedPosRad[i_idx].w;
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
Real sum_mW = 0;
Real sum_mW_rho = 0.0000001;
Real sum_W = 0.0;
// get address in grid
int3 gridPos = calcGridPos(posRadA);
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
uint startIndex = cellStart[gridHash];
if (startIndex != 0xffffffff) {
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(posRadA, posRadB);
Real d = length(dist3);
if (d > SuppRadii)
continue;
if (sortedRhoPreMu_old[j].w == -1) {
Real h_j = sortedPosRad[j].w;
Real m_j = paramsD.markerMass;
Real W3 = W3h(d, 0.5 * (h_j + h_i));
sum_mW += m_j * W3;
sum_W += W3;
sum_mW_rho += m_j * W3 / sortedRhoPreMu_old[j].x;
}
}
}
}
}
}
// sortedRhoPreMu[i_idx].x = sum_mW;
if ((density_reinit == 0) && (sortedRhoPreMu[i_idx].w == -1))
sortedRhoPreMu[i_idx].x = sum_mW / sum_mW_rho;
if ((sortedRhoPreMu[i_idx].x > 3 * paramsD.rho0 || sortedRhoPreMu[i_idx].x < 0.01 * paramsD.rho0) &&
sortedRhoPreMu[i_idx].w == -1)
printf("(calcRho_kernel)density marker %d, sum_mW=%f, sum_W=%f, h_i=%f\n", i_idx, sum_mW, sum_W, h_i);
}
//--------------------------------------------------------------------------------------------------------------------------------
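// Apply the periodic-domain pressure offset (deltaPress) when the pair separation exceeds half the domain size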
__device__ __inline__ void modifyPressure(Real4& rhoPresMuB, const Real3& dist3Alpha) {
// body force in x direction
rhoPresMuB.y = (dist3Alpha.x > 0.5 * paramsD.boxDims.x) ? (rhoPresMuB.y - paramsD.deltaPress.x) : rhoPresMuB.y;
rhoPresMuB.y = (dist3Alpha.x < -0.5 * paramsD.boxDims.x) ? (rhoPresMuB.y + paramsD.deltaPress.x) : rhoPresMuB.y;
    // body force in y direction
rhoPresMuB.y = (dist3Alpha.y > 0.5 * paramsD.boxDims.y) ? (rhoPresMuB.y - paramsD.deltaPress.y) : rhoPresMuB.y;
rhoPresMuB.y = (dist3Alpha.y < -0.5 * paramsD.boxDims.y) ? (rhoPresMuB.y + paramsD.deltaPress.y) : rhoPresMuB.y;
    // body force in z direction
rhoPresMuB.y = (dist3Alpha.z > 0.5 * paramsD.boxDims.z) ? (rhoPresMuB.y - paramsD.deltaPress.z) : rhoPresMuB.y;
rhoPresMuB.y = (dist3Alpha.z < -0.5 * paramsD.boxDims.z) ? (rhoPresMuB.y + paramsD.deltaPress.z) : rhoPresMuB.y;
}
//--------------------------------------------------------------------------------------------------------------------------------
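// Solve the cubic a*x^3 + b*x^2 + c*x + d = 0 for real roots; returns zeros for complex or degenerate cases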
__device__ inline Real3 CubicSolve(Real aa, Real bb, Real cc, Real dd) {
Real disc, q, r, dum1, dum2, term1, r13;
    // check the leading coefficient before normalizing by it (avoids division by zero)
    if (aa == 0) {
        return mR3(0, 0, 0);
    }
    bb /= aa;
    cc /= aa;
    dd /= aa;
if (abs(bb) < 1e-9) {
return mR3(0, 0, 0);
}
if (abs(cc) < 1e-9) {
return mR3(0, 0, 0);
}
if (abs(dd) < 1e-9) {
return mR3(0, 0, 0);
}
q = (3.0 * cc - (bb * bb)) / 9.0;
r = -(27.0 * dd) + bb * (9.0 * cc - 2.0 * (bb * bb));
r /= 54.0;
disc = q * q * q + r * r;
term1 = (bb / 3.0);
/* dataForm.x1Im.value = 0; //The first root is always real.
if (disc > 0) { // one root real, two are complex
s = r + Math.sqrt(disc);
s = ((s < 0) ? -Math.pow(-s, (1.0/3.0)) : Math.pow(s, (1.0/3.0)));
t = r - Math.sqrt(disc);
t = ((t < 0) ? -Math.pow(-t, (1.0/3.0)) : Math.pow(t, (1.0/3.0)));
dataForm.x1Re.value = -term1 + s + t;
term1 += (s + t)/2.0;
dataForm.x3Re.value = dataForm.x2Re.value = -term1;
term1 = Math.sqrt(3.0)*(-t + s)/2;
dataForm.x2Im.value = term1;
dataForm.x3Im.value = -term1;
return;
}
// End if (disc > 0)
// The remaining options are all real
dataForm.x3Im.value = dataForm.x2Im.value = 0;
if (disc == 0){ // All roots real, at least two are equal.
r13 = ((r < 0) ? -Math.pow(-r,(1.0/3.0)) : Math.pow(r,(1.0/3.0)));
dataForm.x1Re.value = -term1 + 2.0*r13;
dataForm.x3Re.value = dataForm.x2Re.value = -(r13 + term1);
return;
} // End if (disc == 0)
*/
Real xRex, xRey, xRez;
// have complex root
if (disc > 0) {
xRex = 0.0;
xRey = 0.0;
xRez = 0.0;
return mR3(xRex, xRey, xRez);
}
// All roots real, at least two are equal.
if (disc == 0) {
if (r < 0) {
r13 = pow(-r, (1.0 / 3.0));
} else {
r13 = pow(r, (1.0 / 3.0));
}
xRex = -term1 + 2.0 * r13;
xRey = -(r13 + term1);
xRez = xRey;
return mR3(xRex, xRey, xRez);
}
// All roots are real and unequal (to get here, q < 0)
q = -q;
dum1 = q * q * q;
dum2 = r / (sqrt(dum1 + 1.0e-9));
if ((dum2 >= 0) && (dum2 <= 1)) {
dum1 = acos(dum2);
} else {
xRex = 0.0;
xRey = 0.0;
xRez = 0.0;
return mR3(xRex, xRey, xRez);
}
r13 = 2.0 * sqrt(q);
xRex = -term1 + r13 * cos(dum1 / 3.0);
xRey = -term1 + r13 * cos((dum1 + 2.0 * 3.1415926) / 3.0);
xRez = -term1 + r13 * cos((dum1 + 4.0 * 3.1415926) / 3.0);
return mR3(xRex, xRey, xRez);
}
//--------------------------------------------------------------------------------------------------------------------------------
__device__ inline Real3 CubicEigen(Real4 c1, Real4 c2, Real4 c3) {
Real a = c1.x;
Real b = c1.y;
Real c = c1.z;
Real d = c1.w;
Real l = c2.x;
Real m = c2.y;
Real n = c2.z;
Real k = c2.w;
Real p = c3.x;
Real q = c3.y;
Real r = c3.z;
Real s = c3.w;
Real D = (a * m * r + b * p * n + c * l * q) - (a * n * q + b * l * r + c * m * p) + 1.0e-9;
Real x = ((b * r * k + c * m * s + d * n * q) - (b * n * s + c * q * k + d * m * r)) / D;
Real y = ((a * n * s + c * p * k + d * l * r) - (a * r * k + c * l * s + d * n * p)) / D;
Real z = ((a * q * k + b * l * s + d * m * p) - (a * m * s + b * p * k + d * l * q)) / D;
b = b + 1.0e-9;
x = 1.0e0;
z = (-l + a * m / b) / (n - c * m / b);
y = (-a - c * z) / b;
Real R = sqrt(x * x + y * y + z * z);
x = x / R;
y = y / R;
z = z / R;
// if(abs(D) < 1){
// return mR3(0,0,0);
// }
// if(abs(m) < 0.1){
// x=0;
// y=1;
// z=0;
// return mR3(x,y,z);
// }
// else{
// y=0;
// if(abs(c) > 0.1){
// x=1;
// z=-a/c;
// return mR3(x,y,z);
// }
// if(abs(a) > 0.1){
// z=1;
// x=-c/a;
// return mR3(x,y,z);
// }
// }
return mR3(x, y, z);
}
//--------------------------------------------------------------------------------------------------------------------------------
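// Momentum and continuity contributions from one neighbor pair: pressure gradient and viscous terms, plus an artificial viscosity term that is currently disabled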
__device__ inline Real4 DifVelocityRho(float G_i[9],
Real3 dist3,
Real d,
Real4 posRadA,
Real4 posRadB,
Real3 velMasA,
Real3 vel_XSPH_A,
Real3 velMasB,
Real3 vel_XSPH_B,
Real4 rhoPresMuA,
Real4 rhoPresMuB,
Real multViscosity) {
if (rhoPresMuA.w > -1 && rhoPresMuB.w > -1)
return mR4(0.0);
Real3 gradW = GradWh(dist3, (posRadA.w + posRadB.w) * 0.5);
    // Continuity equation
Real derivRho = paramsD.markerMass * dot(vel_XSPH_A - vel_XSPH_B, gradW);
// Viscosity
Real rAB_Dot_GradWh = dot(dist3, gradW);
Real rAB_Dot_GradWh_OverDist = rAB_Dot_GradWh / (d * d + paramsD.epsMinMarkersDis * paramsD.HSML * paramsD.HSML);
Real3 derivV = - paramsD.markerMass * (rhoPresMuA.y / (rhoPresMuA.x * rhoPresMuA.x) + rhoPresMuB.y / (rhoPresMuB.x * rhoPresMuB.x)) * gradW
+ paramsD.markerMass * (8.0f * multViscosity) * paramsD.mu0
* rAB_Dot_GradWh_OverDist * (velMasA - velMasB) / square(rhoPresMuA.x + rhoPresMuB.x);
// Artificial viscosity
Real vAB_Dot_rAB = dot(velMasA - velMasB, dist3);
    if ( (vAB_Dot_rAB < 0.0) && (1==0)) { // change to 1==1 if artificial viscosity is needed
Real alpha = paramsD.Ar_vis_alpha;
Real c_ab = paramsD.Cs;
Real rho = 0.5f * (rhoPresMuA.x * rhoPresMuB.x);
Real nu = -alpha * paramsD.HSML * c_ab / rho;
Real derivM1 = -paramsD.markerMass * (nu * vAB_Dot_rAB / (d * d + paramsD.epsMinMarkersDis * paramsD.HSML * paramsD.HSML));
derivV.x += derivM1 * gradW.x;
derivV.y += derivM1 * gradW.y;
derivV.z += derivM1 * gradW.z;
}
return mR4(derivV, derivRho);
}
//--------------------------------------------------------------------------------------------------------------------------------
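// Granular (elastic SPH) pair interaction: acceleration from the two markers' stress tensors plus artificial viscosity; the density rate is not evolved here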
__device__ inline Real4 DifVelocityRho_ElasticSPH(Real3 gradW,
Real3 dist3,
Real d,
Real invd,
Real4 posRadA,
Real4 posRadB,
Real3 velMasA_in,
Real3 vel_XSPH_A_in,
Real3 velMasB_in,
Real3 vel_XSPH_B_in,
Real4 rhoPresMuA,
Real4 rhoPresMuB,
Real multViscosity,
Real3 tauXxYyZz_A_in,
Real3 tauXyXzYz_A_in,
Real3 tauXxYyZz_B_in,
Real3 tauXyXzYz_B_in) {
Real3 velMasA = velMasA_in;
Real3 velMasB = velMasB_in;
Real3 vel_XSPH_A = vel_XSPH_A_in;
Real3 vel_XSPH_B = vel_XSPH_B_in;
Real3 tauXxYyZz_A = tauXxYyZz_A_in;
Real3 tauXxYyZz_B = tauXxYyZz_B_in;
Real3 tauXyXzYz_A = tauXyXzYz_A_in;
Real3 tauXyXzYz_B = tauXyXzYz_B_in;
if (rhoPresMuA.w > -0.5 && rhoPresMuB.w > -0.5)
return mR4(0.0);
if (rhoPresMuA.w < -0.5 && rhoPresMuB.w > -0.5){
tauXxYyZz_B = tauXxYyZz_A;
tauXyXzYz_B = tauXyXzYz_A;
vel_XSPH_B = 2.0*vel_XSPH_B - vel_XSPH_A; // noslip BC
// velMasB = 2.0*velMasB - velMasA; // noslip BC
}
if (rhoPresMuA.w > -0.5 && rhoPresMuB.w < -0.5){
tauXxYyZz_A = tauXxYyZz_B;
tauXyXzYz_A = tauXyXzYz_B;
vel_XSPH_A = 2.0*vel_XSPH_A - vel_XSPH_B;
}
Real txxA = tauXxYyZz_A.x;
Real tyyA = tauXxYyZz_A.y;
Real tzzA = tauXxYyZz_A.z;
Real txyA = tauXyXzYz_A.x;
Real txzA = tauXyXzYz_A.y;
Real tyzA = tauXyXzYz_A.z;
Real txxB = tauXxYyZz_B.x;
Real tyyB = tauXxYyZz_B.y;
Real tzzB = tauXxYyZz_B.z;
Real txyB = tauXyXzYz_B.x;
Real txzB = tauXyXzYz_B.y;
Real tyzB = tauXyXzYz_B.z;
// Real rhoA = rhoPresMuA.x;
// Real rhoB = rhoPresMuB.x;
// Real rhoA2 = rhoA * rhoA;
// Real rhoB2 = rhoB * rhoB;
Real Mass = paramsD.markerMass;
Real MassOverRhoA2 = Mass * paramsD.invrho0 * paramsD.invrho0;
Real MassOverRhoB2 = MassOverRhoA2;
Real3 MA_gradW = gradW * MassOverRhoA2;
Real3 MB_gradW = gradW * MassOverRhoB2;
Real derivVx = (txxA * MA_gradW.x + txyA * MA_gradW.y + txzA * MA_gradW.z) +
(txxB * MB_gradW.x + txyB * MB_gradW.y + txzB * MB_gradW.z) ;
Real derivVy = (txyA * MA_gradW.x + tyyA * MA_gradW.y + tyzA * MA_gradW.z) +
(txyB * MB_gradW.x + tyyB * MB_gradW.y + tyzB * MB_gradW.z) ;
Real derivVz = (txzA * MA_gradW.x + tyzA * MA_gradW.y + tzzA * MA_gradW.z) +
(txzB * MB_gradW.x + tyzB * MB_gradW.y + tzzB * MB_gradW.z) ;
// TODO: Visco-plastic model
// Real vel = length(velMasA);
// if(vel > 0.3){
// Real rAB_Dot_GradWh = dot(dist3, gradW);
// Real rAB_Dot_GradWh_OverDist = rAB_Dot_GradWh / (d * d + paramsD.epsMinMarkersDis * paramsD.HSML * paramsD.HSML);
// Real3 derivV = - paramsD.markerMass *(rhoPresMuA.y / (rhoPresMuA.x * rhoPresMuA.x) + rhoPresMuB.y / (rhoPresMuB.x * rhoPresMuB.x)) * gradW
// + paramsD.markerMass * (8.0f * multViscosity) * paramsD.mu_fric_s
// * pow(rhoPresMuA.x + rhoPresMuB.x, Real(-2)) * rAB_Dot_GradWh_OverDist * (velMasA - velMasB);
// derivVx = derivV.x;
// derivVy = derivV.y;
// derivVz = derivV.z;
// }
// Artificial viscosity
Real vAB_Dot_rAB = dot(velMasA - velMasB, dist3);
// if (vAB_Dot_rAB < 0.0) {
Real alpha = paramsD.Ar_vis_alpha;
Real c_ab = paramsD.Cs;
// Real rho = 0.5f * (rhoA + rhoB);
Real nu = -alpha * paramsD.HSML * c_ab * paramsD.invrho0;
Real derivM1 = -Mass * (nu * vAB_Dot_rAB * (invd * invd));//+ paramsD.epsMinMarkersDis * paramsD.HSML * paramsD.HSML
derivVx += derivM1 * gradW.x;
derivVy += derivM1 * gradW.y;
derivVz += derivM1 * gradW.z;
// }
    // TODO: Damping force
// if (1 == 0) {
// Real xi0 = paramsD.Vis_Dam;
// Real E0 = paramsD.E_young;
// Real h0 = paramsD.HSML;
// Real Cd = xi0 * sqrt(E0 / (rhoA * h0 * h0));
// derivVx -= Cd * velMasA.x;
// derivVy -= Cd * velMasA.y;
// derivVz -= Cd * velMasA.z;
// }
// Real derivRho = Mass * dot(vel_XSPH_A - vel_XSPH_B, gradW);
return mR4(derivVx, derivVy, derivVz, 0.0);
}
//--------------------------------------------------------------------------------------------------------------------------------
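// Renormalized SPH gradient of a scalar field from one neighbor, using the correction matrix G_i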
__device__ inline Real3 GradientOperator( float G_i[9],
Real3 dist3,
Real4 posRadA,
Real4 posRadB,
Real fA,
Real fB,
Real4 rhoPresMuA,
Real4 rhoPresMuB) {
Real3 gradW = GradWh(dist3, (posRadA.w + posRadB.w) * 0.5);
Real3 gradW_new;
gradW_new.x = G_i[0]*gradW.x + G_i[1]*gradW.y + G_i[2]*gradW.z;
gradW_new.y = G_i[3]*gradW.x + G_i[4]*gradW.y + G_i[5]*gradW.z;
gradW_new.z = G_i[6]*gradW.x + G_i[7]*gradW.y + G_i[8]*gradW.z;
Real Vol = paramsD.markerMass/rhoPresMuB.x;
Real fji = fB - fA;
Real Gra_ij_x = fji*gradW_new.x * Vol;
Real Gra_ij_y = fji*gradW_new.y * Vol;
Real Gra_ij_z = fji*gradW_new.z * Vol;
return mR3(Gra_ij_x, Gra_ij_y, Gra_ij_z);
}
//--------------------------------------------------------------------------------------------------------------------------------
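// Corrected SPH Laplacian contribution from one neighbor: the x component carries the field-difference term, the y/z/w components carry coefficients later combined with the field gradient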
__device__ inline Real4 LaplacianOperator( float G_i[9],
float L_i[9],
Real3 dist3,
Real4 posRadA,
Real4 posRadB,
Real fA,
Real fB,
Real4 rhoPresMuA,
Real4 rhoPresMuB) {
Real3 gradW = GradWh(dist3, (posRadA.w + posRadB.w) * 0.5);
Real d = length(dist3);
Real3 eij = dist3/d;
Real Vol = paramsD.markerMass/rhoPresMuB.x;
Real fij = fA - fB;
Real ex_Gwx = eij.x*gradW.x;
Real ex_Gwy = eij.x*gradW.y;
Real ex_Gwz = eij.x*gradW.z;
Real ey_Gwx = eij.y*gradW.x;
Real ey_Gwy = eij.y*gradW.y;
Real ey_Gwz = eij.y*gradW.z;
Real ez_Gwx = eij.z*gradW.x;
Real ez_Gwy = eij.z*gradW.y;
Real ez_Gwz = eij.z*gradW.z;
Real Part1 = L_i[0]*ex_Gwx + L_i[1]*ex_Gwy + L_i[2]*ex_Gwz
+ L_i[3]*ey_Gwx + L_i[4]*ey_Gwy + L_i[5]*ey_Gwz
+ L_i[6]*ez_Gwx + L_i[7]*ez_Gwy + L_i[8]*ez_Gwz;
Real Part2 = fij/d * Vol;
Real3 Part3 = mR3(-eij.x, -eij.y, -eij.z) * Vol;
return mR4(2.0*Part1*Part2, Part3.x*(2.0*Part1), Part3.y*(2.0*Part1), Part3.z*(2.0*Part1));
}
//--------------------------------------------------------------------------------------------------------------------------------
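// Update each marker's pressure from its density via the equation of state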
__global__ void EOS(Real4* sortedRhoPreMu, uint numAllMarkers, volatile bool* isErrorD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numAllMarkers)
return;
sortedRhoPreMu[index].y = Eos(sortedRhoPreMu[index].x, sortedRhoPreMu[index].w);
}
//--------------------------------------------------------------------------------------------------------------------------------
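// Per-marker force kernel: loops over neighboring cells and accumulates momentum/continuity contributions for the granular (elastic SPH) or fluid branch, plus the particle-shifting vector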
__global__ void Navier_Stokes(Real4* sortedDerivVelRho,
Real3* shift_r,
Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real3* velMas_ModifiedBCE,
Real4* rhoPreMu_ModifiedBCE,
Real3* sortedTauXxYyZz,
Real3* sortedTauXyXzYz,
uint* gridMarkerIndex,
uint* cellStart,
uint* cellEnd,
const size_t numAllMarkers,
Real MaxVel,
volatile bool* isErrorD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numAllMarkers)
return;
if (sortedRhoPreMu[index].w > -0.5 && sortedRhoPreMu[index].w < 0.5){
sortedDerivVelRho[index] = mR4(0.0);
return;
}
Real3 posRadA = mR3(sortedPosRad[index]);
Real3 velMasA = sortedVelMas[index];
Real4 rhoPresMuA = sortedRhoPreMu[index];
Real4 derivVelRho = mR4(0.0);
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
Real G_i[9] = {0.0};
Real A_i[27] = {0.0};
Real L_i[9] = {0.0};
calc_G_Matrix(sortedPosRad,sortedVelMas,sortedRhoPreMu,G_i,cellStart,cellEnd,numAllMarkers);
if(!paramsD.elastic_SPH){
calc_A_Matrix(sortedPosRad,sortedVelMas,sortedRhoPreMu,A_i,G_i,cellStart,cellEnd,numAllMarkers);
calc_L_Matrix(sortedPosRad,sortedVelMas,sortedRhoPreMu,A_i,L_i,G_i,cellStart,cellEnd,numAllMarkers);
}
float Gi[9] = {1.0,0.0,0.0, 0.0,1.0,0.0, 0.0,0.0,1.0};
float Li[9] = {1.0,0.0,0.0, 0.0,1.0,0.0, 0.0,0.0,1.0};
Gi[0] = G_i[0];
Gi[1] = G_i[1];
Gi[2] = G_i[2];
Gi[3] = G_i[3];
Gi[4] = G_i[4];
Gi[5] = G_i[5];
Gi[6] = G_i[6];
Gi[7] = G_i[7];
Gi[8] = G_i[8];
Li[0] = L_i[0];
Li[1] = L_i[1];
Li[2] = L_i[2];
Li[3] = L_i[3];
Li[4] = L_i[4];
Li[5] = L_i[5];
Li[6] = L_i[6];
Li[7] = L_i[7];
Li[8] = L_i[8];
Real3 preGra = mR3(0.0);
Real3 velxGra = mR3(0.0);
Real3 velyGra = mR3(0.0);
Real3 velzGra = mR3(0.0);
Real4 velxLap = mR4(0.0);
Real4 velyLap = mR4(0.0);
Real4 velzLap = mR4(0.0);
Real radii = paramsD.MULT_INITSPACE * paramsD.HSML*1.241;
Real invRadii = 1.0/radii;
    Real3 v_ab = (velMasA + velMasA)*0.5;  // note: averages velMasA with itself, so v_ab equals velMasA
Real v_ab_m = length(v_ab);
Real bsvdT = paramsD.beta_shifting * v_ab_m * paramsD.dT ;
// get address in grid
int3 gridPos = calcGridPos(posRadA);
Real3 inner_sum = mR3(0.0);
// Real mi_bar = 0.0, r0 = 0.0;
Real sum_w_i = W3h(0.0, sortedPosRad[index].w) * cube(sortedPosRad[index].w * paramsD.MULT_INITSPACE);
int N_ = 1;
int N_s = 0;
for (int x = -1; x <= 1; x++) {
for (int y = -1; y <= 1; y++) {
for (int z = -1; z <= 1; z++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
uint startIndex = cellStart[gridHash];
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
if (j != index) {
Real3 posRadB = mR3(sortedPosRad[j]);
// Real3 dist3Alpha = posRadA - posRadB;
Real3 dist3 = Distance(posRadA, posRadB); // change from B-A to A-B
Real d = length(dist3);
if (d > SuppRadii)
continue;
Real4 rhoPresMuB = sortedRhoPreMu[j];
if (rhoPresMuA.w > -.1 && rhoPresMuB.w > -.1) { // no rigid-rigid force
continue;
}
Real invd = 1.0 / d;
// modifyPressure(rhoPresMuB, dist3Alpha);
// if (!(isfinite(rhoPresMuB.x) && isfinite(rhoPresMuB.y) && isfinite(rhoPresMuB.z))) {
// printf("Error! particle rhoPresMuB is NAN: thrown from modifyPressure !\n");
// }
Real3 velMasB = sortedVelMas[j];
if (rhoPresMuB.w > -1.0) {
int bceIndexB = gridMarkerIndex[j] - (numObjectsD.numFluidMarkers);
// if (!(bceIndexB >= 0 &&
// bceIndexB < numObjectsD.numBoundaryMarkers + numObjectsD.numRigid_SphMarkers)) {
// printf("Error! bceIndex out of bound, collideCell !\n");
// }
rhoPresMuB = rhoPreMu_ModifiedBCE[bceIndexB];
velMasB = velMas_ModifiedBCE[bceIndexB];
}
Real multViscosit = 1;
// if (!(isfinite(rhoPresMuB.x) && isfinite(rhoPresMuB.y) && isfinite(rhoPresMuB.z))) {
// printf("Error! particle rhoPresMuB is NAN: thrown from collideCell ! type=%f\n",
// rhoPresMuB.w);
// }
if(paramsD.elastic_SPH){
Real3 gradW = GradWh(dist3, paramsD.HSML);
Real3 gradW_new;
gradW_new.x = G_i[0]*gradW.x + G_i[1]*gradW.y + G_i[2]*gradW.z;
gradW_new.y = G_i[3]*gradW.x + G_i[4]*gradW.y + G_i[5]*gradW.z;
gradW_new.z = G_i[6]*gradW.x + G_i[7]*gradW.y + G_i[8]*gradW.z;
gradW = gradW_new;
derivVelRho += DifVelocityRho_ElasticSPH(gradW, dist3, d, invd,
sortedPosRad[index], sortedPosRad[j], velMasA, velMasA,
velMasB, velMasB, rhoPresMuA, rhoPresMuB, multViscosit,
sortedTauXxYyZz[index], sortedTauXyXzYz[index],
sortedTauXxYyZz[j], sortedTauXyXzYz[j]);
}
else{
derivVelRho += DifVelocityRho(Gi, dist3, d, sortedPosRad[index], sortedPosRad[j], velMasA, velMasA,
velMasB, velMasB, rhoPresMuA, rhoPresMuB, multViscosit);
preGra += GradientOperator(Gi, dist3, sortedPosRad[index], sortedPosRad[j],
-rhoPresMuA.y, rhoPresMuB.y, rhoPresMuA, rhoPresMuB);
velxGra += GradientOperator(Gi, dist3, sortedPosRad[index], sortedPosRad[j],
velMasA.x, velMasB.x, rhoPresMuA, rhoPresMuB);
velyGra += GradientOperator(Gi, dist3, sortedPosRad[index], sortedPosRad[j],
velMasA.y, velMasB.y, rhoPresMuA, rhoPresMuB);
velzGra += GradientOperator(Gi, dist3, sortedPosRad[index], sortedPosRad[j],
velMasA.z, velMasB.z, rhoPresMuA, rhoPresMuB);
velxLap += LaplacianOperator(Gi, Li, dist3, sortedPosRad[index], sortedPosRad[j],
velMasA.x, velMasB.x, rhoPresMuA, rhoPresMuB);
velyLap += LaplacianOperator(Gi, Li, dist3, sortedPosRad[index], sortedPosRad[j],
velMasA.y, velMasB.y, rhoPresMuA, rhoPresMuB);
velzLap += LaplacianOperator(Gi, Li, dist3, sortedPosRad[index], sortedPosRad[j],
velMasA.z, velMasB.z, rhoPresMuA, rhoPresMuB);
}
if (d > paramsD.HSML*1.0e-9 && sum_w_i < paramsD.C_Wi) {
sum_w_i = sum_w_i + W3h(d, sortedPosRad[index].w) * cube(sortedPosRad[index].w * paramsD.MULT_INITSPACE);
N_ = N_ + 1;
}
// find particles that have contact with this particle
if(N_s < 12 && d < 2.0*radii){
Real Pen = (radii - d) * invRadii;
Real3 r_0 = bsvdT * invd * dist3 ;
Real3 r_s = r_0 * Pen;
if (d < 1.0*radii) {
inner_sum += 3.0*r_s;
N_s = N_s + 1;
}
else if (d < 1.1*radii) {
inner_sum += 1.0*r_s;
N_s = N_s + 1;
}
else {
inner_sum += 0.1 * 1.0 * (-r_0);
N_s = N_s + 1;
}
}
}
}
}
}
}
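    // Flag deficient kernel support for the elastic/granular case: derivVelRho.w = -1 when the
    // kernel sum is below C_Wi, +1 otherwise (presumably consumed downstream as a free-surface indicator).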
if(paramsD.elastic_SPH){
if(sum_w_i < paramsD.C_Wi){
derivVelRho.w = -1.0;
}
else{
derivVelRho.w = 1.0;
}
}
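    // Fluid branch: rebuild the acceleration from the corrected pressure gradient and viscous Laplacian,
    // and the density rate from the velocity divergence, but only when the correction matrices are well conditioned.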
if(!paramsD.elastic_SPH){
Real nu = paramsD.mu0/paramsD.rho0;
Real dvxdt = -preGra.x/rhoPresMuA.x + (velxLap.x + velxGra.x*velxLap.y + velxGra.y*velxLap.z + velxGra.z*velxLap.w) * nu;
Real dvydt = -preGra.y/rhoPresMuA.x + (velyLap.x + velyGra.x*velyLap.y + velyGra.y*velyLap.z + velyGra.z*velyLap.w) * nu;
Real dvzdt = -preGra.z/rhoPresMuA.x + (velzLap.x + velzGra.x*velzLap.y + velzGra.y*velzLap.z + velzGra.z*velzLap.w) * nu;
Real drhodt = -paramsD.rho0*(velxGra.x + velyGra.y + velzGra.z);
Real Det_G = (Gi[0] * Gi[4] * Gi[8] - Gi[0] * Gi[5] * Gi[7] - Gi[1] * Gi[3] * Gi[8] +
Gi[1] * Gi[5] * Gi[6] + Gi[2] * Gi[3] * Gi[7] - Gi[2] * Gi[4] * Gi[6]);
Real Det_L = (Li[0] * Li[4] * Li[8] - Li[0] * Li[5] * Li[7] - Li[1] * Li[3] * Li[8] +
Li[1] * Li[5] * Li[6] + Li[2] * Li[3] * Li[7] - Li[2] * Li[4] * Li[6]);
if(rhoPresMuA.w == -1){
if( Det_G > 0.9 && Det_G < 1.1 && Det_L > 0.9 && Det_L < 1.1 && sum_w_i > 0.9){
derivVelRho = mR4(dvxdt, dvydt, dvzdt, drhodt);
}
}
}
if (!(isfinite(derivVelRho.x) && isfinite(derivVelRho.y) && isfinite(derivVelRho.z))) {
printf("Error! particle derivVel is NAN: thrown from ChFsiForceExplicitSPH.cu, collideD !\n");
*isErrorD = true;
}
if (!(isfinite(derivVelRho.w))) {
printf("Error! particle derivRho is NAN: thrown from ChFsiForceExplicitSPH.cu, collideD !\n");
*isErrorD = true;
}
// add gravity and other body force to fluid markers
if (rhoPresMuA.w > -1.5 && rhoPresMuA.w < -0.5){
Real3 totalFluidBodyForce3 = paramsD.bodyForce3 + paramsD.gravity;
derivVelRho += mR4(totalFluidBodyForce3);
}
sortedDerivVelRho[index] = derivVelRho;
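    // Limit the particle-shifting vector to 5% of the distance the particle travels in one time step.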
Real det_r_max = length(0.05*velMasA*paramsD.dT);
Real det_r_A = length(inner_sum);
if(det_r_A < det_r_max){
shift_r[index] = inner_sum;
}
else{
shift_r[index] = inner_sum * det_r_max/(det_r_A + 1e-9);
}
}
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void NS_SSR( Real4* sortedDerivVelRho,
Real3* sortedDerivTauXxYyZz,
Real3* sortedDerivTauXyXzYz,
Real3* shift_r,
Real4* sortedPosRad,
Real3* sortedVelMas,
Real4* sortedRhoPreMu,
Real3* velMas_ModifiedBCE,
Real4* rhoPreMu_ModifiedBCE,
Real3* sortedTauXxYyZz,
Real3* sortedTauXyXzYz,
uint* gridMarkerIndex,
uint* cellStart,
uint* cellEnd,
const size_t numAllMarkers,
volatile bool* isErrorD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numAllMarkers)
return;
if (sortedRhoPreMu[index].w > -0.5 && sortedRhoPreMu[index].w < 0.5){
sortedDerivVelRho[index] = mR4(0.0);
sortedDerivTauXxYyZz[index] = mR3(0.0);
sortedDerivTauXyXzYz[index] = mR3(0.0);
return;
}
Real3 posRadA = mR3(sortedPosRad[index]);
Real3 velMasA = sortedVelMas[index];
Real4 rhoPresMuA = sortedRhoPreMu[index];
Real hA = sortedPosRad[index].w;
Real4 derivVelRho = mR4(0.0);
Real3 deltaV = mR3(0);
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
Real SqRadii = SuppRadii * SuppRadii;
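    // Fixed-capacity per-thread neighbor list (up to 150 indices), built once and reused in the loops below.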
uint j_list[150];
uint j_num = 0;
// Get address in grid
int3 gridPos = calcGridPos(posRadA);
// Find the neighbor particle list
for (int x = -1; x <= 1; x++) {
for (int y = -1; y <= 1; y++) {
for (int z = -1; z <= 1; z++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
uint startIndex = cellStart[gridHash];
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
if (j != index) {
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(posRadA, posRadB);
// Real d = length(dist3);
Real dd = dist3.x*dist3.x + dist3.y*dist3.y + dist3.z*dist3.z;
if (dd < SqRadii){
j_list[j_num] = j;
j_num++;
}
}
}
}
}
}
Real tauxx = sortedTauXxYyZz[index].x;
Real tauyy = sortedTauXxYyZz[index].y;
Real tauzz = sortedTauXxYyZz[index].z;
Real tauxy = sortedTauXyXzYz[index].x;
Real tauxz = sortedTauXyXzYz[index].y;
Real tauyz = sortedTauXyXzYz[index].z;
Real tauzx = tauxz;
Real tauzy = tauyz;
Real tauyx = tauxy;
Real dTauxx = 0.0;
Real dTauyy = 0.0;
Real dTauzz = 0.0;
Real dTauxy = 0.0;
Real dTauxz = 0.0;
Real dTauyz = 0.0;
// Calculate the correction matrix for gradient operator
Real G_i[9] = {1.0,0.0,0.0, 0.0,1.0,0.0, 0.0,0.0,1.0};
{
Real mGi[9] = {0.0};
for(uint n = 0; n < j_num; n++){
uint j = j_list[n];
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 rij = Distance(posRadA, posRadB);
// Real d = length(rij);
Real dd = rij.x*rij.x + rij.y*rij.y + rij.z*rij.z;
if (dd > SqRadii || sortedRhoPreMu[j].w <= -2)
continue;
Real3 grad_i_wij = GradWh(rij, hA);
Real3 grw_vj = grad_i_wij * paramsD.volume0;
mGi[0] -= rij.x * grw_vj.x;
mGi[1] -= rij.x * grw_vj.y;
mGi[2] -= rij.x * grw_vj.z;
mGi[3] -= rij.y * grw_vj.x;
mGi[4] -= rij.y * grw_vj.y;
mGi[5] -= rij.y * grw_vj.z;
mGi[6] -= rij.z * grw_vj.x;
mGi[7] -= rij.z * grw_vj.y;
mGi[8] -= rij.z * grw_vj.z;
}
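        // Invert the 3x3 moment matrix only when it is reasonably conditioned (|det| > 0.01);
        // otherwise G_i remains the identity.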
Real Det = (mGi[0] * mGi[4] * mGi[8] -
mGi[0] * mGi[5] * mGi[7] -
mGi[1] * mGi[3] * mGi[8] +
mGi[1] * mGi[5] * mGi[6] +
mGi[2] * mGi[3] * mGi[7] -
mGi[2] * mGi[4] * mGi[6]);
if (abs(Det) > 0.01) {
Real OneOverDet = 1.0 / Det;
G_i[0] = (mGi[4] * mGi[8] - mGi[5] * mGi[7]) * OneOverDet;
G_i[1] = -(mGi[1] * mGi[8] - mGi[2] * mGi[7]) * OneOverDet;
G_i[2] = (mGi[1] * mGi[5] - mGi[2] * mGi[4]) * OneOverDet;
G_i[3] = -(mGi[3] * mGi[8] - mGi[5] * mGi[6]) * OneOverDet;
G_i[4] = (mGi[0] * mGi[8] - mGi[2] * mGi[6]) * OneOverDet;
G_i[5] = -(mGi[0] * mGi[5] - mGi[2] * mGi[3]) * OneOverDet;
G_i[6] = (mGi[3] * mGi[7] - mGi[4] * mGi[6]) * OneOverDet;
G_i[7] = -(mGi[0] * mGi[7] - mGi[1] * mGi[6]) * OneOverDet;
G_i[8] = (mGi[0] * mGi[4] - mGi[1] * mGi[3]) * OneOverDet;
}
}
Real radii = paramsD.MULT_INITSPACE * paramsD.HSML*1.241;//1.129;//1.241
Real invRadii = 1.0/radii;
Real3 v_ab = (velMasA + velMasA)*0.5;
Real v_ab_m = length(v_ab);
Real bsvdT = paramsD.beta_shifting * v_ab_m * paramsD.dT ;
Real3 inner_sum = mR3(0.0);
Real sum_w_i = W3h(0.0, sortedPosRad[index].w) * cube(sortedPosRad[index].w * paramsD.MULT_INITSPACE);
int N_ = 1;
int N_s = 0;
// Get the interaction from neighbor particles
for(uint n = 0; n < j_num; n++){
uint j = j_list[n];
Real3 posRadB = mR3(sortedPosRad[j]);
Real3 dist3 = Distance(posRadA, posRadB);
Real d = length(dist3);
if (d > SuppRadii)
continue;
Real4 rhoPresMuB = sortedRhoPreMu[j];
if (rhoPresMuA.w > -.1 && rhoPresMuB.w > -.1) {
continue; // No BCE-BCE interaction
}
Real invd = 1.0 / d;
Real3 velMasB = sortedVelMas[j];
if (rhoPresMuB.w > -1.0) {
int bceIndexB = gridMarkerIndex[j] - (numObjectsD.numFluidMarkers);
rhoPresMuB = rhoPreMu_ModifiedBCE[bceIndexB];
velMasB = velMas_ModifiedBCE[bceIndexB];
}
Real multViscosit = 1;
// For granular material dynamics
// Real rhoB = rhoPresMuB.x;
Real hB = sortedPosRad[j].w;
// Real mB = paramsD.markerMass;
Real3 gradW = GradWh(dist3, (hA + hB) * 0.5);
Real3 gradW_new;
gradW_new.x = G_i[0]*gradW.x + G_i[1]*gradW.y + G_i[2]*gradW.z;
gradW_new.y = G_i[3]*gradW.x + G_i[4]*gradW.y + G_i[5]*gradW.z;
gradW_new.z = G_i[6]*gradW.x + G_i[7]*gradW.y + G_i[8]*gradW.z;
gradW = gradW_new;
derivVelRho += DifVelocityRho_ElasticSPH(gradW, dist3, d, invd,
sortedPosRad[index], sortedPosRad[j], velMasA, velMasA,
velMasB, velMasB, rhoPresMuA, rhoPresMuB, multViscosit,
sortedTauXxYyZz[index], sortedTauXyXzYz[index],
sortedTauXxYyZz[j], sortedTauXyXzYz[j]);
if(sortedRhoPreMu[index].w < -0.5){
// start to calculate the stress rate
Real Gm = paramsD.G_shear; // shear modulus of the material
Real half_mB_over_rhoB = 0.5 * paramsD.volume0; //(mB / rhoB);
Real3 velMasB_new = velMasB;
if (rhoPresMuB.w > -1.0)
velMasB_new = 2.0*velMasB - velMasA; // noslip BC
Real3 vAB = velMasA - velMasB_new;
Real3 vAB_h = vAB * half_mB_over_rhoB;
// entries of strain rate tensor
Real exx = -2.0 * vAB_h.x * gradW.x;
Real eyy = -2.0 * vAB_h.y * gradW.y;
Real ezz = -2.0 * vAB_h.z * gradW.z;
Real exy = -vAB_h.x * gradW.y - vAB_h.y * gradW.x;
Real exz = -vAB_h.x * gradW.z - vAB_h.z * gradW.x;
Real eyz = -vAB_h.y * gradW.z - vAB_h.z * gradW.y;
// entries of rotation rate (spin) tensor
// Real wxx = 0.0;
// Real wyy = 0.0;
// Real wzz = 0.0;
Real wxy = -vAB_h.x * gradW.y + vAB_h.y * gradW.x;
Real wxz = -vAB_h.x * gradW.z + vAB_h.z * gradW.x;
Real wyz = -vAB_h.y * gradW.z + vAB_h.z * gradW.y;
Real wyx = -wxy;
// Real wzx = -wxz;
Real wzy = -wyz;
Real edia = 1.0 / 3.0 * (exx + eyy + ezz);
Real twoGm = 2.0 * Gm;
Real K_edia = paramsD.K_bulk*1.0*edia;
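            // Stress rate: deviatoric elastic part 2G*(e - edia), spin (rotation) corrections,
            // and the volumetric contribution K_edia on the diagonal terms.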
dTauxx += twoGm * (exx - edia) + 2.0 * (tauxy * wxy + tauxz * wxz) + K_edia;
dTauyy += twoGm * (eyy - edia) - 2.0 * (tauyx * wxy - tauyz * wyz) + K_edia;
dTauzz += twoGm * (ezz - edia) - 2.0 * (tauzx * wxz + tauzy * wyz) + K_edia;
dTauxy += twoGm * exy - (tauxx * wxy + tauxz * wzy) + (wxy * tauyy + wxz * tauzy);
dTauxz += twoGm * exz - (tauxx * wxz + tauxy * wyz) + (wxy * tauyz + wxz * tauzz);
dTauyz += twoGm * eyz - (tauyx * wxz + tauyy * wyz) + (wyx * tauxz + wyz * tauzz);
}
// Do integration for the kernel function
if (d > paramsD.HSML*1.0e-9) {
Real Wab = W3h(d, sortedPosRad[index].w);
sum_w_i = sum_w_i + Wab * cube(sortedPosRad[index].w * paramsD.MULT_INITSPACE);
// XSPH
if (rhoPresMuB.w > -1.5 && rhoPresMuB.w < -0.5){
deltaV += paramsD.volume0 * (velMasB - velMasA) * Wab;
}
N_ = N_ + 1;
}
// Find particles that have contact with this particle
if(d < 1.25*radii && rhoPresMuB.w <-0.5){
Real Pen = (radii - d) * invRadii;
Real3 r_0 = bsvdT * invd * dist3 ;
Real3 r_s = r_0 * Pen;
if (d < 1.0*radii) {
inner_sum += 3.0*r_s;
N_s = N_s + 1;
}
else if (d < 1.1*radii) {
inner_sum += 1.0*r_s;
N_s = N_s + 1;
}
else {
inner_sum += 0.1 * 1.0 * (-r_0);
N_s = N_s + 1;
}
}
}
// Check particles who have not enough neighbor particles (only for granular now)
if(sum_w_i < paramsD.C_Wi){
derivVelRho.w = -1.0;
}
else{
derivVelRho.w = 1.0;
}
// Calculate the shifting vector
Real det_r_max = length(0.05*velMasA*paramsD.dT);
Real det_r_A = length(inner_sum);
if(det_r_A < det_r_max){
shift_r[index] = inner_sum;
}
else{
shift_r[index] = inner_sum * det_r_max/(det_r_A + 1e-9);
}
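    // Fold the XSPH correction into the shift and convert the total to a velocity by dividing by dT.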
shift_r[index] += paramsD.EPS_XSPH * deltaV * paramsD.dT;
shift_r[index] = shift_r[index] * (1.0 / paramsD.dT);
// Add gravity and other body force to fluid markers
if (rhoPresMuA.w > -1.5 && rhoPresMuA.w < -0.5){
Real3 totalFluidBodyForce3 = paramsD.bodyForce3 + paramsD.gravity;
derivVelRho += mR4(totalFluidBodyForce3);
}
sortedDerivVelRho[index] = derivVelRho;
sortedDerivTauXxYyZz[index] = mR3(dTauxx, dTauyy, dTauzz);
sortedDerivTauXyXzYz[index] = mR3(dTauxy, dTauxz, dTauyz);
}
//--------------------------------------------------------------------------------------------------------------------------------
__global__ void CalcVel_XSPH_D(Real3* vel_XSPH_Sorted_D, // output: new velocity
Real4* sortedPosRad_old, // input: sorted positions
Real4* sortedPosRad, // input: sorted positions
Real3* sortedVelMas, // input: sorted velocities
Real4* sortedRhoPreMu,
Real3* shift_r,
uint* gridMarkerIndex, // input: sorted particle indices
uint* cellStart,
uint* cellEnd,
const size_t numAllMarkers,
volatile bool* isErrorD) {
uint index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= numAllMarkers)
return;
Real4 rhoPreMuA = sortedRhoPreMu[index];
Real3 velMasA = sortedVelMas[index];
Real SuppRadii = RESOLUTION_LENGTH_MULT * paramsD.HSML;
Real3 posRadA = mR3(sortedPosRad_old[index]);
Real3 deltaV = mR3(0);
// get address in grid
int3 gridPos = calcGridPos(posRadA);
Real3 inner_sum = mR3(0.0);
// Real mi_bar = 0.0, r0 = 0.0;
Real3 dV = mR3(0.0f);
// examine neighbouring cells
for (int z = -1; z <= 1; z++) {
for (int y = -1; y <= 1; y++) {
for (int x = -1; x <= 1; x++) {
int3 neighbourPos = gridPos + mI3(x, y, z);
uint gridHash = calcGridHash(neighbourPos);
uint startIndex = cellStart[gridHash];
uint endIndex = cellEnd[gridHash];
for (uint j = startIndex; j < endIndex; j++) {
if (j != index) { // check not colliding with self
Real3 posRadB = mR3(sortedPosRad_old[j]);
Real3 dist3 = Distance(posRadA, posRadB);
Real d = length(dist3);
if (d > SuppRadii)
continue;
Real4 rhoPresMuB = sortedRhoPreMu[j];
if (rhoPresMuB.w != -1.0)
continue;
Real3 velMasB = sortedVelMas[j];
Real rho_bar = 0.5 * (rhoPreMuA.x + rhoPresMuB.x);
deltaV += paramsD.markerMass * (velMasB - velMasA) *
W3h(d, (sortedPosRad_old[index].w + sortedPosRad_old[j].w) * 0.5) / rho_bar;
}
}
}
}
}
vel_XSPH_Sorted_D[index] = paramsD.EPS_XSPH * deltaV + shift_r[index]*(1.0/paramsD.dT);
if (!(isfinite(vel_XSPH_Sorted_D[index].x) && isfinite(vel_XSPH_Sorted_D[index].y) &&
isfinite(vel_XSPH_Sorted_D[index].z))) {
printf("Error! particle vXSPH is NAN: thrown from ChFsiForceExplicitSPH.cu, newVel_XSPH_D !\n");
*isErrorD = true;
}
}
//--------------------------------------------------------------------------------------------------------------------------------
ChFsiForceExplicitSPH::ChFsiForceExplicitSPH(std::shared_ptr<ChBce> otherBceWorker,
std::shared_ptr<SphMarkerDataD> otherSortedSphMarkersD,
std::shared_ptr<ProximityDataD> otherMarkersProximityD,
std::shared_ptr<FsiGeneralData> otherFsiGeneralData,
std::shared_ptr<SimParams> otherParamsH,
std::shared_ptr<NumberOfObjects> otherNumObjects)
: ChFsiForce(otherBceWorker,
otherSortedSphMarkersD,
otherMarkersProximityD,
otherFsiGeneralData,
otherParamsH,
otherNumObjects) {
CopyParams_NumberOfObjects(paramsH, numObjectsH);
density_initialization = 0;
}
//--------------------------------------------------------------------------------------------------------------------------------
ChFsiForceExplicitSPH::~ChFsiForceExplicitSPH() {}
//--------------------------------------------------------------------------------------------------------------------------------
void ChFsiForceExplicitSPH::Finalize() {
ChFsiForce::Finalize();
cudaMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams));
cudaMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(NumberOfObjects));
cudaMemcpyFromSymbol(paramsH.get(), paramsD, sizeof(SimParams));
cudaDeviceSynchronize();
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChFsiForceExplicitSPH::ForceSPH(std::shared_ptr<SphMarkerDataD> otherSphMarkersD,
std::shared_ptr<FsiBodiesDataD> otherFsiBodiesD,
std::shared_ptr<FsiMeshDataD> fsiMeshD) {
sphMarkersD = otherSphMarkersD;
fsiCollisionSystem->ArrangeData(sphMarkersD);
bceWorker->ModifyBceVelocity(sphMarkersD, otherFsiBodiesD);
CollideWrapper();
CalculateXSPH_velocity();
// AddGravityToFluid();
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChFsiForceExplicitSPH::CollideWrapper() {
bool *isErrorH, *isErrorD;
isErrorH = (bool*)malloc(sizeof(bool));
cudaMalloc((void**)&isErrorD, sizeof(bool));
*isErrorH = false;
cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
//------------------------------------------------------------------------
// thread per particle
uint numThreads, numBlocks;
computeGridSize((int)numObjectsH->numAllMarkers, 128, numBlocks, numThreads);
/* Execute the kernel */
// thrust::device_vector<Real> _sumWij_rhoi(numObjectsH->numAllMarkers);
thrust::device_vector<Real4> sortedDerivVelRho(numObjectsH->numAllMarkers);
thrust::device_vector<Real3> sortedDerivTauXxYyZz(numObjectsH->numAllMarkers);
thrust::device_vector<Real3> sortedDerivTauXyXzYz(numObjectsH->numAllMarkers);
shift_r.resize(numObjectsH->numAllMarkers);
// thrust::fill(_sumWij_rhoi.begin(), _sumWij_rhoi.end(), 0.);
// thrust::fill(shift_r.begin(), shift_r.end(), mR3(0.0));
// thrust::fill(sortedDerivVelRho.begin(), sortedDerivVelRho.end(), mR4(0.0));
// thrust::fill(sortedDerivTauXxYyZz.begin(), sortedDerivTauXxYyZz.end(), mR3(0.0));
// thrust::fill(sortedDerivTauXyXzYz.begin(), sortedDerivTauXyXzYz.end(), mR3(0.0));
// thrust::device_vector<Real4> rhoPresMuD_old = sortedSphMarkersD->rhoPresMuD;
if (density_initialization == 0){
thrust::device_vector<Real> _sumWij_rhoi(numObjectsH->numAllMarkers);
thrust::device_vector<Real4> rhoPresMuD_old = sortedSphMarkersD->rhoPresMuD;
printf("Re-initializing density after %d steps.\n", paramsH->densityReinit);
calcRho_kernel<<<numBlocks, numThreads>>>(
mR4CAST(sortedSphMarkersD->posRadD), mR4CAST(sortedSphMarkersD->rhoPresMuD), mR4CAST(rhoPresMuD_old),
R1CAST(_sumWij_rhoi), U1CAST(markersProximityD->cellStartD), U1CAST(markersProximityD->cellEndD),
numObjectsH->numAllMarkers, density_initialization, isErrorD);
ChUtilsDevice::Sync_CheckError(isErrorH, isErrorD, "calcRho_kernel");
}
if(paramsH->elastic_SPH){ // For granular material
// execute the kernel Navier_Stokes and Shear_Stress_Rate in one kernel
*isErrorH = false;
cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
// execute the kernel
NS_SSR<<<numBlocks, numThreads>>>(
mR4CAST(sortedDerivVelRho),mR3CAST(sortedDerivTauXxYyZz), mR3CAST(sortedDerivTauXyXzYz),
mR3CAST(shift_r), mR4CAST(sortedSphMarkersD->posRadD),
mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD),
mR3CAST(bceWorker->velMas_ModifiedBCE), mR4CAST(bceWorker->rhoPreMu_ModifiedBCE),
mR3CAST(sortedSphMarkersD->tauXxYyZzD), mR3CAST(sortedSphMarkersD->tauXyXzYzD),
U1CAST(markersProximityD->gridMarkerIndexD), U1CAST(markersProximityD->cellStartD),
U1CAST(markersProximityD->cellEndD), numObjectsH->numAllMarkers, isErrorD);
ChUtilsDevice::Sync_CheckError(isErrorH, isErrorD, "Navier_Stokes and Shear_Stress_Rate");
}
else{ // For fluid
// EOS<<<numBlocks, numThreads>>>(mR4CAST(sortedSphMarkersD->rhoPresMuD),
// numObjectsH->numAllMarkers, isErrorD);
// ChUtilsDevice::Sync_CheckError(isErrorH, isErrorD, "EOS");
thrust::device_vector<Real3>::iterator iter =
thrust::max_element(sortedSphMarkersD->velMasD.begin(),
sortedSphMarkersD->velMasD.end(), compare_Real3_mag());
Real MaxVel = length(*iter);
*isErrorH = false;
cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
// execute the kernel
Navier_Stokes<<<numBlocks, numThreads>>>(
mR4CAST(sortedDerivVelRho), mR3CAST(shift_r), mR4CAST(sortedSphMarkersD->posRadD),
mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD),
mR3CAST(bceWorker->velMas_ModifiedBCE), mR4CAST(bceWorker->rhoPreMu_ModifiedBCE),
mR3CAST(sortedSphMarkersD->tauXxYyZzD), mR3CAST(sortedSphMarkersD->tauXyXzYzD),
U1CAST(markersProximityD->gridMarkerIndexD), U1CAST(markersProximityD->cellStartD),
U1CAST(markersProximityD->cellEndD), numObjectsH->numAllMarkers, MaxVel, isErrorD);
ChUtilsDevice::Sync_CheckError(isErrorH, isErrorD, "Navier_Stokes");
}
CopySortedToOriginal_Invasive_R4(fsiGeneralData->derivVelRhoD_old,
sortedDerivVelRho, markersProximityD->gridMarkerIndexD);
if(paramsH->elastic_SPH){
CopySortedToOriginal_Invasive_R3(fsiGeneralData->derivTauXxYyZzD,
sortedDerivTauXxYyZz, markersProximityD->gridMarkerIndexD);
CopySortedToOriginal_Invasive_R3(fsiGeneralData->derivTauXyXzYzD,
sortedDerivTauXyXzYz, markersProximityD->gridMarkerIndexD);
}
sortedDerivVelRho.clear();
sortedDerivTauXxYyZz.clear();
sortedDerivTauXyXzYz.clear();
cudaFree(isErrorD);
free(isErrorH);
density_initialization++;
if (density_initialization >= paramsH->densityReinit)
density_initialization = 0;
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChFsiForceExplicitSPH::CalculateXSPH_velocity() {
/* Calculate vel_XSPH */
if (vel_XSPH_Sorted_D.size() != numObjectsH->numAllMarkers) {
printf("vel_XSPH_Sorted_D.size() %zd numObjectsH->numAllMarkers %zd \n",
vel_XSPH_Sorted_D.size(), numObjectsH->numAllMarkers);
throw std::runtime_error(
"Error! size error vel_XSPH_Sorted_D Thrown from "
"CalculateXSPH_velocity!\n");
}
bool *isErrorH, *isErrorD;
isErrorH = (bool*)malloc(sizeof(bool));
cudaMalloc((void**)&isErrorD, sizeof(bool));
*isErrorH = false;
cudaMemcpy(isErrorD, isErrorH, sizeof(bool), cudaMemcpyHostToDevice);
//------------------------------------------------------------------------
if(paramsH->elastic_SPH){
// The XSPH vector already included in the shifting vector
CopySortedToOriginal_Invasive_R3(fsiGeneralData->vel_XSPH_D, shift_r, markersProximityD->gridMarkerIndexD);
}
else{
/* thread per particle */
uint numThreads, numBlocks;
computeGridSize((uint)numObjectsH->numAllMarkers, 128, numBlocks, numThreads);
thrust::device_vector<Real4> sortedPosRad_old = sortedSphMarkersD->posRadD;
thrust::fill(vel_XSPH_Sorted_D.begin(), vel_XSPH_Sorted_D.end(), mR3(0.0));
/* Execute the kernel */
CalcVel_XSPH_D<<<numBlocks, numThreads>>>(
mR3CAST(vel_XSPH_Sorted_D), mR4CAST(sortedPosRad_old), mR4CAST(sortedSphMarkersD->posRadD),
mR3CAST(sortedSphMarkersD->velMasD), mR4CAST(sortedSphMarkersD->rhoPresMuD), mR3CAST(shift_r),
U1CAST(markersProximityD->gridMarkerIndexD), U1CAST(markersProximityD->cellStartD),
U1CAST(markersProximityD->cellEndD), numObjectsH->numAllMarkers, isErrorD);
ChUtilsDevice::Sync_CheckError(isErrorH, isErrorD, "CalcVel_XSPH_D");
CopySortedToOriginal_NonInvasive_R3(fsiGeneralData->vel_XSPH_D, vel_XSPH_Sorted_D,
markersProximityD->gridMarkerIndexD);
// CopySortedToOriginal_NonInvasive_R4(sphMarkersD->posRadD, sortedSphMarkersD->posRadD,
// markersProximityD->gridMarkerIndexD);
}
if (density_initialization % paramsH->densityReinit == 0)
CopySortedToOriginal_NonInvasive_R4(sphMarkersD->rhoPresMuD,
sortedSphMarkersD->rhoPresMuD, markersProximityD->gridMarkerIndexD);
cudaFree(isErrorD);
free(isErrorH);
}
//--------------------------------------------------------------------------------------------------------------------------------
void ChFsiForceExplicitSPH::AddGravityToFluid() {
// add gravity to fluid markers
/* Add outside forces. Don't add gravity to rigids, BCE, and boundaries, it is
* added in ChSystem */
Real3 totalFluidBodyForce3 = paramsH->bodyForce3 + paramsH->gravity;
thrust::device_vector<Real4> bodyForceD(numObjectsH->numAllMarkers);
thrust::fill(bodyForceD.begin(), bodyForceD.end(), mR4(totalFluidBodyForce3));
thrust::transform(
fsiGeneralData->derivVelRhoD_old.begin() + fsiGeneralData->referenceArray[0].x,
fsiGeneralData->derivVelRhoD_old.begin() + fsiGeneralData->referenceArray[0].y, bodyForceD.begin(),
fsiGeneralData->derivVelRhoD_old.begin() + fsiGeneralData->referenceArray[0].x, thrust::plus<Real4>());
bodyForceD.clear();
}
} // namespace fsi
} // namespace chrono
//================================================================================================================================
|
7859a420fe59ec5bc70f94dfe2754c52b77f6e53.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
http://aws.amazon.com/apache2.0/
or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/
#include "GpuTypes.h"
#include "NNTypes.h"
#include <limits>
static __constant__ GpuData cData;
__device__ inline uint64_t llitoulli(int64_t l)
{
uint64_t u;
asm("mov.b64 %0, %1;" : "=l"(u) : "l"(l));
return u;
}
__device__ inline int64_t ullitolli(uint64_t u)
{
int64_t l;
asm("mov.b64 %0, %1;" : "=l"(l) : "l"(u));
return l;
}
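// Bit-exact reinterpretation between signed and unsigned 64-bit integers via inline PTX (mov.b64).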
void SetKDeltaGpuData()
{
hipError_t status;
status = hipMemcpyToSymbol(cData, &(getGpu()._data), sizeof(GpuData));
RTERROR(status, "hipMemcpyToSymbol: SetKernelsGpuData copy to cData failed");
}
void GetKDeltaGpuData()
{
hipError_t status;
status = hipMemcpyFromSymbol(&(getGpu()._data), cData, sizeof(GpuData));
RTERROR(status, "hipMemcpyToSymbol: SetKernelsGpuData copy From cData failed");
}
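// Dense output-delta kernels: delta = (a - t) scaled by the activation derivative
// (sigmoid: a*(1-a), tanh: 1-a*a, ReLU: step); unsigned char targets are rescaled by 1/256
// and char targets by 1/128.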
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = a - t;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = a - t;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = a - t;
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateReluOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateReluOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateReluOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = a - t;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = a - t;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = a - t;
}
}
template<typename T> void kCalculateOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock);
switch (activation)
{
case Sigmoid:
hipLaunchKernelGGL(( kCalculateSigmoidOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateSigmoidOutputDelta_kernel");
break;
case Tanh:
hipLaunchKernelGGL(( kCalculateTanhOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateTanhOutputDelta_kernel");
break;
case Linear:
hipLaunchKernelGGL(( kCalculateLinearOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateLinearOutputDelta_kernel");
break;
case RectifiedLinear:
hipLaunchKernelGGL(( kCalculateReluOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateReluOutputDelta_kernel");
break;
case SoftMax:
hipLaunchKernelGGL(( kCalculateSoftMaxOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateSoftMaxOutputDelta_kernel");
break;
}
}
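// Sparse targets: the 'Raw' kernels assume a zero target for every output, then the 'NonZero'
// kernels overwrite the deltas at the listed sparse positions (one warp per example).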
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawSigmoidOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = cData._deltaBoost_zero * a * a * ((NNFloat)1.0 - a);
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = cData._deltaBoost_one * (a - (NNFloat)1.0) * a * ((NNFloat)1.0 - a);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawTanhOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = a * ((NNFloat)1.0 - a * a);
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = (a - (NNFloat)1.0) * ((NNFloat)1.0 - a * a);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawLinearOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = a;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = a - (NNFloat)1.0;
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawReluOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = a * (a > (NNFloat)0.0);
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroReluOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = (a - (NNFloat)1.0) * (a > (NNFloat)0.0);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawSoftMaxOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = a;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos];
uint64_t end = pSparseEnd[dpos];
NNFloat t = (NNFloat)1.0 / (end - pos1);
pos1 += threadIdx.x & cData._warpMask;
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
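// Dispatcher for sparse output deltas: clears the delta buffer when zero targets are ignored,
// otherwise runs the raw kernel over the whole batch before fixing up the sparse entries.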
void kCalculateSparseOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
hipMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case Sigmoid:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSigmoidOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroSigmoidOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroSigmoidSparseOutputDelta_kernel");
break;
case Tanh:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawTanhOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawTanhOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroTanhOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroTanhOutputDelta_kernel");
break;
case Linear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawLinearOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawLinearOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroLinearOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroLinearOutputDelta_kernel");
break;
case RectifiedLinear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawReluOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawReluOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroReluOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroReluOutputDelta_kernel");
break;
case SoftMax:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSoftMaxOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroSoftMaxOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroSoftMaxOutputDelta_kernel");
break;
}
}
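// Sparse analog variants: same raw/non-zero split, but the non-zero targets take their values
// from pSparseData instead of an implicit 1.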
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
            pDelta[pos2] = cData._deltaBoost_one * (a - t) * a * ((NNFloat)1.0 - a);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
            pDelta[pos2] = cData._deltaBoost_one * (a - t) * a * ((NNFloat)1.0 - a);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
            pDelta[pos2] = cData._deltaBoost_one * (a - t) * a * ((NNFloat)1.0 - a);
pos1 += cData._warpSize;
}
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = (a - t) * ((NNFloat)1.0 - a * a);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = (a - t) * ((NNFloat)1.0 - a * a);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = (a - t) * ((NNFloat)1.0 - a * a);
pos1 += cData._warpSize;
}
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroReluOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = (a - t) * (a > (NNFloat)0.0);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroReluOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = (a - t) * (a > (NNFloat)0.0);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroReluOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = (a - t) * (a > (NNFloat)0.0);
pos1 += cData._warpSize;
}
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
template<typename T>
void kCalculateSparseAnalogOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
hipMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case Sigmoid:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSigmoidOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateSparseAnalogNonZeroSigmoidSparseOutputDelta_kernel");
break;
case Tanh:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawTanhOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawTanhOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel");
break;
case Linear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawLinearOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawLinearOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel");
break;
case RectifiedLinear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawReluOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawReluOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroReluOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateSparseAnalogNonZeroReluOutputDelta_kernel");
break;
case SoftMax:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSoftMaxOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel");
break;
}
}
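// Illustrative host-side sketch (not part of the DSSTNE API) of how the analog sparse dispatcher above
// might be driven for a SoftMax output layer. The function and pointer names below are assumptions for
// illustration only; in practice these device buffers belong to the owning NNLayer / NNDataSet objects.
// Note the two-grid pattern shared by the sparse dispatchers in this file: grid1 covers all batch * stride
// outputs for the "raw" zero-target kernel, while grid2 assigns one warp per example for the nonzero kernel.
inline void kCalculateSparseAnalogOutputDeltaExample(uint32_t batch, uint32_t stride, NNFloat* pDevUnit, NNFloat* pDevDelta, uint64_t* pDevSparseStart, uint64_t* pDevSparseEnd, uint32_t* pDevSparseIndex, NNFloat* pDevSparseData)
{
// Position 0, and ignore implicit zero targets so only the listed sparse indices receive nonzero deltas
// (the dispatcher clears pDelta itself in that case before launching the nonzero kernel)
kCalculateSparseAnalogOutputDelta(SoftMax, 0, batch, stride, pDevUnit, pDevDelta, pDevSparseStart, pDevSparseEnd, pDevSparseIndex, pDevSparseData, true);
}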
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t);
}
}
template<typename T> void kCalculateCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock);
switch (activation)
{
case Sigmoid:
case SoftMax:
hipLaunchKernelGGL(( kCalculateSigmoidCrossEntropyOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateSigmoidCrossEntropyOutputDelta_kernel");
break;
}
}
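// Why one (a - t) kernel serves both Sigmoid and SoftMax in the dispatcher above: with the cross-entropy
// loss the activation derivative cancels out of the chain rule. A short worked form of that cancellation,
// stated here for reference only:
//
//   Sigmoid + binary cross entropy, z = pre-activation, a = 1 / (1 + exp(-z)):
//     L     = -t * log(a) - (1 - t) * log(1 - a)
//     dL/da = -t / a + (1 - t) / (1 - a)
//     da/dz = a * (1 - a)
//     dL/dz = dL/da * da/dz = a - t
//
//   SoftMax + categorical cross entropy yields the same dL/dz_i = a_i - t_i.
//
// So the delta written by kCalculateSigmoidCrossEntropyOutputDelta_kernel is already dL/dz and needs no
// extra derivative factor for either activation.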
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = cData._deltaBoost_zero * a;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint64_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = cData._deltaBoost_one * (a - (NNFloat)1.0);
pos1 += cData._warpSize;
}
}
}
void kCalculateSparseCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
hipMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case SoftMax:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSoftMaxOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroSoftMaxOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroSoftMaxOutputDelta_kernel");
break;
case Sigmoid:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroSigmoidCrossEntropyOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonzeroSigmoidCrossEntropyOutputDelta_kernel");
break;
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint64_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = cData._deltaBoost_one * (a - t);
pos1 += cData._warpSize;
}
}
}
template<typename T>
void kCalculateSparseAnalogCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
hipMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case SoftMax:
case Sigmoid:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateSparseAnalogNonzeroSigmoidCrossEntropyOutputDelta_kernel");
break;
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
NNFloat output = (NNFloat)0.0;
if ((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
NNFloat output = (NNFloat)0.0;
if ((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
NNFloat output = (NNFloat)0.0;
if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
NNFloat output = (NNFloat)0.0;
if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
NNFloat output = (NNFloat)0.0;
if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
NNFloat output = (NNFloat)0.0;
if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<typename T> void kCalculateScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock);
switch (activation)
{
case Sigmoid:
hipLaunchKernelGGL(( kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel");
break;
case SoftMax:
hipLaunchKernelGGL(( kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel");
break;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
NNFloat output = (NNFloat)0.0;
if (a > cData._SMCE_zeroTarget)
output = cData._SMCE_zeroScale * a;
pDelta[pos] = output;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat output = (NNFloat)0.0;
if (a < cData._SMCE_oneTarget)
output = cData._SMCE_oneScale * (a - (NNFloat)1.0);
pDelta[pos2] = output;
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
NNFloat output = (NNFloat)0.0;
if (a > cData._SMCE_zeroTarget)
output = cData._SMCE_zeroScale * a;
pDelta[pos] = output;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos];
uint64_t end = pSparseEnd[dpos];
NNFloat t = (NNFloat)1.0 / (NNFloat)(end - pos1);
uint64_t offset = pos * stride;
pos1 += threadIdx.x & cData._warpMask;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat output = (NNFloat)0.0;
if (a < cData._SMCE_oneTarget)
output = cData._SMCE_oneScale * (a - t);
pDelta[pos2] = output;
pos1 += cData._warpSize;
}
}
}
void kCalculateSparseScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
hipMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case Sigmoid:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSigmoidScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidScaledMarginalCrossEntropyOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroSigmoidScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroScaleMarginalCrossEntropyOutputDelta_kernel");
break;
case SoftMax:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel");
break;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
NNFloat output = (NNFloat)0.0;
if (a > cData._SMCE_zeroTarget)
{
output = cData._SMCE_zeroScale * a;
}
pDelta[pos] = output;
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
NNFloat output = (NNFloat)0.0;
if (a < cData._SMCE_oneTarget)
{
output = cData._SMCE_oneScale * t * (a - (NNFloat)1.0);
}
pDelta[pos2] = output;
pos1 += cData._warpSize;
}
}
}
template<typename T>
void kCalculateSparseDataScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
switch (activation)
{
case Sigmoid:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel");
break;
case SoftMax:
cout << "unsupported activation for this cost function" << endl;
getGpu().Shutdown();
exit(-1);
break;
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = ((a - t) > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0) * a * ((NNFloat)1.0 - a);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = ((a - t) > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0) * ((NNFloat)1.0 - a * a);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t) > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0;
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateReluL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = ((a - t) > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0) * (a > (NNFloat)0.0);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = ((a - t) > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0) * a * ((NNFloat)1.0 - a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = ((a - t) > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0) * ((NNFloat)1.0 - a * a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t) > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateReluL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = ((a - t) > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0) * (a > (NNFloat)0.0);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = ((a - t) > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0) * a * ((NNFloat)1.0 - a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = ((a - t) > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0) * ((NNFloat)1.0 - a * a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t) > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateReluL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = ((a - t) > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0) * (a > (NNFloat)0.0);
}
}
template<typename T> void kCalculateL1OutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock);
switch (activation)
{
case Sigmoid:
hipLaunchKernelGGL(( kCalculateSigmoidL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateSigmoidL1OutputDelta_kernel");
break;
case Tanh:
hipLaunchKernelGGL(( kCalculateTanhL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateTanhL1OutputDelta_kernel");
break;
case Linear:
hipLaunchKernelGGL(( kCalculateLinearL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateL1OutputDelta_kernel");
break;
case RectifiedLinear:
hipLaunchKernelGGL(( kCalculateReluL1OutputDelta_kernel), dim3(grid), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateReluL1OutputDelta_kernel");
break;
}
}
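// The four L1 kernel families dispatched above all compute dL/dz for L = |a - t| via the chain rule:
//   dL/da = sign(a - t), so dL/dz = sign(a - t) * f'(z), with f'(z) expressed through the output a:
//     Sigmoid:         f'(z) = a * (1 - a)
//     Tanh:            f'(z) = 1 - a^2
//     Linear:          f'(z) = 1
//     RectifiedLinear: f'(z) = 1 if a > 0, else 0
// (sign(0) is taken as -1 here because the kernels use a strict "> 0" test on a - t.)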
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawSigmoidL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = ((a > (NNFloat)0.0) ? (NNFloat)1.0 : (NNFloat)-1.0) * a * ((NNFloat)1.0 - a);
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = ((a - (NNFloat)1.0) > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0) * a * ((NNFloat)1.0 - a);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawTanhL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = (a > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0) * ((NNFloat)1.0 - a * a);
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = ((a - (NNFloat)1.0) > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0) * ((NNFloat)1.0 - a * a);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawLinearL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = (a > (NNFloat)0.0) ? (NNFloat)1.0 : (NNFloat)-1.0;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = (a - (NNFloat)1.0) > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0;
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawReluL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = (a > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0) * (a > (NNFloat)0.0);
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroReluL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = ((a - (NNFloat)1.0) > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0) * (a > (NNFloat)0.0);
pos1 += cData._warpSize;
}
}
}
void kCalculateSparseL1OutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
hipMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case Sigmoid:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawSigmoidL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateRawSigmoidL1OutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroSigmoidL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateNonZeroSigmoidL1OutputDelta_kernel");
break;
case Tanh:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawTanhL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateRawTanhL1OutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroTanhL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateNonZeroTanhL1OutputDelta_kernel");
break;
case Linear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawLinearL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateRawLinearL1OutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroLinearL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateNonZeroLinearL1OutputDelta_kernel");
break;
case RectifiedLinear:
if (!bSparseIgnoreZero)
{
hipLaunchKernelGGL(( kCalculateSparseRawReluL1OutputDelta_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateRawReluL1OutputDelta_kernel");
}
hipLaunchKernelGGL(( kCalculateSparseNonZeroReluL1OutputDelta_kernel), dim3(grid2), dim3(getGpu()._threadsPerBlock), 0, 0, position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateNonZeroL1OutputDelta_kernel");
break;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparsenessPenalty_kernel(uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, NNFloat p, NNFloat beta)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
// Calculate sum of activations
if (pos < stride)
{
NNFloat pi = (NNFloat)0.0;
for (int i = 0; i < batch; i++)
{
pi += pUnit[pos];
pos += stride;
}
// Calculate sparseness penalty
pi /= (NNFloat)batch;
pi = max(MIN_ACTIVATION, min(MAX_ACTIVATION, pi));
NNFloat penalty = beta * (-p / pi + ((NNFloat)1.0 - p) / ((NNFloat)1.0 - pi));
// Apply sparseness penalty to deltas
pos = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = 0; i < batch; i++)
{
pDelta[pos] += penalty;
pos += stride;
}
}
}
// Calculates and applies sparseness penalty to hidden layers
void kCalculateSparsenessPenalty(uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, NNFloat p, NNFloat beta)
{
dim3 grid1(CalculateBlocks(stride));
hipLaunchKernelGGL(( kCalculateSparsenessPenalty_kernel), dim3(grid1), dim3(getGpu()._threadsPerBlock), 0, 0, batch, stride, pUnit, pDelta, p, beta);
LAUNCHERROR("kCalculateSparsenessPenalty_kernel");
}
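// Hedged CPU reference sketch of the same computation, kept purely to document what the kernel above does
// (it is not called anywhere). For each hidden unit the mean activation pi over the batch is pushed toward
// the target sparseness p by adding the derivative of beta * KL(p || pi), i.e. beta * (-p/pi + (1-p)/(1-pi)),
// to every delta in that column. The function name is an illustrative assumption, not part of the DSSTNE API;
// the clamp reuses the MIN_ACTIVATION / MAX_ACTIVATION bounds already referenced by the kernel.
inline void kCalculateSparsenessPenaltyReference(uint32_t batch, uint32_t stride, const NNFloat* pUnit, NNFloat* pDelta, NNFloat p, NNFloat beta)
{
for (uint32_t j = 0; j < stride; j++)
{
// Mean activation of hidden unit j across the batch
NNFloat pi = (NNFloat)0.0;
for (uint32_t i = 0; i < batch; i++)
pi += pUnit[i * stride + j];
pi /= (NNFloat)batch;
// Clamp as the kernel does so the penalty stays finite
if (pi < MIN_ACTIVATION)
pi = MIN_ACTIVATION;
if (pi > MAX_ACTIVATION)
pi = MAX_ACTIVATION;
// d/dpi of beta * KL(p || pi)
NNFloat penalty = beta * (-p / pi + ((NNFloat)1.0 - p) / ((NNFloat)1.0 - pi));
// Apply the same penalty to every example's delta for this unit
for (uint32_t i = 0; i < batch; i++)
pDelta[i * stride + j] += penalty;
}
}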
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat scale, NNFloat oneOverScale)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < size)
{
NNFloat x = pUnit[pos];
NNFloat d = pDelta[pos];
x *= oneOverScale;
pDelta[pos] = scale * x * ((NNFloat)1.0 - x) * d;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateTanhHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat scale, NNFloat oneOverScale)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < size)
{
NNFloat x = pUnit[pos];
NNFloat d = pDelta[pos];
x *= oneOverScale;
pDelta[pos] = scale * ((NNFloat)1.0 - x * x) * d;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateReluHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < size)
{
NNFloat x = pUnit[pos];
if (x <= (NNFloat)0.0)
pDelta[pos] = (NNFloat)0.0;
}
}
void kCalculateHadamardProduct(Activation activation, uint64_t size, NNFloat scale, NNFloat* pUnit, NNFloat* pDelta)
{
uint32_t blocks = CalculateBlocks(size);
NNFloat oneOverScale = (NNFloat)1.0 / scale;
switch (activation)
{
case Sigmoid:
hipLaunchKernelGGL(( kCalculateSigmoidHadamardProduct_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, scale, oneOverScale);
LAUNCHERROR("kCalculateSigmoidHadamardProduct_kernel");
break;
case Tanh:
hipLaunchKernelGGL(( kCalculateTanhHadamardProduct_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta, scale, oneOverScale);
LAUNCHERROR("kCalculateTanhHadamardProduct_kernel");
break;
case Linear:
// Derivative of linear output is 1, no need to call any kernel here
break;
case RectifiedLinear:
hipLaunchKernelGGL(( kCalculateReluHadamardProduct_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, size, pUnit, pDelta);
LAUNCHERROR("kCalculateReluHadamardProduct_kernel");
break;
}
}
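// Note on the three kernels above: this is the backward pass through the hidden activation. On entry
// pDelta holds dL/da (the gradient w.r.t. the activation) and each kernel overwrites it with
// dL/dz = dL/da * f'(z), expressing f'(z) through the stored output (a * (1 - a) for Sigmoid, 1 - a^2 for
// Tanh, the a > 0 indicator for RectifiedLinear); Linear is a no-op because f'(z) = 1. The scale /
// oneOverScale pair is taken as given here: the unit value is divided by scale before the derivative is
// formed and the result is multiplied back by scale, which leaves the math unchanged when scale == 1.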
__global__ void
LAUNCH_BOUNDS()
kNormalizeDeltas_kernel(NNFloat norm, uint32_t batch, uint32_t stride, NNFloat* pDelta)
{
uint32_t dpos = (blockIdx.x * blockDim.x + threadIdx.x) >> cData._warpBits;
uint32_t tgx = threadIdx.x & cData._warpMask;
pDelta += dpos * stride;
if (dpos < batch)
{
// Calculate vector length
uint32_t pos = tgx;
NNFloat r2 = (NNFloat)0.0;
while (pos < stride)
{
NNFloat x = pDelta[pos];
r2 += x * x;
pos += cData._warpSize;
}
// Reduce sum
r2 += __shfl(r2, tgx ^ 1);
r2 += __shfl(r2, tgx ^ 2);
r2 += __shfl(r2, tgx ^ 4);
r2 += __shfl(r2, tgx ^ 8);
r2 += __shfl(r2, tgx ^ 16);
// Normalize vector if too large
if (r2 > norm * norm)
{
norm *= rsqrt(r2);
pos = tgx;
while (pos < stride)
{
pDelta[pos] *= norm;
pos += cData._warpSize;
}
}
}
}
void kNormalizeDeltas(NNFloat norm, uint32_t batch, uint32_t stride, NNFloat* pDelta)
{
uint32_t blocks = (batch + 3) / 4;
hipLaunchKernelGGL(( kNormalizeDeltas_kernel), dim3(blocks), dim3(128), 0, 0, norm, batch, stride, pDelta);
LAUNCHERROR("kNormalizeDeltas_kernel");
}
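// Note on the launch geometry and reduction above: each example (one row of pDelta) is handled by a single
// warp, so a 128-thread block covers four rows and the wrapper launches (batch + 3) / 4 blocks. The five
// XOR __shfl steps form a butterfly reduction across the 32 lanes, after which every lane holds the full
// sum of squares. A minimal standalone sketch of that pattern, written with the same pre-CUDA-9 __shfl
// intrinsic this file already uses (the helper name is illustrative only and is not used elsewhere):
static __device__ __forceinline__ NNFloat kWarpReduceSumSketch(NNFloat x)
{
for (uint32_t offset = 1; offset < cData._warpSize; offset <<= 1)
x += __shfl(x, (threadIdx.x & cData._warpMask) ^ offset); // exchange partial sums with the paired lane
return x;
}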
__global__ void
LAUNCH_BOUNDS()
kCalculateDeltaMagnitudes_kernel(uint32_t batch, uint32_t stride, NNFloat* pDelta, NNFloat* pMagnitude)
{
uint32_t dpos = (blockIdx.x * blockDim.x + threadIdx.x) >> cData._warpBits;
uint32_t tgx = threadIdx.x & cData._warpMask;
pDelta += dpos * stride;
if (dpos < batch)
{
// Calculate vector length
uint32_t pos = tgx;
NNFloat r2 = (NNFloat)0.0;
while (pos < stride)
{
NNFloat x = pDelta[pos];
r2 += x * x;
pos += cData._warpSize;
}
// Reduce sum
r2 += __shfl(r2, tgx ^ 1);
r2 += __shfl(r2, tgx ^ 2);
r2 += __shfl(r2, tgx ^ 4);
r2 += __shfl(r2, tgx ^ 8);
r2 += __shfl(r2, tgx ^ 16);
// Output result
if (tgx == 0)
pMagnitude[dpos] = r2;
}
}
void kCalculateDeltaMagnitudes(uint32_t batch, uint32_t stride, NNFloat* pDelta, NNFloat* pMagnitude)
{
uint32_t blocks = (batch + 3) / 4;
hipLaunchKernelGGL(( kCalculateDeltaMagnitudes_kernel), dim3(blocks), dim3(128), 0, 0, batch, stride, pDelta, pMagnitude);
LAUNCHERROR("kCalculateDeltaMagnitudes_kernel");
}
__global__ void
LAUNCH_BOUNDS()
kNormalizeDeltaMagnitudes_kernel(NNFloat norm, uint32_t batch, uint32_t stride, NNFloat* pDelta, NNFloat* pMagnitude)
{
uint32_t dpos = (blockIdx.x * blockDim.x + threadIdx.x) >> cData._warpBits;
uint32_t tgx = threadIdx.x & cData._warpMask;
pDelta += dpos * stride;
if (dpos < batch)
{
// Normalize vector if too large
NNFloat r2 = pMagnitude[dpos];
if (r2 > norm * norm)
{
norm *= rsqrt(r2);
uint32_t pos = tgx;
while (pos < stride)
{
pDelta[pos] *= norm;
pos += cData._warpSize;
}
}
}
}
void kNormalizeDeltaMagnitudes(NNFloat norm, uint32_t batch, uint32_t stride, NNFloat* pDelta, NNFloat* pMagnitude)
{
uint32_t blocks = (batch + 3) / 4;
hipLaunchKernelGGL(( kNormalizeDeltaMagnitudes_kernel), dim3(blocks), dim3(128), 0, 0, norm, batch, stride, pDelta, pMagnitude);
LAUNCHERROR("kNormalizeDeltaMagnitudes_kernel");
}
__global__ void
LAUNCH_BOUNDS()
kCalculateMaxoutDelta_kernel(NNFloat* pSrc, NNFloat* pSrcDelta, size_t size, NNFloat beta, NNFloat* pDst, NNFloat* pDstDelta)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < size)
{
NNFloat s = pSrc[pos];
NNFloat sdelta = pSrcDelta[pos];
NNFloat d = pDst[pos];
NNFloat delta = (s == d) ? sdelta : (NNFloat)0;
if (beta == (NNFloat)0)
pDstDelta[pos] = delta;
else if (delta != (NNFloat)0.0)
pDstDelta[pos] = beta * pDstDelta[pos] + delta;
}
}
void kCalculateMaxoutDelta(NNFloat* pSrc, NNFloat* pSrcDelta, size_t size, NNFloat beta, NNFloat* pDst, NNFloat* pDstDelta)
{
unsigned long blocks = CalculateBlocks(size);
hipLaunchKernelGGL(( kCalculateMaxoutDelta_kernel), dim3(blocks), dim3(getGpu()._threadsPerBlock), 0, 0, pSrc, pSrcDelta, size, beta, pDst, pDstDelta);
LAUNCHERROR("kCalculateMaxoutDelta_kernel");
}
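// Maxout backprop note: for y = max_i(s_i), dL/ds_i = dL/dy when s_i produced the maximum and 0 otherwise,
// which is exactly the (s == d) test in the kernel above (d holds the winning value). With beta != 0 the
// routed delta is accumulated as beta * old + delta, and positions whose routed delta is zero are left
// untouched.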
// Instantiates allowable templated functions so we can hide the implementations here
// instead of in the header file because we're mixing CUDA and C++ and that's
// a migraine headache in the making otherwise.
void KDeltaTempFunction()
{
kCalculateCrossEntropyOutputDelta<NNFloat>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateCrossEntropyOutputDelta<double>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateCrossEntropyOutputDelta<unsigned char>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateCrossEntropyOutputDelta<char>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateCrossEntropyOutputDelta<uint32_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateCrossEntropyOutputDelta<uint64_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateCrossEntropyOutputDelta<int32_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateCrossEntropyOutputDelta<int64_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<NNFloat>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<double>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<unsigned char>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<char>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<uint32_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<uint64_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<int32_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<int64_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateL1OutputDelta<NNFloat>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateL1OutputDelta<double>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateL1OutputDelta<unsigned char>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateL1OutputDelta<char>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateL1OutputDelta<uint32_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateL1OutputDelta<uint64_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateL1OutputDelta<int32_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateL1OutputDelta<int64_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateOutputDelta<NNFloat>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateOutputDelta<double>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateOutputDelta<unsigned char>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateOutputDelta<char>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateOutputDelta<uint32_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateOutputDelta<uint64_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateOutputDelta<int32_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateOutputDelta<int64_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<NNFloat>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<double>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<unsigned char>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<char>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<uint32_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<uint64_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<int32_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<int64_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<long>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateSparseAnalogOutputDelta<NNFloat>(Linear, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseAnalogOutputDelta<double>(Linear, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseAnalogOutputDelta<unsigned char>(Linear, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseAnalogOutputDelta<char>(Linear, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseAnalogOutputDelta<uint32_t>(Linear, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseAnalogOutputDelta<uint64_t>(Linear, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseAnalogOutputDelta<int32_t>(Linear, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseAnalogOutputDelta<int64_t>(Linear, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseAnalogOutputDelta<long>(Linear, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseDataScaledMarginalCrossEntropyOutputDelta<NNFloat>(Sigmoid, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseDataScaledMarginalCrossEntropyOutputDelta<double>(Sigmoid, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseDataScaledMarginalCrossEntropyOutputDelta<unsigned char>(Sigmoid, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseDataScaledMarginalCrossEntropyOutputDelta<char>(Sigmoid, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseDataScaledMarginalCrossEntropyOutputDelta<uint32_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseDataScaledMarginalCrossEntropyOutputDelta<uint64_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseDataScaledMarginalCrossEntropyOutputDelta<int32_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseDataScaledMarginalCrossEntropyOutputDelta<int64_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseDataScaledMarginalCrossEntropyOutputDelta<long>(Sigmoid, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
}
| 7859a420fe59ec5bc70f94dfe2754c52b77f6e53.cu | /*
Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at
http://aws.amazon.com/apache2.0/
or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
*/
#include "GpuTypes.h"
#include "NNTypes.h"
#include <limits>
static __constant__ GpuData cData;
__device__ inline uint64_t llitoulli(int64_t l)
{
uint64_t u;
asm("mov.b64 %0, %1;" : "=l"(u) : "l"(l));
return u;
}
__device__ inline int64_t ullitolli(uint64_t u)
{
int64_t l;
asm("mov.b64 %0, %1;" : "=l"(l) : "l"(u));
return l;
}
void SetKDeltaGpuData()
{
cudaError_t status;
status = cudaMemcpyToSymbol(cData, &(getGpu()._data), sizeof(GpuData));
RTERROR(status, "cudaMemcpyToSymbol: SetKernelsGpuData copy to cData failed");
}
void GetKDeltaGpuData()
{
cudaError_t status;
status = cudaMemcpyFromSymbol(&(getGpu()._data), cData, sizeof(GpuData));
RTERROR(status, "cudaMemcpyToSymbol: SetKernelsGpuData copy From cData failed");
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t) * a * ((NNFloat)1.0 - a);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t) * ((NNFloat)1.0 - a * a);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = a - t;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = a - t;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = a - t;
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateReluOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateReluOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateReluOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t) * (a > (NNFloat)0.0);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = a - t;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = a - t;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = a - t;
}
}
template<typename T> void kCalculateOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock);
switch (activation)
{
case Sigmoid:
kCalculateSigmoidOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateSigmoidOutputDelta_kernel");
break;
case Tanh:
kCalculateTanhOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateTanhOutputDelta_kernel");
break;
case Linear:
kCalculateLinearOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateLinearOutputDelta_kernel");
break;
case RectifiedLinear:
kCalculateReluOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateReluOutputDelta_kernel");
break;
case SoftMax:
kCalculateSoftMaxOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateSoftMaxOutputDelta_kernel");
break;
}
}
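// Illustrative only: a minimal host-side sketch (kept inside an #if 0 guard) of how this
// launcher might be invoked. The buffer names (pOutputUnit, pOutputDelta, pTargetData) and
// the position/batch/stride values below are hypothetical placeholders, not symbols defined
// in this file; real callers pass device pointers owned by the output layer.
#if 0
static void ExampleOutputDeltaCall(NNFloat* pOutputUnit, NNFloat* pOutputDelta, NNFloat* pTargetData)
{
uint32_t position = 0;      // first example of the current shard
uint32_t batch    = 128;    // examples processed per launch
uint32_t stride   = 1024;   // number of units in the output layer
kCalculateOutputDelta(Sigmoid, position, batch, stride, pOutputUnit, pOutputDelta, pTargetData);
}
#endif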
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawSigmoidOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = cData._deltaBoost_zero * a * a * ((NNFloat)1.0 - a);
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = cData._deltaBoost_one * (a - (NNFloat)1.0) * a * ((NNFloat)1.0 - a);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawTanhOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = a * ((NNFloat)1.0 - a * a);
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = (a - (NNFloat)1.0) * ((NNFloat)1.0 - a * a);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawLinearOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = a;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = a - (NNFloat)1.0;
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawReluOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = a * (a > (NNFloat)0.0);
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroReluOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = (a - (NNFloat)1.0) * (a > (NNFloat)0.0);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawSoftMaxOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = a;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos];
uint64_t end = pSparseEnd[dpos];
NNFloat t = (NNFloat)1.0 / (end - pos1);
pos1 += threadIdx.x & cData._warpMask;
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
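// Sparse output deltas are computed in two passes: the "Raw" kernels above treat every
// output as if its target were zero, then the "NonZero" kernels overwrite the positions
// listed in pSparseIndex with the delta for an implicit target of 1.0. When
// bSparseIgnoreZero is set, the raw pass is replaced by clearing the delta buffer.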
void kCalculateSparseOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
hipMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case Sigmoid:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSigmoidOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidOutputDelta_kernel");
}
kCalculateSparseNonZeroSigmoidOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroSigmoidSparseOutputDelta_kernel");
break;
case Tanh:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawTanhOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawTanhOutputDelta_kernel");
}
kCalculateSparseNonZeroTanhOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroTanhOutputDelta_kernel");
break;
case Linear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawLinearOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawLinearOutputDelta_kernel");
}
kCalculateSparseNonZeroLinearOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroLinearOutputDelta_kernel");
break;
case RectifiedLinear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawReluOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawReluOutputDelta_kernel");
}
kCalculateSparseNonZeroReluOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroReluOutputDelta_kernel");
break;
case SoftMax:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSoftMaxOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel");
}
kCalculateSparseNonZeroSoftMaxOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroSoftMaxOutputDelta_kernel");
break;
}
}
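// "Analog" sparse kernels read an explicit target value from pSparseData instead of
// assuming 1.0. The unsigned char and char specializations rescale the stored targets
// by 1/256 and 1/128 respectively to map them back into the unit range.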
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = cData._deltaBoost_one * (a - t) * a * ((NNFloat)1.0 - a);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = cData._deltaBoost_one * (a - t) * a * ((NNFloat)1.0 - a);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = cData._deltaBoost_one * (a - t) * a * ((NNFloat)1.0 - a);
pos1 += cData._warpSize;
}
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = (a - t) * ((NNFloat)1.0 - a * a);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = (a - t) * ((NNFloat)1.0 - a * a);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = (a - t) * ((NNFloat)1.0 - a * a);
pos1 += cData._warpSize;
}
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroReluOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = (a - t) * (a > (NNFloat)0.0);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroReluOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = (a - t) * (a > (NNFloat)0.0);
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroReluOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = (a - t) * (a > (NNFloat)0.0);
pos1 += cData._warpSize;
}
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, unsigned char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 256.0);
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, char* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat t = (NNFloat)pSparseData[pos1] * (NNFloat)(1.0 / 128.0);
pDelta[pos2] = a - t;
pos1 += cData._warpSize;
}
}
}
template<typename T>
void kCalculateSparseAnalogOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
hipMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case Sigmoid:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSigmoidOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidOutputDelta_kernel");
}
kCalculateSparseAnalogNonZeroSigmoidOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateSparseAnalogNonZeroSigmoidSparseOutputDelta_kernel");
break;
case Tanh:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawTanhOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawTanhOutputDelta_kernel");
}
kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateSparseAnalogNonZeroTanhOutputDelta_kernel");
break;
case Linear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawLinearOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawLinearOutputDelta_kernel");
}
kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateSparseAnalogNonZeroLinearOutputDelta_kernel");
break;
case RectifiedLinear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawReluOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawReluOutputDelta_kernel");
}
kCalculateSparseAnalogNonZeroReluOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateSparseAnalogNonZeroReluOutputDelta_kernel");
break;
case SoftMax:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSoftMaxOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel");
}
kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateSparseAnalogNonZeroSoftMaxOutputDelta_kernel");
break;
}
}
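// For sigmoid (and softmax) outputs trained with cross-entropy error, the activation
// derivative cancels against the error derivative, so the output delta is simply (a - t).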
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t);
}
}
template<typename T> void kCalculateCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock);
switch (activation)
{
case Sigmoid:
case SoftMax:
kCalculateSigmoidCrossEntropyOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateSigmoidCrossEntropyOutputDelta_kernel");
break;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = cData._deltaBoost_zero * a;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint64_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = cData._deltaBoost_one * (a - (NNFloat)1.0);
pos1 += cData._warpSize;
}
}
}
void kCalculateSparseCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
hipMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case SoftMax:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSoftMaxOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSoftMaxOutputDelta_kernel");
}
kCalculateSparseNonZeroSoftMaxOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroSoftMaxOutputDelta_kernel");
break;
case Sigmoid:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel");
}
kCalculateSparseNonZeroSigmoidCrossEntropyOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonzeroSigmoidCrossEntropyOutputDelta_kernel");
break;
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint64_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
pDelta[pos2] = cData._deltaBoost_one * (a - t);
pos1 += cData._warpSize;
}
}
}
template<typename T>
void kCalculateSparseAnalogCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
hipMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case SoftMax:
case Sigmoid:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidCrossEntropyOutputDelta_kernel");
}
kCalculateSparseAnalogNonZeroSigmoidCrossEntropyOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateSparseAnalogNonzeroSigmoidCrossEntropyOutputDelta_kernel");
break;
}
}
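// Scaled marginal cross-entropy (SMCE): a delta is emitted only while a positive target's
// activation is still below _SMCE_oneTarget or a zero target's activation is still above
// _SMCE_zeroTarget, and it is scaled by _SMCE_oneScale or _SMCE_zeroScale respectively.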
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
NNFloat output = (NNFloat)0.0;
if ((t == (NNFloat)1.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
NNFloat output = (NNFloat)0.0;
if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
NNFloat output = (NNFloat)0.0;
if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
NNFloat output = (NNFloat)0.0;
if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 256.0);
NNFloat output = (NNFloat)0.0;
if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * (NNFloat)(1.0 / 128.0);
NNFloat output = (NNFloat)0.0;
if ((t > (NNFloat)0.0) && (a < cData._SMCE_oneTarget))
output = cData._SMCE_oneScale * (a - t);
else if ((t == (NNFloat)0.0) && (a > cData._SMCE_zeroTarget))
output = cData._SMCE_zeroScale * (a - t);
pDelta[uOffset + pos] = output;
}
}
template<typename T> void kCalculateScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock);
switch (activation)
{
case Sigmoid:
kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateSigmoidScaledMarginalCrossEntropyOutputDelta_kernel");
break;
case SoftMax:
kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel");
break;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
NNFloat output = (NNFloat)0.0;
if (a > cData._SMCE_zeroTarget)
output = cData._SMCE_zeroScale * a;
pDelta[pos] = output;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroSigmoidScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat output = (NNFloat)0.0;
if (a < cData._SMCE_oneTarget)
output = cData._SMCE_oneScale * (a - (NNFloat)1.0);
pDelta[pos2] = output;
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
NNFloat output = (NNFloat)0.0;
if (a > cData._SMCE_zeroTarget)
output = cData._SMCE_zeroScale * a;
pDelta[pos] = output;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos];
uint64_t end = pSparseEnd[dpos];
NNFloat t = (NNFloat)1.0 / (NNFloat)(end - pos1);
uint64_t offset = pos * stride;
pos1 += threadIdx.x & cData._warpMask;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
NNFloat output = (NNFloat)0.0;
if (a < cData._SMCE_oneTarget)
output = cData._SMCE_oneScale * (a - t);
pDelta[pos2] = output;
pos1 += cData._warpSize;
}
}
}
void kCalculateSparseScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
hipMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case Sigmoid:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSigmoidScaledMarginalCrossEntropyOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidScaledMarginalCrossEntropyOutputDelta_kernel");
}
kCalculateSparseNonZeroSigmoidScaledMarginalCrossEntropyOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroScaleMarginalCrossEntropyOutputDelta_kernel");
break;
case SoftMax:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel");
}
kCalculateSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateSparseNonZeroSoftMaxScaledMarginalCrossEntropyOutputDelta_kernel");
break;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
NNFloat output = (NNFloat)0.0;
if (a > cData._SMCE_zeroTarget)
{
output = cData._SMCE_zeroScale * a;
}
pDelta[pos] = output;
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
T t = pSparseData[pos1];
NNFloat output = (NNFloat)0.0;
if (a < cData._SMCE_oneTarget)
{
output = cData._SMCE_oneScale * t * (a - (NNFloat)1.0);
}
pDelta[pos2] = output;
pos1 += cData._warpSize;
}
}
}
template<typename T>
void kCalculateSparseDataScaledMarginalCrossEntropyOutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex, T* pSparseData, bool bSparseIgnoreZero)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
hipMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case Sigmoid:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateSparseRawSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel");
}
kCalculateSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex, pSparseData);
LAUNCHERROR("kCalculateSparseNonZeroSigmoidDataScaledMarginalCrossEntropyOutputDelta_kernel");
break;
case SoftMax:
cout << "unsupported activation for this cost function" << endl;
getGpu().Shutdown();
exit(-1);
break;
}
}
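// L1 error: the gradient of |a - t| is sign(a - t), which is then multiplied by the
// derivative of the output activation (sigmoid, tanh, linear, or ReLU) in the kernels below.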
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = ((a - t) > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0) * a * ((NNFloat)1.0 - a);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = ((a - t) > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0) * ((NNFloat)1.0 - a * a);
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = (a - t) > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0;
}
}
template<typename T>
__global__ void
LAUNCH_BOUNDS()
kCalculateReluL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = pData[dOffset + pos];
pDelta[uOffset + pos] = ((a - t) > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0) * (a > (NNFloat)0.0);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = ((a - t) > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0) * a * ((NNFloat)1.0 - a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = ((a - t) > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0) * ((NNFloat)1.0 - a * a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = (a - t) > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateReluL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, unsigned char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = ((a - t) > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0) * (a > (NNFloat)0.0);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 256.0);
pDelta[uOffset + pos] = ((a - t) > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0) * a * ((NNFloat)1.0 - a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = ((a - t) > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0) * ((NNFloat)1.0 - a * a);
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = (a - t) > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0;
}
}
template<>
__global__ void
LAUNCH_BOUNDS()
kCalculateReluL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, char* pData)
{
uint64_t pos = (blockIdx.y * blockDim.x) + threadIdx.x;
if (pos < stride)
{
uint64_t uOffset = blockIdx.x * stride;
uint64_t dOffset = (cData._bShuffleIndices ? cData._pShuffleIndex[position + blockIdx.x] : position + blockIdx.x) * stride;
NNFloat a = pUnit[uOffset + pos];
NNFloat t = (NNFloat)pData[dOffset + pos] * NNFloat(1.0 / 128.0);
pDelta[uOffset + pos] = ((a - t) > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0) * (a > (NNFloat)0.0);
}
}
template<typename T> void kCalculateL1OutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, T* pData)
{
dim3 grid(batch, (stride + getGpu()._threadsPerBlock - 1) / getGpu()._threadsPerBlock);
switch (activation)
{
case Sigmoid:
kCalculateSigmoidL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateSigmoidL1OutputDelta_kernel");
break;
case Tanh:
kCalculateTanhL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateTanhL1OutputDelta_kernel");
break;
case Linear:
kCalculateLinearL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateL1OutputDelta_kernel");
break;
case RectifiedLinear:
kCalculateReluL1OutputDelta_kernel<<<grid, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pData);
LAUNCHERROR("kCalculateReluL1OutputDelta_kernel");
break;
}
}
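// Sparse L1 deltas follow the same raw/non-zero two-pass scheme as the sparse kernels
// above, with an implicit target of 0.0 for the raw pass and 1.0 for the indexed pass.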
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawSigmoidL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = ((a > (NNFloat)0.0) ? (NNFloat)1.0 : (NNFloat)-1.0) * a * ((NNFloat)1.0 - a);
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroSigmoidL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = ((a - (NNFloat)1.0) > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0) * a * ((NNFloat)1.0 - a);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawTanhL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = (a > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0) * ((NNFloat)1.0 - a * a);
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroTanhL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t* pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = ((a - (NNFloat)1.0) > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0) * ((NNFloat)1.0 - a * a);
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawLinearL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = (a > (NNFloat)0.0) ? (NNFloat)1.0 : (NNFloat)-1.0;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroLinearL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = (a - (NNFloat)1.0) > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0;
pos1 += cData._warpSize;
}
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseRawReluL1OutputDelta_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = (blockIdx.x * blockDim.x) + threadIdx.x;
if (pos < size)
{
NNFloat a = pUnit[pos];
pDelta[pos] = (a > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0) * (a > (NNFloat)0.0);
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateSparseNonZeroReluL1OutputDelta_kernel(uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex)
{
uint64_t pos = ((blockIdx.x * blockDim.x) + threadIdx.x) / cData._warpSize;
if (pos < batch)
{
uint32_t dpos = cData._bShuffleIndices ? cData._pShuffleIndex[position + pos] : position + pos;
uint64_t pos1 = pSparseStart[dpos] + (threadIdx.x & cData._warpMask);
uint64_t end = pSparseEnd[dpos];
uint64_t offset = pos * stride;
while (pos1 < end)
{
uint64_t pos2 = offset + pSparseIndex[pos1];
NNFloat a = pUnit[pos2];
pDelta[pos2] = ((a - (NNFloat)1.0) > (NNFloat)0.0 ? (NNFloat)1.0 : (NNFloat)-1.0) * (a > (NNFloat)0.0);
pos1 += cData._warpSize;
}
}
}
void kCalculateSparseL1OutputDelta(Activation activation, uint32_t position, uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, uint64_t* pSparseStart, uint64_t *pSparseEnd, uint32_t *pSparseIndex, bool bSparseIgnoreZero)
{
uint64_t size = (uint64_t)batch * (uint64_t)stride;
dim3 grid1(CalculateBlocks(size));
dim3 grid2(CalculateBlocks(batch * getGpu()._data._warpSize));
// Clear entire delta if ignoring zero outputs
if (bSparseIgnoreZero)
{
cudaMemset(pDelta, 0, size * sizeof(NNFloat));
}
switch (activation)
{
case Sigmoid:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawSigmoidL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateRawSigmoidL1OutputDelta_kernel");
}
kCalculateSparseNonZeroSigmoidL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateNonZeroSigmoidL1OutputDelta_kernel");
break;
case Tanh:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawTanhL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateRawTanhL1OutputDelta_kernel");
}
kCalculateSparseNonZeroTanhL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateNonZeroTanhL1OutputDelta_kernel");
break;
case Linear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawLinearL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateRawLinearL1OutputDelta_kernel");
}
kCalculateSparseNonZeroLinearL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateNonZeroLinearL1OutputDelta_kernel");
break;
case RectifiedLinear:
if (!bSparseIgnoreZero)
{
kCalculateSparseRawReluL1OutputDelta_kernel<<<grid1, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateRawReluL1OutputDelta_kernel");
}
kCalculateSparseNonZeroReluL1OutputDelta_kernel<<<grid2, getGpu()._threadsPerBlock>>>(position, batch, stride, pUnit, pDelta, pSparseStart, pSparseEnd, pSparseIndex);
LAUNCHERROR("kCalculateNonZeroL1OutputDelta_kernel");
break;
}
}
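// How the two passes above fit together: the Raw* kernels write the L1 delta assuming a
// target of 0 for every output, and the NonZero* kernels then overwrite just the positions
// listed in pSparseIndex with the delta for a target of 1. When bSparseIgnoreZero is set,
// the raw pass is skipped and the delta buffer is simply memset to zero first. For the
// sigmoid case the per-unit value is sign(a - t) * a * (1 - a); e.g. a = 0.8 with t = 1
// gives (-1) * 0.8 * 0.2 = -0.16.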
__global__ void
LAUNCH_BOUNDS()
kCalculateSparsenessPenalty_kernel(uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, NNFloat p, NNFloat beta)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
// Calculate sum of activations
if (pos < stride)
{
NNFloat pi = (NNFloat)0.0;
for (int i = 0; i < batch; i++)
{
pi += pUnit[pos];
pos += stride;
}
// Calculate sparseness penalty
pi /= (NNFloat)batch;
pi = max(MIN_ACTIVATION, min(MAX_ACTIVATION, pi));
NNFloat penalty = beta * (-p / pi + ((NNFloat)1.0 - p) / ((NNFloat)1.0 - pi));
// Apply sparseness penalty to deltas
pos = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = 0; i < batch; i++)
{
pDelta[pos] += penalty;
pos += stride;
}
}
}
// Calculates and applies sparseness penalty to hidden layers
void kCalculateSparsenessPenalty(uint32_t batch, uint32_t stride, NNFloat* pUnit, NNFloat* pDelta, NNFloat p, NNFloat beta)
{
dim3 grid1(CalculateBlocks(stride));
kCalculateSparsenessPenalty_kernel<<<grid1, getGpu()._threadsPerBlock>>>(batch, stride, pUnit, pDelta, p, beta);
LAUNCHERROR("kCalculateSparsenessPenalty_kernel");
}
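// The per-column penalty computed above is, up to the factor beta, the derivative of the
// usual KL sparseness term KL(p || pi) = p*log(p/pi) + (1-p)*log((1-p)/(1-pi)) with respect
// to the measured mean activation pi: d/dpi = -p/pi + (1-p)/(1-pi). Illustrative numbers:
// with target sparseness p = 0.05, mean activation pi = 0.2 and beta = 1, every delta in
// that column is increased by -0.05/0.2 + 0.95/0.8 = 0.9375.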
__global__ void
LAUNCH_BOUNDS()
kCalculateSigmoidHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat scale, NNFloat oneOverScale)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < size)
{
NNFloat x = pUnit[pos];
NNFloat d = pDelta[pos];
x *= oneOverScale;
pDelta[pos] = scale * x * ((NNFloat)1.0 - x) * d;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateTanhHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta, NNFloat scale, NNFloat oneOverScale)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < size)
{
NNFloat x = pUnit[pos];
NNFloat d = pDelta[pos];
x *= oneOverScale;
pDelta[pos] = scale * ((NNFloat)1.0 - x * x) * d;
}
}
__global__ void
LAUNCH_BOUNDS()
kCalculateReluHadamardProduct_kernel(uint64_t size, NNFloat* pUnit, NNFloat* pDelta)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < size)
{
NNFloat x = pUnit[pos];
if (x <= (NNFloat)0.0)
pDelta[pos] = (NNFloat)0.0;
}
}
void kCalculateHadamardProduct(Activation activation, uint64_t size, NNFloat scale, NNFloat* pUnit, NNFloat* pDelta)
{
uint32_t blocks = CalculateBlocks(size);
NNFloat oneOverScale = (NNFloat)1.0 / scale;
switch (activation)
{
case Sigmoid:
kCalculateSigmoidHadamardProduct_kernel<<<blocks, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, scale, oneOverScale);
LAUNCHERROR("kCalculateSigmoidHadamardProduct_kernel");
break;
case Tanh:
kCalculateTanhHadamardProduct_kernel<<<blocks, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta, scale, oneOverScale);
LAUNCHERROR("kCalculateTanhHadamardProduct_kernel");
break;
case Linear:
// Derivative of linear output is 1, no need to call any kernel here
break;
case RectifiedLinear:
kCalculateReluHadamardProduct_kernel<<<blocks, getGpu()._threadsPerBlock>>>(size, pUnit, pDelta);
LAUNCHERROR("kCalculateReluHadamardProduct_kernel");
break;
}
}
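// Chain-rule summary for the Hadamard products above: the incoming delta is scaled by the
// activation derivative reconstructed from the stored activation value itself:
// sigmoid f' = f*(1 - f), tanh f' = 1 - f*f, relu f' = 1 for x > 0 and 0 otherwise, and
// linear f' = 1, which is why the Linear case needs no kernel. The scale / oneOverScale
// pair first maps the stored unit back to the raw activation before the derivative is
// formed, then rescales the result.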
__global__ void
LAUNCH_BOUNDS()
kNormalizeDeltas_kernel(NNFloat norm, uint32_t batch, uint32_t stride, NNFloat* pDelta)
{
uint32_t dpos = (blockIdx.x * blockDim.x + threadIdx.x) >> cData._warpBits;
uint32_t tgx = threadIdx.x & cData._warpMask;
pDelta += dpos * stride;
if (dpos < batch)
{
// Calculate vector length
uint32_t pos = tgx;
NNFloat r2 = (NNFloat)0.0;
while (pos < stride)
{
NNFloat x = pDelta[pos];
r2 += x * x;
pos += cData._warpSize;
}
// Reduce sum
r2 += __shfl(r2, tgx ^ 1);
r2 += __shfl(r2, tgx ^ 2);
r2 += __shfl(r2, tgx ^ 4);
r2 += __shfl(r2, tgx ^ 8);
r2 += __shfl(r2, tgx ^ 16);
        // Normalize vector if too large
if (r2 > norm * norm)
{
norm *= rsqrt(r2);
pos = tgx;
while (pos < stride)
{
pDelta[pos] *= norm;
pos += cData._warpSize;
}
}
}
}
void kNormalizeDeltas(NNFloat norm, uint32_t batch, uint32_t stride, NNFloat* pDelta)
{
uint32_t blocks = (batch + 3) / 4;
kNormalizeDeltas_kernel<<<blocks, 128>>>(norm, batch, stride, pDelta);
LAUNCHERROR("kNormalizeDeltas_kernel");
}
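// Launch-geometry note (assuming the usual 32-thread warp): (batch + 3) / 4 blocks of 128
// threads gives four warps per block, i.e. one warp per delta row. Each warp strides across
// its row to accumulate r2, and the five __shfl(r2, tgx ^ k) steps for k = 1, 2, 4, 8, 16
// form a butterfly reduction, after which every lane holds the full sum of squares and can
// take part in the rescaling loop.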
__global__ void
LAUNCH_BOUNDS()
kCalculateDeltaMagnitudes_kernel(uint32_t batch, uint32_t stride, NNFloat* pDelta, NNFloat* pMagnitude)
{
uint32_t dpos = (blockIdx.x * blockDim.x + threadIdx.x) >> cData._warpBits;
uint32_t tgx = threadIdx.x & cData._warpMask;
pDelta += dpos * stride;
if (dpos < batch)
{
// Calculate vector length
uint32_t pos = tgx;
NNFloat r2 = (NNFloat)0.0;
while (pos < stride)
{
NNFloat x = pDelta[pos];
r2 += x * x;
pos += cData._warpSize;
}
// Reduce sum
r2 += __shfl(r2, tgx ^ 1);
r2 += __shfl(r2, tgx ^ 2);
r2 += __shfl(r2, tgx ^ 4);
r2 += __shfl(r2, tgx ^ 8);
r2 += __shfl(r2, tgx ^ 16);
// Output result
if (tgx == 0)
pMagnitude[dpos] = r2;
}
}
void kCalculateDeltaMagnitudes(uint32_t batch, uint32_t stride, NNFloat* pDelta, NNFloat* pMagnitude)
{
uint32_t blocks = (batch + 3) / 4;
kCalculateDeltaMagnitudes_kernel<<<blocks, 128>>>(batch, stride, pDelta, pMagnitude);
LAUNCHERROR("kCalculateDeltaMagnitudes_kernel");
}
__global__ void
LAUNCH_BOUNDS()
kNormalizeDeltaMagnitudes_kernel(NNFloat norm, uint32_t batch, uint32_t stride, NNFloat* pDelta, NNFloat* pMagnitude)
{
uint32_t dpos = (blockIdx.x * blockDim.x + threadIdx.x) >> cData._warpBits;
uint32_t tgx = threadIdx.x & cData._warpMask;
pDelta += dpos * stride;
if (dpos < batch)
{
        // Normalize vector if too large
NNFloat r2 = pMagnitude[dpos];
if (r2 > norm * norm)
{
norm *= rsqrt(r2);
uint32_t pos = tgx;
while (pos < stride)
{
pDelta[pos] *= norm;
pos += cData._warpSize;
}
}
}
}
void kNormalizeDeltaMagnitudes(NNFloat norm, uint32_t batch, uint32_t stride, NNFloat* pDelta, NNFloat* pMagnitude)
{
uint32_t blocks = (batch + 3) / 4;
kNormalizeDeltaMagnitudes_kernel<<<blocks, 128>>>(norm, batch, stride, pDelta, pMagnitude);
LAUNCHERROR("kNormalizeDeltaMagnitudes_kernel");
}
__global__ void
LAUNCH_BOUNDS()
kCalculateMaxoutDelta_kernel(NNFloat* pSrc, NNFloat* pSrcDelta, size_t size, NNFloat beta, NNFloat* pDst, NNFloat* pDstDelta)
{
uint64_t pos = blockIdx.x * blockDim.x + threadIdx.x;
if (pos < size)
{
NNFloat s = pSrc[pos];
NNFloat sdelta = pSrcDelta[pos];
NNFloat d = pDst[pos];
NNFloat delta = (s == d) ? sdelta : (NNFloat)0;
if (beta == (NNFloat)0)
pDstDelta[pos] = delta;
else if (delta != (NNFloat)0.0)
pDstDelta[pos] = beta * pDstDelta[pos] + delta;
}
}
void kCalculateMaxoutDelta(NNFloat* pSrc, NNFloat* pSrcDelta, size_t size, NNFloat beta, NNFloat* pDst, NNFloat* pDstDelta)
{
unsigned long blocks = CalculateBlocks(size);
kCalculateMaxoutDelta_kernel<<<blocks, getGpu()._threadsPerBlock>>>(pSrc, pSrcDelta, size, beta, pDst, pDstDelta);
LAUNCHERROR("kCalculateMaxoutDelta_kernel");
}
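// Maxout routing note: the delta is forwarded only where the source value equals the stored
// maximum (s == d); the other inputs of the maxout group receive zero, and a non-zero beta
// accumulates the surviving deltas as beta * old + new.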
// Instantiates allowable templated functions so we can hide the implementations here
// instead of in the header file because we're mixing CUDA and C++ and that's
// a migraine headache in the making otherwise.
void KDeltaTempFunction()
{
kCalculateCrossEntropyOutputDelta<NNFloat>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateCrossEntropyOutputDelta<double>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateCrossEntropyOutputDelta<unsigned char>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateCrossEntropyOutputDelta<char>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateCrossEntropyOutputDelta<uint32_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateCrossEntropyOutputDelta<uint64_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateCrossEntropyOutputDelta<int32_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateCrossEntropyOutputDelta<int64_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<NNFloat>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<double>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<unsigned char>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<char>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<uint32_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<uint64_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<int32_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<int64_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateL1OutputDelta<NNFloat>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateL1OutputDelta<double>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateL1OutputDelta<unsigned char>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateL1OutputDelta<char>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateL1OutputDelta<uint32_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateL1OutputDelta<uint64_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateL1OutputDelta<int32_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateL1OutputDelta<int64_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateOutputDelta<NNFloat>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateOutputDelta<double>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateOutputDelta<unsigned char>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateOutputDelta<char>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateOutputDelta<uint32_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateOutputDelta<uint64_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateOutputDelta<int32_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateOutputDelta<int64_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<NNFloat>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<double>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<unsigned char>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<char>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<uint32_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<uint64_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<int32_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<int64_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateScaledMarginalCrossEntropyOutputDelta<long>(Sigmoid, 0, 0, 0, NULL, NULL, NULL);
kCalculateSparseAnalogOutputDelta<NNFloat>(Linear, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseAnalogOutputDelta<double>(Linear, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseAnalogOutputDelta<unsigned char>(Linear, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseAnalogOutputDelta<char>(Linear, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseAnalogOutputDelta<uint32_t>(Linear, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseAnalogOutputDelta<uint64_t>(Linear, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseAnalogOutputDelta<int32_t>(Linear, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseAnalogOutputDelta<int64_t>(Linear, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseAnalogOutputDelta<long>(Linear, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseDataScaledMarginalCrossEntropyOutputDelta<NNFloat>(Sigmoid, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseDataScaledMarginalCrossEntropyOutputDelta<double>(Sigmoid, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseDataScaledMarginalCrossEntropyOutputDelta<unsigned char>(Sigmoid, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseDataScaledMarginalCrossEntropyOutputDelta<char>(Sigmoid, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseDataScaledMarginalCrossEntropyOutputDelta<uint32_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseDataScaledMarginalCrossEntropyOutputDelta<uint64_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseDataScaledMarginalCrossEntropyOutputDelta<int32_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseDataScaledMarginalCrossEntropyOutputDelta<int64_t>(Sigmoid, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
kCalculateSparseDataScaledMarginalCrossEntropyOutputDelta<long>(Sigmoid, 0, 0, 0, NULL, NULL, NULL, NULL, NULL, NULL, false);
}
|
eaf3f816ea08608cd71af51642851ccc74481548.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/NumericUtils.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorCompare.h>
#include <ATen/native/hip/Loops.cuh>
#include <c10/core/Scalar.h>
namespace at { namespace native {
namespace {
void where_kernel_impl(TensorIterator &iter) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBFloat16, kBool, iter.dtype(), "where_cuda", [&] {
gpu_kernel(
iter,
[=] GPU_LAMBDA (bool cond_val, scalar_t self_val, scalar_t other_val) -> scalar_t {
return cond_val ? self_val : other_val;
});
});
}
void isposinf_kernel_impl(TensorIteratorBase &iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.input_dtype(), "isposinf_cuda", [&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA (scalar_t a) -> bool { return a == std::numeric_limits<scalar_t>::infinity(); }
);
});
}
void isneginf_kernel_impl(TensorIteratorBase &iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.input_dtype(), "isneginf_cuda", [&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA (scalar_t a) -> bool { return a == -std::numeric_limits<scalar_t>::infinity(); }
);
});
}
void clamp_kernel_impl(TensorIteratorBase& iter) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_cuda", [&] {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t v, scalar_t lower, scalar_t upper) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (at::_isnan(v)) {
return v;
} if (at::_isnan(lower)) {
return lower;
} if (at::_isnan(upper)) {
return upper;
} else {
return ::min(::max(v, lower), upper);
}
});
});
}
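// Likely reason for the explicit NaN branches above: device-side ::min / ::max return the
// non-NaN operand when one argument is NaN, so without these checks a NaN in v, lower or
// upper would be clamped away instead of propagating (the behaviour the ROCm comment
// refers to).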
void inline launch_clamp_scalar(TensorIteratorBase& iter, Scalar lim0, Scalar lim1, at::native::detail::ClampLimits minmax){
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_min_scalar_cuda", [&] {
using opmath_t = at::opmath_type<scalar_t>;
auto lim0_val = lim0.to<opmath_t>();
auto lim1_val = lim1.to<opmath_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (_isnan(static_cast<opmath_t>(v))) {
return v;
} else if (minmax==at::native::detail::ClampLimits::Min){
return ::max(static_cast<opmath_t>(v), lim0_val);
} else if (minmax==at::native::detail::ClampLimits::Max){
return ::min(static_cast<opmath_t>(v), lim0_val);
} else {
return ::min(::max(static_cast<opmath_t>(v), lim0_val), lim1_val);
}
});
});
}
void clamp_scalar_kernel_impl(TensorIteratorBase& iter, const Scalar& min, const Scalar& max) {
launch_clamp_scalar(iter, min, max, at::native::detail::ClampLimits::MinMax);
}
void clamp_min_scalar_kernel_impl(TensorIteratorBase& iter, Scalar min) {
launch_clamp_scalar(iter, min, min, at::native::detail::ClampLimits::Min);
}
void clamp_max_scalar_kernel_impl(TensorIteratorBase& iter, Scalar max) {
launch_clamp_scalar(iter, max, max, at::native::detail::ClampLimits::Max);
}
} // anonymous namespace
REGISTER_DISPATCH(where_kernel, &where_kernel_impl);
REGISTER_DISPATCH(isposinf_stub, &isposinf_kernel_impl);
REGISTER_DISPATCH(isneginf_stub, &isneginf_kernel_impl);
REGISTER_DISPATCH(clamp_stub, &clamp_kernel_impl);
REGISTER_DISPATCH(clamp_scalar_stub, &clamp_scalar_kernel_impl);
REGISTER_DISPATCH(clamp_min_scalar_stub, &clamp_min_scalar_kernel_impl);
REGISTER_DISPATCH(clamp_max_scalar_stub, &clamp_max_scalar_kernel_impl);
template <typename scalar_t>
__global__ void _assert_async_cuda_kernel(scalar_t* input) {
CUDA_KERNEL_ASSERT(input[0] != 0);
}
__global__ void _assert_async_cuda_kernel(c10::complex<float>* input) {
CUDA_KERNEL_ASSERT(input[0] != c10::complex<float>(0, 0));
}
__global__ void _assert_async_cuda_kernel(c10::complex<double>* input) {
CUDA_KERNEL_ASSERT(input[0] != c10::complex<double>(0, 0));
}
void _assert_async_cuda(const Tensor& self_tensor) {
const TensorBase &self = get_tensor_base(self_tensor);
auto n = self.numel();
TORCH_CHECK(n != 0, "Boolean value of Tensor with no values is ambiguous");
TORCH_CHECK(n < 2, "Boolean value of Tensor with more than one value is ambiguous");
auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, self.scalar_type(), "_assert_async_cuda", [&] {
hipLaunchKernelGGL(( _assert_async_cuda_kernel), dim3(1), dim3(1), 0, stream, self.data_ptr<scalar_t>());
C10_HIP_KERNEL_LAUNCH_CHECK();
});
}
}} // namespace at::native
| eaf3f816ea08608cd71af51642851ccc74481548.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/NumericUtils.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorCompare.h>
#include <ATen/native/cuda/Loops.cuh>
#include <c10/core/Scalar.h>
namespace at { namespace native {
namespace {
void where_kernel_impl(TensorIterator &iter) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBFloat16, kBool, iter.dtype(), "where_cuda", [&] {
gpu_kernel(
iter,
[=] GPU_LAMBDA (bool cond_val, scalar_t self_val, scalar_t other_val) -> scalar_t {
return cond_val ? self_val : other_val;
});
});
}
void isposinf_kernel_impl(TensorIteratorBase &iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.input_dtype(), "isposinf_cuda", [&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA (scalar_t a) -> bool { return a == std::numeric_limits<scalar_t>::infinity(); }
);
});
}
void isneginf_kernel_impl(TensorIteratorBase &iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.input_dtype(), "isneginf_cuda", [&]() {
gpu_kernel(
iter,
[] GPU_LAMBDA (scalar_t a) -> bool { return a == -std::numeric_limits<scalar_t>::infinity(); }
);
});
}
void clamp_kernel_impl(TensorIteratorBase& iter) {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_cuda", [&] {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t v, scalar_t lower, scalar_t upper) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (at::_isnan(v)) {
return v;
} if (at::_isnan(lower)) {
return lower;
} if (at::_isnan(upper)) {
return upper;
} else {
return ::min(::max(v, lower), upper);
}
});
});
}
void inline launch_clamp_scalar(TensorIteratorBase& iter, Scalar lim0, Scalar lim1, at::native::detail::ClampLimits minmax){
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "clamp_min_scalar_cuda", [&] {
using opmath_t = at::opmath_type<scalar_t>;
auto lim0_val = lim0.to<opmath_t>();
auto lim1_val = lim1.to<opmath_t>();
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t v) -> scalar_t {
// Propagate nan, which doesn't propagate automatically for ROCm
if (_isnan(static_cast<opmath_t>(v))) {
return v;
} else if (minmax==at::native::detail::ClampLimits::Min){
return ::max(static_cast<opmath_t>(v), lim0_val);
} else if (minmax==at::native::detail::ClampLimits::Max){
return ::min(static_cast<opmath_t>(v), lim0_val);
} else {
return ::min(::max(static_cast<opmath_t>(v), lim0_val), lim1_val);
}
});
});
}
void clamp_scalar_kernel_impl(TensorIteratorBase& iter, const Scalar& min, const Scalar& max) {
launch_clamp_scalar(iter, min, max, at::native::detail::ClampLimits::MinMax);
}
void clamp_min_scalar_kernel_impl(TensorIteratorBase& iter, Scalar min) {
launch_clamp_scalar(iter, min, min, at::native::detail::ClampLimits::Min);
}
void clamp_max_scalar_kernel_impl(TensorIteratorBase& iter, Scalar max) {
launch_clamp_scalar(iter, max, max, at::native::detail::ClampLimits::Max);
}
} // anonymous namespace
REGISTER_DISPATCH(where_kernel, &where_kernel_impl);
REGISTER_DISPATCH(isposinf_stub, &isposinf_kernel_impl);
REGISTER_DISPATCH(isneginf_stub, &isneginf_kernel_impl);
REGISTER_DISPATCH(clamp_stub, &clamp_kernel_impl);
REGISTER_DISPATCH(clamp_scalar_stub, &clamp_scalar_kernel_impl);
REGISTER_DISPATCH(clamp_min_scalar_stub, &clamp_min_scalar_kernel_impl);
REGISTER_DISPATCH(clamp_max_scalar_stub, &clamp_max_scalar_kernel_impl);
template <typename scalar_t>
__global__ void _assert_async_cuda_kernel(scalar_t* input) {
CUDA_KERNEL_ASSERT(input[0] != 0);
}
__global__ void _assert_async_cuda_kernel(c10::complex<float>* input) {
CUDA_KERNEL_ASSERT(input[0] != c10::complex<float>(0, 0));
}
__global__ void _assert_async_cuda_kernel(c10::complex<double>* input) {
CUDA_KERNEL_ASSERT(input[0] != c10::complex<double>(0, 0));
}
void _assert_async_cuda(const Tensor& self_tensor) {
const TensorBase &self = get_tensor_base(self_tensor);
auto n = self.numel();
TORCH_CHECK(n != 0, "Boolean value of Tensor with no values is ambiguous");
TORCH_CHECK(n < 2, "Boolean value of Tensor with more than one value is ambiguous");
auto stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, self.scalar_type(), "_assert_async_cuda", [&] {
_assert_async_cuda_kernel<<<1, 1, 0, stream>>>(self.data_ptr<scalar_t>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
}
}} // namespace at::native
|
64f6a8eac092ee652ae3fc9b315f3f12894e2a54.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
# include <stdlib.h> //needed for rand()
# include <stdio.h> //needed for printf()
# include <time.h> //needed for time()
__global__ void big_add(int *a, int *b, int *c, unsigned int N){
int tid;
tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
while(tid < N){
c[tid] = a[tid] + b[tid];
tid += stride;
}
}
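// Grid-stride loop note: tid starts at the global thread index and advances by the total
// number of launched threads (blockDim.x * gridDim.x), so the kernel stays correct even
// when N exceeds what one launch can cover directly, as happens below when numBlocks is
// clamped to the device's maximum grid size.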
void cpu_add(int *a, int *b, int *c, unsigned int N){
for(unsigned int i = 0; i < N; i++){
c[i] = a[i] + b[i];
}
}
int main(){
unsigned int N = 1000000;
// DEVICE PROPERTIES
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop,0);
printf("Max Threads per block: %d\n",prop.maxThreadsPerBlock);
printf("Max Grid Size: %d x %d x %d\n",prop.maxGridSize[0],
prop.maxGridSize[1], prop.maxGridSize[2]);
printf("RAM needed estimate: %lu Mbytes\n", sizeof(int)*N*6/1000000);
int *a = (int *) malloc(N* sizeof(int));
int *b = (int *) malloc(N* sizeof(int));
int *c = (int *) malloc(N* sizeof(int));
int *d_a, *d_b, *d_c;
// set up random number generator
time_t tictoc;
srand((unsigned) time(&tictoc));
printf("copying memory...\n");
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// run the kernel
unsigned int numBlocks, numThreads;
numThreads = 1024;
printf("calculating numBlocks...\n");
numBlocks = (N + numThreads - 1)/numThreads;
if(numBlocks > prop.maxGridSize[1]){
numBlocks = prop.maxGridSize[1];
}
int iterations = 1000;
float milliseconds = 0;
FILE *f = fopen("gpu_add_times.txt", "w");
if (f == NULL)
{
printf("Error opening file!\n");
exit(1);
}
for(int j = 0; j<iterations; j++) {
for(unsigned int i = 0; i < N; i++){
a[i] = rand() % 100;
b[i] = rand() % 100;
}
printf("GPU Iteration %d of %d...\n",j,iterations);
// allocate memory
hipMalloc((void **) &d_a, sizeof(int) * N);
hipMalloc((void **) &d_b, sizeof(int) * N);
hipMalloc((void **) &d_c, sizeof(int) * N);
hipEventRecord(start);
// copy memory
hipMemcpy(d_a, a, N * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, b, N * sizeof(int), hipMemcpyHostToDevice);
big_add << < numBlocks, numThreads >> > (d_a, d_b, d_c, N);
hipEventRecord(stop);
hipMemcpy(c, d_c, N * sizeof(int), hipMemcpyDeviceToHost);
hipEventSynchronize(stop);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
hipEventElapsedTime(&milliseconds, start, stop);
fprintf(f, "%f \n", milliseconds);
}
fclose(f);
FILE *g = fopen("cpu_add_times.txt", "w");
    if (g == NULL)
{
printf("Error opening file!\n");
exit(1);
}
for(int j = 0; j < iterations; j++) {
for(unsigned int i = 0; i < N; i++){
a[i] = rand() % 100;
b[i] = rand() % 100;
}
printf("CPU Iteration %d of %d...\n",j,iterations);
hipEventRecord(start);
cpu_add(a, b, c, N);
hipEventRecord(stop);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
fprintf(g,"%f\n", milliseconds);
}
fclose(g);
return 0;
}
| 64f6a8eac092ee652ae3fc9b315f3f12894e2a54.cu | # include <stdlib.h> //needed for rand()
# include <stdio.h> //needed for printf()
__global__ void big_add(int *a, int *b, int *c, unsigned int N){
int tid;
tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
while(tid < N){
c[tid] = a[tid] + b[tid];
tid += stride;
}
}
void cpu_add(int *a, int *b, int *c, unsigned int N){
for(unsigned int i = 0; i < N; i++){
c[i] = a[i] + b[i];
}
}
int main(){
unsigned int N = 1000000;
// DEVICE PROPERTIES
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop,0);
printf("Max Threads per block: %d\n",prop.maxThreadsPerBlock);
printf("Max Grid Size: %d x %d x %d\n",prop.maxGridSize[0],
prop.maxGridSize[1], prop.maxGridSize[2]);
printf("RAM needed estimate: %lu Mbytes\n", sizeof(int)*N*6/1000000);
int *a = (int *) malloc(N* sizeof(int));
int *b = (int *) malloc(N* sizeof(int));
int *c = (int *) malloc(N* sizeof(int));
int *d_a, *d_b, *d_c;
// set up random number generator
time_t tictoc;
srand((unsigned) time(&tictoc));
printf("copying memory...\n");
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// run the kernel
unsigned int numBlocks, numThreads;
numThreads = 1024;
printf("calculating numBlocks...\n");
numBlocks = (N + numThreads - 1)/numThreads;
if(numBlocks > prop.maxGridSize[1]){
numBlocks = prop.maxGridSize[1];
}
int iterations = 1000;
float milliseconds = 0;
FILE *f = fopen("gpu_add_times.txt", "w");
if (f == NULL)
{
printf("Error opening file!\n");
exit(1);
}
for(int j = 0; j<iterations; j++) {
for(unsigned int i = 0; i < N; i++){
a[i] = rand() % 100;
b[i] = rand() % 100;
}
printf("GPU Iteration %d of %d...\n",j,iterations);
// allocate memory
cudaMalloc((void **) &d_a, sizeof(int) * N);
cudaMalloc((void **) &d_b, sizeof(int) * N);
cudaMalloc((void **) &d_c, sizeof(int) * N);
cudaEventRecord(start);
// copy memory
cudaMemcpy(d_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, N * sizeof(int), cudaMemcpyHostToDevice);
big_add << < numBlocks, numThreads >> > (d_a, d_b, d_c, N);
cudaEventRecord(stop);
cudaMemcpy(c, d_c, N * sizeof(int), cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cudaEventElapsedTime(&milliseconds, start, stop);
fprintf(f, "%f \n", milliseconds);
}
fclose(f);
FILE *g = fopen("cpu_add_times.txt", "w");
    if (g == NULL)
{
printf("Error opening file!\n");
exit(1);
}
for(int j = 0; j < iterations; j++) {
for(unsigned int i = 0; i < N; i++){
a[i] = rand() % 100;
b[i] = rand() % 100;
}
printf("CPU Iteration %d of %d...\n",j,iterations);
cudaEventRecord(start);
cpu_add(a, b, c, N);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
fprintf(g,"%f\n", milliseconds);
}
fclose(g);
return 0;
}
|
432596ceded179c3e6b968c7ba310ac524f954fb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "engines/tensorrt/gpu_cast.h"
/**
* The 'input'/'output' parameters are 4D tensors of shape (Batch, Channels, Width, Height)
* The input_size is Channels * Width * Height.
*
* blockIdx.x,y,z is the block index
* blockDim.x,y,z is the number of threads in a block
* threadIdx.x,y,z is the thread index within the block
*/
__global__
void gpu_cast_impl(int batch_size, int input_size, unsigned char* __restrict__ input, float* __restrict__ output) {
const int start_pos = blockDim.x * blockIdx.x;
const int last_pos = blockDim.x * (blockIdx.x + 1);
for (int i=start_pos; i<last_pos; ++i) {
output[i] = static_cast<float>(input[i]);
}
}
void gpu_cast(int batch_size, int input_size, unsigned char* input, float* output, hipStream_t stream) {
const int nblocks = batch_size;
const int threads_per_block = 1;
hipLaunchKernelGGL(( gpu_cast_impl), dim3(nblocks), dim3(threads_per_block), 0, stream, batch_size, input_size, input, output);
}
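// Illustrative host-side usage (a sketch only; the buffer names are hypothetical and the
// sizes assume batch_size * input_size elements allocated by the caller):
//
//   unsigned char* d_in; float* d_out;
//   hipMalloc((void**)&d_in, (size_t)batch_size * input_size * sizeof(unsigned char));
//   hipMalloc((void**)&d_out, (size_t)batch_size * input_size * sizeof(float));
//   gpu_cast(batch_size, input_size, d_in, d_out, /*stream=*/0);
//   hipStreamSynchronize(0);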
| 432596ceded179c3e6b968c7ba310ac524f954fb.cu | #include "engines/tensorrt/gpu_cast.h"
/**
* The 'input'/'output' parameters are 4D tensors of shape (Batch, Channels, Width, Height)
* The input_size is Channels * Width * Height.
*
* blockIdx.x,y,z is the block index
* blockDim.x,y,z is the number of threads in a block
* threadIdx.x,y,z is the thread index within the block
*/
__global__
void gpu_cast_impl(int batch_size, int input_size, unsigned char* __restrict__ input, float* __restrict__ output) {
const int start_pos = blockDim.x * blockIdx.x;
const int last_pos = blockDim.x * (blockIdx.x + 1);
for (int i=start_pos; i<last_pos; ++i) {
output[i] = static_cast<float>(input[i]);
}
}
void gpu_cast(int batch_size, int input_size, unsigned char* input, float* output, cudaStream_t stream) {
const int nblocks = batch_size;
const int threads_per_block = 1;
gpu_cast_impl<<<nblocks, threads_per_block, 0, stream>>>(batch_size, input_size, input, output);
}
|
f68a6d8c24e5d929cedb8bd997c33d2df710637f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <assert.h>
#include <iomanip>
#include <conio.h>
//#include <cutil.h> // cutil32.lib
//#include <cutil_math.h> // cutil32.lib
#include <GL/glut.h>
#include <cuda_gl_interop.h>
#include <helper_cuda.h>
#include <helper_math.h>
#include <hip/driver_types.h>
#include "fluid_system_host.cuh"
#include "fluid_system_kern.cuh"
#include "radixsort.cu" // Build in RadixSort
#include "thrust\device_vector.h" //thrust libs
#include "thrust\sort.h"
#include "thrust\host_vector.h"
#include "rocblas.h"
FluidParams fcuda;
bufList fbuf;
//initialInfo elasticInfo;
__device__ FluidParams simData;
__device__ uint gridActive;
__device__ int flagNumFT; //for transfer
__device__ int pNumFT; //for transfer
#define BLOCK_SIZE 256
#define LOCAL_PMAX 896
#define NUM_CELL 27
#define LAST_CELL 26
#define CENTER_CELL 13
float** g_scanBlockSums;
int** g_scanBlockSumsInt;
unsigned int g_numEltsAllocated = 0;
unsigned int g_numLevelsAllocated = 0;
void cudaExit (int argc, char **argv)
{
exit(EXIT_SUCCESS);
//CUT_EXIT(argc, argv);
}
void cudaInit(int argc, char **argv)
{
//CUT_DEVICE_INIT(argc, argv);
findCudaDevice(argc, (const char **)argv);
hipDeviceProp_t p;
hipGetDeviceProperties ( &p, 0);
printf ( "-- CUDA --\n" );
printf ( "Name: %s\n", p.name );
printf ( "Revision: %d.%d\n", p.major, p.minor );
printf ( "Global Mem: %d\n", p.totalGlobalMem );
printf ( "Shared/Blk: %d\n", p.sharedMemPerBlock );
printf ( "Regs/Blk: %d\n", p.regsPerBlock );
printf ( "Warp Size: %d\n", p.warpSize );
printf ( "Mem Pitch: %d\n", p.memPitch );
printf ( "Thrds/Blk: %d\n", p.maxThreadsPerBlock );
printf ( "Const Mem: %d\n", p.totalConstMem );
printf ( "Clock Rate: %d\n", p.clockRate );
fbuf.mgridactive = 0x0;
checkCudaErrors ( hipMalloc ( (void**) &fbuf.mpos, sizeof(float)*3 ) );
checkCudaErrors ( hipMalloc ( (void**) &fbuf.maccel, sizeof(float)*3) );
checkCudaErrors ( hipMalloc((void**)&fbuf.vel_mid, sizeof(float) * 3));
checkCudaErrors ( hipMalloc ( (void**) &fbuf.mveleval, sizeof(float)*3) );
checkCudaErrors ( hipMalloc ( (void**) &fbuf.mforce, sizeof(float)*3) );
checkCudaErrors ( hipMalloc ( (void**) &fbuf.poroForce, sizeof(float) * 3));
checkCudaErrors(hipMalloc((void**)&fbuf.fluidForce, sizeof(float) * 3));
checkCudaErrors ( hipMalloc ( (void**) &fbuf.mpress, sizeof(float) ) );
checkCudaErrors ( hipMalloc ( (void**) &fbuf.mdensity, sizeof(float) ) );
checkCudaErrors ( hipMalloc ( (void**) &fbuf.mgcell, sizeof(uint)) );
checkCudaErrors ( hipMalloc ( (void**) &fbuf.mgndx, sizeof(uint)) );
checkCudaErrors ( hipMalloc ( (void**) &fbuf.mclr, sizeof(uint)) );
checkCudaErrors ( hipMalloc((void**)&fbuf.delta_density, sizeof(float)));
checkCudaErrors ( hipMalloc((void**)&fbuf.aii, sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.pressForce, sizeof(float) * 3));
checkCudaErrors ( hipMalloc((void**)&fbuf.rest_volume, sizeof(float)));
checkCudaErrors ( hipMalloc((void**)&fbuf.volume, sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.rest_colorValue, sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.colorValue, sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.colorTensor, sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.source, sizeof(float)));
checkCudaErrors ( hipMalloc ( (void**) &fbuf.msortbuf, sizeof(uint) ) );
checkCudaErrors ( hipMalloc ( (void**) &fbuf.mgrid, 1 ) );
checkCudaErrors ( hipMalloc ( (void**) &fbuf.mgridcnt, 1 ) );
//new sort
checkCudaErrors ( hipMalloc ( (void**) &fbuf.midsort, 1 ) );
checkCudaErrors ( hipMalloc ( (void**) &fbuf.mgridoff, 1 ) );
checkCudaErrors ( hipMalloc ( (void**) &fbuf.mgridactive, 1 ) );
//checkCudaErrors ( hipMalloc ( (void**) &fbuf.mcluster, sizeof(uint) ) );
//implicit SPH formulation for elastic body
checkCudaErrors ( hipMalloc ( (void**) &fbuf.gradDeform, 1 ));
checkCudaErrors ( hipMalloc ( (void**) &fbuf.Rotation, 1));
//checkCudaErrors ( hipMalloc ( (void**) &fbuf.mf_fluidPercent, sizeof(float)));
//checkCudaErrors ( hipMalloc ( (void**) &fbuf.poroDriftVel, sizeof(float3)));
//checkCudaErrors ( hipMalloc ( (void**) &fbuf.percentChange, sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.divDarcyFlux, sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.isInside, sizeof(bool)));
//checkCudaErrors ( hipMalloc ( (void**) &fbuf.CorrectL, 1 ) );
checkCudaErrors(hipMalloc((void**)&fbuf.SurfaceForce, sizeof(float3)));
//elastic information
checkCudaErrors(hipMalloc((void**)&fbuf.elasticID, sizeof(uint)));
checkCudaErrors(hipMalloc((void**)&fbuf.particleID, sizeof(uint)));
checkCudaErrors(hipMalloc((void**)&fbuf.initialVolume, sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.neighborID, sizeof(uint)));
checkCudaErrors(hipMalloc((void**)&fbuf.neighborDistance, sizeof(float3)));
checkCudaErrors(hipMalloc((void**)&fbuf.kernelGrad, sizeof(float3)));
checkCudaErrors(hipMalloc((void**)&fbuf.kernelRotate, sizeof(float3)));
checkCudaErrors(hipMalloc((void**)&fbuf.neighborNum, sizeof(uint)));
checkCudaErrors(hipMalloc((void**)&fbuf.neighborIndex, sizeof(uint)));
//checkCudaErrors(hipMalloc((void**)&fbuf.colorField, sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.volumetricStrain, sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.normal, sizeof(float3)));
checkCudaErrors(hipMalloc((void**)&fbuf.isHead, sizeof(int)));
checkCudaErrors(hipMalloc((void**)&fbuf.frame, sizeof(int)));
checkCudaErrors(hipMalloc((void**)&fbuf.bx, sizeof(float))); checkCudaErrors(hipMalloc((void**)&fbuf.by, sizeof(float))); checkCudaErrors(hipMalloc((void**)&fbuf.bz, sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.vx, sizeof(float))); checkCudaErrors(hipMalloc((void**)&fbuf.vy, sizeof(float))); checkCudaErrors(hipMalloc((void**)&fbuf.vz, sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.rx, sizeof(float))); checkCudaErrors(hipMalloc((void**)&fbuf.ry, sizeof(float))); checkCudaErrors(hipMalloc((void**)&fbuf.rz, sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.r2x, sizeof(float))); checkCudaErrors(hipMalloc((void**)&fbuf.r2y, sizeof(float))); checkCudaErrors(hipMalloc((void**)&fbuf.r2z, sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.px, sizeof(float))); checkCudaErrors(hipMalloc((void**)&fbuf.py, sizeof(float))); checkCudaErrors(hipMalloc((void**)&fbuf.pz, sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.Apx, sizeof(float))); checkCudaErrors(hipMalloc((void**)&fbuf.Apy, sizeof(float))); checkCudaErrors(hipMalloc((void**)&fbuf.Apz, sizeof(float)));
//porous
//checkCudaErrors(hipMalloc((void**)&fbuf.porosity, sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.density_solid, sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.pressure_water, sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.totalDis, sizeof(float)*MAX_SOLIDNUM));
checkCudaErrors(hipMalloc((void**)&fbuf.solidCount, sizeof(int)));
//checkCudaErrors(hipMalloc((void**)&fbuf.Saturation, sizeof(float)));
//checkCudaErrors(hipMalloc((void**)&fbuf.AbsorbedFluidVolume, sizeof(float)));
//checkCudaErrors(hipMalloc((void**)&fbuf.Saturation, sizeof(float)));
//checkCudaErrors(hipMalloc((void**)&fbuf.DeltaSaturation, sizeof(float)));
//checkCudaErrors(hipMalloc((void**)&fbuf.elasticVolume, sizeof(float)));
//checkCudaErrors(hipMalloc((void**)&fbuf.gradPressure, sizeof(float3)));
checkCudaErrors(hipMalloc((void**)&fbuf.poroVel, sizeof(float3)));
//checkCudaErrors(hipMalloc((void**)&fbuf.fluidVel, sizeof(float3)));
preallocBlockSumsInt ( 1 );
};
int iDivUp (int a, int b) {
return (a % b != 0) ? (a / b + 1) : (a / b);
}
inline bool isPowerOfTwo(int n) { return ((n&(n-1))==0) ; }
inline int floorPow2(int n) {
#ifdef WIN32
return 1 << (int)logb((float)n);
#else
int exp;
frexp((float)n, &exp);
return 1 << (exp - 1);
#endif
}
// Compute number of blocks to create
void computeNumBlocks (int numPnts, int maxThreads, int &numBlocks, int &numThreads)
{
numThreads = min( maxThreads, numPnts );
numBlocks = iDivUp ( numPnts, numThreads );
}
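// Worked example of the split above: with numPnts = 100000 particles and maxThreads = 384,
// numThreads = 384 and numBlocks = iDivUp(100000, 384) = 261, so the last block is only
// partially filled and kernels must bounds-check against the true particle count.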
void FluidClearCUDA ()
{
checkCudaErrors ( hipFree ( fbuf.mpos ) );
checkCudaErrors ( hipFree ( fbuf.maccel ) );
checkCudaErrors ( hipFree ( fbuf.vel_mid));
checkCudaErrors ( hipFree ( fbuf.mveleval ) );
checkCudaErrors ( hipFree ( fbuf.mforce ) );
checkCudaErrors ( hipFree ( fbuf.poroForce));
checkCudaErrors(hipFree(fbuf.fluidForce));
checkCudaErrors ( hipFree ( fbuf.mpress ) );
checkCudaErrors ( hipFree ( fbuf.mdensity ) );
checkCudaErrors ( hipFree ( fbuf.mgcell ) );
checkCudaErrors ( hipFree ( fbuf.mgndx ) );
checkCudaErrors ( hipFree ( fbuf.mclr ) );
#ifdef NEW_BOUND
checkCudaErrors ( hipFree ( fbuf.misbound ) );
#endif
//checkCudaErrors ( hipFree ( fbuf.mcluster ) );
//multi fluid
checkCudaErrors ( hipFree ( fbuf.mf_alpha ) );
checkCudaErrors ( hipFree ( fbuf.mf_alpha_next ) );
//checkCudaErrors ( hipFree ( fbuf.mf_pressure_modify ) );
checkCudaErrors ( hipFree ( fbuf.mf_vel_phrel) );
checkCudaErrors ( hipFree ( fbuf.mf_restdensity ) );
checkCudaErrors ( hipFree ( fbuf.mf_restdensity_out));
checkCudaErrors ( hipFree ( fbuf.mf_restmass ) );
checkCudaErrors ( hipFree ( fbuf.mf_alpha_sum));
checkCudaErrors ( hipFree ( fbuf.mf_visc ) );
//checkCudaErrors ( hipFree ( fbuf.mf_velxcor ) );
//checkCudaErrors ( hipFree ( fbuf.mf_alphagrad ) );
checkCudaErrors(hipFree(fbuf.mf_alphachange));
//checkCudaErrors ( hipFree ( fbuf.density_fluid ) );
checkCudaErrors ( hipFree ( fbuf.msortbuf ) );
checkCudaErrors ( hipFree ( fbuf.mgrid ) );
checkCudaErrors ( hipFree ( fbuf.mgridcnt ) );
//new sort
checkCudaErrors ( hipFree ( fbuf.midsort ) );
checkCudaErrors ( hipFree ( fbuf.mgridoff ) );
checkCudaErrors ( hipFree ( fbuf.mgridactive ) );
//an implicit SPH formulation for elastic body
checkCudaErrors ( hipFree(fbuf.gradDeform));
checkCudaErrors ( hipFree(fbuf.elasticID));
checkCudaErrors ( hipFree(fbuf.Rotation));
//checkCudaErrors ( hipFree(fbuf.mf_fluidPercent));
//checkCudaErrors ( hipFree(fbuf.poroDriftVel));
//checkCudaErrors ( hipFree(fbuf.percentChange));
checkCudaErrors(hipFree(fbuf.divDarcyFlux));
checkCudaErrors(hipFree(fbuf.isInside));
//checkCudaErrors(hipFree(fbuf.CorrectL));
checkCudaErrors(hipFree(fbuf.SurfaceForce));
//elastic information
checkCudaErrors(hipFree(fbuf.particleID));
checkCudaErrors(hipFree(fbuf.initialVolume));
checkCudaErrors(hipFree(fbuf.neighborNum));
checkCudaErrors(hipFree(fbuf.neighborID));
checkCudaErrors(hipFree(fbuf.neighborDistance));
checkCudaErrors(hipFree(fbuf.kernelGrad));
checkCudaErrors(hipFree(fbuf.kernelRotate));
checkCudaErrors(hipFree(fbuf.neighborIndex));
//checkCudaErrors(hipFree(fbuf.colorField));
checkCudaErrors(hipFree(fbuf.volumetricStrain));
checkCudaErrors(hipFree(fbuf.bx)); checkCudaErrors(hipFree(fbuf.by)); checkCudaErrors(hipFree(fbuf.bz));
checkCudaErrors(hipFree(fbuf.vx)); checkCudaErrors(hipFree(fbuf.vy)); checkCudaErrors(hipFree(fbuf.vz));
checkCudaErrors(hipFree(fbuf.rx)); checkCudaErrors(hipFree(fbuf.ry)); checkCudaErrors(hipFree(fbuf.rz));
checkCudaErrors(hipFree(fbuf.r2x)); checkCudaErrors(hipFree(fbuf.r2y)); checkCudaErrors(hipFree(fbuf.r2z));
checkCudaErrors(hipFree(fbuf.px)); checkCudaErrors(hipFree(fbuf.py)); checkCudaErrors(hipFree(fbuf.pz));
checkCudaErrors(hipFree(fbuf.Apx)); checkCudaErrors(hipFree(fbuf.Apy)); checkCudaErrors(hipFree(fbuf.Apz));
checkCudaErrors(hipFree(fbuf.normal));
checkCudaErrors(hipFree(fbuf.isHead));
checkCudaErrors(hipFree(fbuf.frame));
checkCudaErrors(hipFree(fbuf.isSurface));
//porous
//checkCudaErrors(hipFree(fbuf.porosity));
checkCudaErrors(hipFree(fbuf.density_solid));
checkCudaErrors(hipFree(fbuf.pressure_water));
checkCudaErrors(hipFree(fbuf.solidCount));
checkCudaErrors(hipFree(fbuf.totalDis));
//checkCudaErrors(hipFree(fbuf.AbsorbedFluidVolume));
//checkCudaErrors(hipFree(fbuf.Saturation));
//checkCudaErrors(hipFree(fbuf.DeltaSaturation));
//checkCudaErrors(hipFree(fbuf.elasticVolume));
//checkCudaErrors(hipFree(fbuf.gradPressure));
checkCudaErrors(hipFree(fbuf.poroVel));
//checkCudaErrors(hipFree(fbuf.fluidVel));
//IISPH
checkCudaErrors(hipFree(fbuf.aii));
checkCudaErrors(hipFree(fbuf.pressForce));
checkCudaErrors(hipFree(fbuf.delta_density));
//pressure boundary for IISPH
checkCudaErrors(hipFree(fbuf.volume));
checkCudaErrors(hipFree(fbuf.rest_volume));
checkCudaErrors(hipFree(fbuf.source));
checkCudaErrors(hipFree(fbuf.colorValue));
checkCudaErrors(hipFree(fbuf.rest_colorValue));
}
void FluidSetupRotationCUDA ( float pan_r,float omega,int loadwhich, float capillaryForceRatio)
{
fcuda.pan_r = pan_r;
fcuda.omega = omega;
fcuda.loadwhich = loadwhich;
fcuda.capillaryForceRatio = capillaryForceRatio;
}
float FluidSetupCUDA ( int num, int gsrch, int3 res, float3 size, float3 delta, float3 gmin, float3 gmax, int total, int chk)
{
float cudaMem = 0;
fcuda.pnum = num;
fcuda.gridRes = res;
fcuda.gridSize = size;
fcuda.gridDelta = delta;
fcuda.gridMin = gmin;
fcuda.gridMax = gmax;
fcuda.gridTotal = total;
fcuda.gridSrch = gsrch;
fcuda.gridAdjCnt = gsrch*gsrch*gsrch;
fcuda.gridScanMax = res;
fcuda.gridScanMax -= make_int3( fcuda.gridSrch, fcuda.gridSrch, fcuda.gridSrch );
fcuda.chk = chk;
fcuda.mf_up=0;
// Build Adjacency Lookup
int cell = 0;
for (int y=0; y < gsrch; y++ )
for (int z=0; z < gsrch; z++ )
for (int x=0; x < gsrch; x++ )
fcuda.gridAdj [ cell++] = ( y * fcuda.gridRes.z+ z )*fcuda.gridRes.x + x ;
printf ( "CUDA Adjacency Table\n");
for (int n=0; n < fcuda.gridAdjCnt; n++ ) {
printf ( " ADJ: %d, %d\n", n, fcuda.gridAdj[n] );
}
// Compute number of blocks and threads
computeNumBlocks ( fcuda.pnum, 384, fcuda.numBlocks, fcuda.numThreads); // particles
computeNumBlocks ( fcuda.gridTotal, 384, fcuda.gridBlocks, fcuda.gridThreads); // grid cell
// Allocate particle buffers
fcuda.szPnts = (fcuda.numBlocks * fcuda.numThreads);
printf ( "CUDA Allocate: \n" );
printf ( " Pnts: %d, t:%dx%d=%d, Size:%d\n", fcuda.pnum, fcuda.numBlocks, fcuda.numThreads, fcuda.numBlocks*fcuda.numThreads, fcuda.szPnts);
printf ( " Grid: %d, t:%dx%d=%d, bufGrid:%d, Res: %dx%dx%d\n", fcuda.gridTotal, fcuda.gridBlocks, fcuda.gridThreads, fcuda.gridBlocks*fcuda.gridThreads, fcuda.szGrid, (int) fcuda.gridRes.x, (int) fcuda.gridRes.y, (int) fcuda.gridRes.z );
checkCudaErrors ( hipMalloc ( (void**) &fbuf.mpos, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float) * 3));
checkCudaErrors(hipMalloc((void**)&fbuf.mveleval, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float) * 3));
checkCudaErrors(hipMalloc((void**)&fbuf.mpress, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.mgcell, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(uint)));
checkCudaErrors(hipMalloc((void**)&fbuf.mgndx, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(uint)));
int temp_size = EMIT_BUF_RATIO*(2 * (sizeof(float) * 3) + sizeof(float)+ 2 *sizeof(uint));
#ifdef NEW_BOUND
checkCudaErrors ( hipMalloc ( (void**) &fbuf.misbound, EMIT_BUF_RATIO*fcuda.szPnts*sizeof(int)) );
temp_size += EMIT_BUF_RATIO*sizeof(int);
#endif
checkCudaErrors(hipMalloc((void**)&fbuf.isInside, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(bool)));
temp_size += EMIT_BUF_RATIO * sizeof(bool);
//multi fluid
checkCudaErrors ( hipMalloc ( (void**) &fbuf.mf_alpha, EMIT_BUF_RATIO*fcuda.szPnts*sizeof(float)*MAX_FLUIDNUM )); //float* num
checkCudaErrors ( hipMalloc ( (void**) &fbuf.mf_alpha_next, EMIT_BUF_RATIO*fcuda.szPnts*sizeof(float)*MAX_FLUIDNUM ) ); //float* num
checkCudaErrors ( hipMalloc ( (void**) &fbuf.mf_restmass, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)));
temp_size += EMIT_BUF_RATIO*(2*MAX_FLUIDNUM*sizeof(float) + sizeof(float));
checkCudaErrors ( hipMalloc ( (void**) &fbuf.MFtype, EMIT_BUF_RATIO*fcuda.szPnts*sizeof(int) ) ); //indicator function
temp_size += EMIT_BUF_RATIO*(sizeof(int));
//an implicit SPH formulation for elastic body
checkCudaErrors ( hipMalloc ( (void**)&fbuf.elasticID, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(uint)));
checkCudaErrors ( hipMalloc ( (void**)&fbuf.mf_beta, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)*MAX_FLUIDNUM * MAX_SOLIDNUM));
checkCudaErrors ( hipMalloc ( (void**)&fbuf.mf_beta_next, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float) * MAX_FLUIDNUM * MAX_SOLIDNUM));
temp_size += EMIT_BUF_RATIO*(2*sizeof(float)*MAX_FLUIDNUM* MAX_SOLIDNUM +sizeof(uint));
checkCudaErrors ( hipMalloc ( (void**) &fbuf.msortbuf, EMIT_BUF_RATIO*fcuda.szPnts*temp_size ) );
cudaMem += EMIT_BUF_RATIO*fcuda.szPnts*temp_size * 2;
//no sort values
checkCudaErrors(hipMalloc((void**)&fbuf.density_solid, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.gradDeform, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float) * 9));
checkCudaErrors(hipMalloc((void**)&fbuf.Rotation, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float) * 9));
checkCudaErrors(hipMalloc((void**)&fbuf.mf_vel_phrel, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float) * 3 * MAX_FLUIDNUM)); //float*3*num
checkCudaErrors(hipMalloc((void**)&fbuf.mf_restdensity, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.mf_restdensity_out, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)));
cudaMem += EMIT_BUF_RATIO*fcuda.szPnts *(21 * sizeof(float) + sizeof(float) * 3 * MAX_FLUIDNUM);
checkCudaErrors(hipMalloc((void**)&fbuf.mf_alpha_sum, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.mf_visc, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.maccel, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float) * 3));
checkCudaErrors(hipMalloc((void**)&fbuf.mforce, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float) * 3));
checkCudaErrors(hipMalloc((void**)&fbuf.mdensity, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.mgcell, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(uint)));
checkCudaErrors(hipMalloc((void**)&fbuf.mgndx, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(uint)));
checkCudaErrors(hipMalloc((void**)&fbuf.mclr, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(uint)));
cudaMem += EMIT_BUF_RATIO*fcuda.szPnts * (12 * sizeof(float));
checkCudaErrors(hipMalloc((void**)&fbuf.mf_alphachange, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)*MAX_FLUIDNUM)); //float* num
checkCudaErrors(hipMalloc((void**)&fbuf.vel_mid, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float) * 3));
checkCudaErrors(hipMalloc((void**)&fbuf.poroForce, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float) * 3));
checkCudaErrors(hipMalloc((void**)&fbuf.fluidForce, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float) * 3));
cudaMem += EMIT_BUF_RATIO*fcuda.szPnts * (9 * sizeof(float) + sizeof(float)*MAX_FLUIDNUM);
checkCudaErrors(hipMalloc((void**)&fbuf.pressure_water, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)*MAX_FLUIDNUM*MAX_SOLIDNUM));
checkCudaErrors(hipMalloc((void**)&fbuf.gradPressure, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float3)*MAX_FLUIDNUM*MAX_SOLIDNUM));
checkCudaErrors(hipMalloc((void**)&fbuf.totalDis, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)*MAX_SOLIDNUM));
checkCudaErrors(hipMalloc((void**)&fbuf.solidCount, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(int)*MAX_SOLIDNUM));
checkCudaErrors(hipMalloc((void**)&fbuf.divDarcyFlux, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)*MAX_FLUIDNUM*MAX_SOLIDNUM));
//checkCudaErrors(hipMalloc((void**)&fbuf.isInside, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)));
cudaMem += EMIT_BUF_RATIO*fcuda.szPnts * ( sizeof(float) + 2* sizeof(float)*MAX_SOLIDNUM + 5 * sizeof(float)*MAX_FLUIDNUM*MAX_SOLIDNUM);
checkCudaErrors(hipMalloc((void**)&fbuf.SurfaceForce, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float3)));
checkCudaErrors(hipMalloc((void**)&fbuf.aii, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.delta_density, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.pressForce, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float) * 3));
cudaMem += EMIT_BUF_RATIO*fcuda.szPnts * (8 * sizeof(float));
//pressure boundary for IISPH
checkCudaErrors(hipMalloc((void**)&fbuf.rest_volume, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.volume, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.source, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.colorValue, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.rest_colorValue, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.colorTensor, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float) * 9));
cudaMem += EMIT_BUF_RATIO*fcuda.szPnts * (14 * sizeof(float));
checkCudaErrors(hipMalloc((void**)&fbuf.poroVel, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float3)*MAX_FLUIDNUM*MAX_SOLIDNUM));
// Allocate grid
fcuda.szGrid = (fcuda.gridBlocks * fcuda.gridThreads);
checkCudaErrors ( hipMalloc ( (void**) &fbuf.mgrid, EMIT_BUF_RATIO*fcuda.szPnts*sizeof(int) ) );
checkCudaErrors ( hipMalloc ( (void**) &fbuf.mgridcnt, fcuda.szGrid*sizeof(int) ) );
//new sort
checkCudaErrors ( hipMalloc ( (void**) &fbuf.midsort, EMIT_BUF_RATIO*fcuda.szPnts*sizeof(uint) ) );
checkCudaErrors ( hipMalloc ( (void**) &fbuf.mgridoff, fcuda.szGrid*sizeof(int) ) );
checkCudaErrors ( hipMalloc ( (void**) &fbuf.mgridactive, fcuda.szGrid*sizeof(int) ) );
checkCudaErrors ( hipMemcpyToSymbol ( simData, &fcuda, sizeof(FluidParams) ) );
hipDeviceSynchronize ();
// Prefix Sum - Preallocate Block sums for Sorting
deallocBlockSumsInt ();
preallocBlockSumsInt ( fcuda.gridTotal );
return cudaMem;
}
float ElasticSetupCUDA(int num,float miu,float lambda,float porosity,float* permeabilityRatio,int maxNeighborNum, float *pressRatio, float stRatio)
{
float CudaMem = 0;
fcuda.numElasticPoints = num;
fcuda.maxNeighborNum = maxNeighborNum;
printf("max neighbor num is %d\n",maxNeighborNum);
fcuda.miu = miu;
fcuda.lambda = lambda;
fcuda.rest_porosity = porosity;
fcuda.stRatio = stRatio;
for (int i = 0; i < MAX_FLUIDNUM*MAX_SOLIDNUM; ++i)
{
fcuda.mf_permeability[i] = permeabilityRatio[i];
//printf("permeability %d:%15f\n", i, permeabilityRatio[i]);
std::cout << "permeability " << i << ":" << 10000000000*permeabilityRatio[i];
fcuda.pressRatio[i] = pressRatio[i];
printf("pressure ratio:%f\n", fcuda.pressRatio[i]);
}
//fcuda.rest_permeability = permeability;
//elastic information
checkCudaErrors(hipMalloc((void**)&fbuf.particleID, fcuda.numElasticPoints *sizeof(int)));
checkCudaErrors(hipMalloc((void**)&fbuf.neighborNum, fcuda.numElasticPoints * sizeof(uint)));
checkCudaErrors(hipMalloc((void**)&fbuf.initialVolume, fcuda.numElasticPoints *sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.normal, fcuda.numElasticPoints * sizeof(float3)));
checkCudaErrors(hipMalloc((void**)&fbuf.isSurface, fcuda.numElasticPoints * sizeof(int)));
checkCudaErrors(hipMalloc((void**)&fbuf.isHead, fcuda.numElasticPoints * sizeof(int)));
checkCudaErrors(hipMalloc((void**)&fbuf.frame, fcuda.numElasticPoints * sizeof(int)));
checkCudaErrors(hipMalloc((void**)&fbuf.volumetricStrain, fcuda.numElasticPoints * sizeof(float)));
CudaMem += fcuda.numElasticPoints * (7 * sizeof(float) + sizeof(float3));
checkCudaErrors(hipMalloc((void**)&fbuf.bx, fcuda.numElasticPoints * sizeof(float))); checkCudaErrors(hipMalloc((void**)&fbuf.by, fcuda.numElasticPoints * sizeof(float))); checkCudaErrors(hipMalloc((void**)&fbuf.bz, fcuda.numElasticPoints * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.vx, fcuda.numElasticPoints * sizeof(float))); checkCudaErrors(hipMalloc((void**)&fbuf.vy, fcuda.numElasticPoints * sizeof(float))); checkCudaErrors(hipMalloc((void**)&fbuf.vz, fcuda.numElasticPoints * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.rx, fcuda.numElasticPoints * sizeof(float))); checkCudaErrors(hipMalloc((void**)&fbuf.ry, fcuda.numElasticPoints * sizeof(float))); checkCudaErrors(hipMalloc((void**)&fbuf.rz, fcuda.numElasticPoints * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.r2x, fcuda.numElasticPoints * sizeof(float))); checkCudaErrors(hipMalloc((void**)&fbuf.r2y, fcuda.numElasticPoints * sizeof(float))); checkCudaErrors(hipMalloc((void**)&fbuf.r2z, fcuda.numElasticPoints * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.px, fcuda.numElasticPoints * sizeof(float))); checkCudaErrors(hipMalloc((void**)&fbuf.py, fcuda.numElasticPoints * sizeof(float))); checkCudaErrors(hipMalloc((void**)&fbuf.pz, fcuda.numElasticPoints * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.Apx, fcuda.numElasticPoints * sizeof(float))); checkCudaErrors(hipMalloc((void**)&fbuf.Apy, fcuda.numElasticPoints * sizeof(float))); checkCudaErrors(hipMalloc((void**)&fbuf.Apz, fcuda.numElasticPoints * sizeof(float)));
checkCudaErrors(hipMalloc((void**)&fbuf.neighborID, fcuda.numElasticPoints *sizeof(uint)* maxNeighborNum));
checkCudaErrors(hipMalloc((void**)&fbuf.kernelRotate, fcuda.numElasticPoints * sizeof(float3) * maxNeighborNum));
checkCudaErrors(hipMalloc((void**)&fbuf.neighborDistance, fcuda.numElasticPoints *sizeof(float3)* maxNeighborNum));
checkCudaErrors(hipMalloc((void**)&fbuf.kernelGrad, fcuda.numElasticPoints * sizeof(float3) * maxNeighborNum));
checkCudaErrors(hipMalloc((void**)&fbuf.neighborIndex, fcuda.numElasticPoints * sizeof(uint) * maxNeighborNum));
CudaMem += fcuda.numElasticPoints *maxNeighborNum*(2 * sizeof(uint) + 3 * sizeof(float3));
hipDeviceSynchronize();
return CudaMem;
}
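//Porous-medium coupling parameters. The CoCompressibility computed below combines the solid and fluid bulk
//moduli through alpha = 1 - bulkModulus_porous / bulkModulus_grains (a Biot-coefficient-like quantity):
//    CoCompressibility = K_solid * K_fluid / ((alpha - phi) * K_fluid + phi * K_solid),  phi = rest_porosity.
//Reading this as a Biot-style storage modulus is an inference from the code, not something stated in this file.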
void PorousParamCUDA(float bulkModulus_porous, float bulkModulus_grains, float bulkModulus_solid, float bulkModulus_fluid, float poroDeformStrength, float capillary, float relax2)
{
fcuda.bulkModulus_porous = bulkModulus_porous;
fcuda.bulkModulus_grains = bulkModulus_grains;
fcuda.bulkModulus_solid = bulkModulus_solid;
fcuda.bulkModulus_fluid = bulkModulus_fluid;
fcuda.poroDeformStrength = poroDeformStrength;
fcuda.relax2 = relax2;
float alpha = 1 - bulkModulus_porous / bulkModulus_grains;
fcuda.CoCompressibility = bulkModulus_solid*bulkModulus_fluid / ((alpha - fcuda.rest_porosity)*bulkModulus_fluid + fcuda.rest_porosity*bulkModulus_solid);
fcuda.capillary = capillary;
printf("CoCompressibility is %f\n", fcuda.CoCompressibility);
}
void FluidParamCUDA ( float ss, float sr, float pr, float mass, float rest, float3 bmin, float3 bmax, float estiff, float istiff,float pbstiff, float visc, float damp, float fmin, float fmax, float ffreq, float gslope, float gx, float gy, float gz, float al, float vl )
{
fcuda.psimscale = ss;
fcuda.psmoothradius = sr;
fcuda.pradius = pr;
fcuda.r2 = sr * sr;
fcuda.pmass = mass;
fcuda.prest_dens = rest;
fcuda.pboundmin = bmin;
fcuda.pboundmax = bmax;
fcuda.pextstiff = estiff;
fcuda.pintstiff = istiff;
fcuda.pbstiff = pbstiff;
fcuda.pvisc = visc;
fcuda.pdamp = damp;
fcuda.pforce_min = fmin;
fcuda.pforce_max = fmax;
fcuda.pforce_freq = ffreq;
fcuda.pground_slope = gslope;
fcuda.pgravity = make_float3( gx, gy, gz );
fcuda.AL = al;
fcuda.AL2 = al * al;
fcuda.VL = vl;
fcuda.VL2 = vl * vl;
printf ( "Bound Min: %f %f %f\n", bmin.x, bmin.y, bmin.z );
printf ( "Bound Max: %f %f %f\n", bmax.x, bmax.y, bmax.z );
fcuda.pdist = pow ( fcuda.pmass / fcuda.prest_dens, 1/3.0f );
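	// Precompute the SPH kernel normalization constants (3D, smoothing radius sr = h):
	//   poly6:           W(r)  = 315/(64*pi*h^9) * (h^2 - r^2)^3
	//   spiky gradient:  dW/dr = -45/(pi*h^6) * (h - r)^2
	//   viscosity lapl.:  45/(pi*h^6) * (h - r)
	// CubicSplineKern = 8/(pi*h^3) and gradCubicSplineKern = 48/(pi*h^4) match the standard 3D cubic
	// B-spline with support radius h; the *1/*2 variants below appear to be the two branches of a
	// piecewise form of the same spline (inferred from the constants, not documented here).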
fcuda.poly6kern = 315.0f / (64.0f * 3.141592 * pow( sr, 9.0f) );
fcuda.spikykern = -45.0f / (3.141592 * pow( sr, 6.0f) );
fcuda.lapkern = 45.0f / (3.141592 * pow( sr, 6.0f) );
//fcuda.CubicSplineKern1 = 1 / (4 * 3.141592*pow(sr, 3));
//fcuda.CubicSplineKern2 = 1 / (3.141592*pow(sr, 3));
fcuda.CubicSplineKern = 8 / (3.141592*pow(sr, 3));
fcuda.gradCubicSplineKern = 48 / (3.141592*pow(sr, 4));
fcuda.CubicSplineKern1 = 1 / (4 * 3.141592*pow(sr, 3));
fcuda.CubicSplineKern2 = 8 / (3.141592*pow(sr, 3));
fcuda.gradCubicSplineKern1 = -3 / (4 * 3.141592*pow(sr, 4));
fcuda.gradCubicSplineKern2 = 1 / (3.141592*pow(sr, 4));
//printf("fcuda.gradCubicSplineKern1 is %f,fcuda.gradCubicSplineKern2 is %f,fcuda.spikykern is %f\n",
// fcuda.gradCubicSplineKern1, fcuda.gradCubicSplineKern2, fcuda.spikykern);
checkCudaErrors( hipMemcpyToSymbol ( simData, &fcuda, sizeof(FluidParams) ) );
hipDeviceSynchronize ();
}
void ParamUpdateCUDA(bool hidebound, bool hidefluid, bool hidesolid, bool hiderigid, float* colorValue)
{
fcuda.HideBound = hidebound;
fcuda.HideFluid = hidefluid;
fcuda.HideSolid = hidesolid;
fcuda.HideRigid = hiderigid;
for(int i=0;i<MAX_FLUIDNUM;++i)
fcuda.colorValue[i] = colorValue[i];
checkCudaErrors(hipMemcpyToSymbol(simData, &fcuda, sizeof(FluidParams)));
hipDeviceSynchronize();
}
void FluidParamCUDA_projectu(float visc_factor, float fluid_pfactor,float solid_pfactor,float bdamp)
{
fcuda.visc_factor = visc_factor;
fcuda.fluid_pfactor = fluid_pfactor;
fcuda.solid_pfactor = solid_pfactor;
fcuda.bdamp = bdamp;
fcuda.gravityfree = 0;
}
void FluidMfParamCUDA ( float *dens, float *visc, float *mass, float diffusion, float catnum, float dt, float3 cont, float3 mb1,float3 mb2, float relax,int example)
{
fcuda.mf_catnum = catnum;
fcuda.mf_diffusion = diffusion;
fcuda.mf_dt = dt;
for(int i=0;i<MAX_FLUIDNUM;i++)
{
fcuda.mf_dens[i] = dens[i];
fcuda.mf_visc[i] = visc[i];
fcuda.mf_mass[i] = mass[i];
}
fcuda.mf_multiFlagPNum = 0;
//fcuda.mf_splitVolume = splitV;
//fcuda.mf_mergeVolume = mergeV;
fcuda.mf_maxPnum = fcuda.pnum * EMIT_BUF_RATIO;
fcuda.cont = cont.x; fcuda.cont1 = cont.y; fcuda.cont2 = cont.z;
fcuda.mb1.x = mb1.x; fcuda.mb1.y = mb1.y; fcuda.mb1.z = mb1.z;
fcuda.mb2.x = mb2.x; fcuda.mb2.y = mb2.y; fcuda.mb2.z = mb2.z;
fcuda.bxmin = mb1.x; fcuda.by = mb1.y; fcuda.bzmin = mb1.z;
fcuda.bxmax = mb2.x; fcuda.bzmax = mb2.z;
fcuda.relax = relax;
fcuda.example = example;
checkCudaErrors( hipMemcpyToSymbol ( simData, &fcuda, sizeof(FluidParams) ) );
hipDeviceSynchronize ();
}
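//Workspace for the multi-level prefix sum (scan) used by the particle counting sort.
//Each scan level processes 2*BLOCK_SIZE elements per thread block, so the loops below allocate one
//int buffer of per-block sums for every level that still produces more than one block.
//Illustrative sizing (hypothetical numbers, assuming BLOCK_SIZE = 256): 1,000,000 elements ->
//ceil(1e6/512) = 1954 block sums at level 0, then ceil(1954/512) = 4 at level 1, then a single
//block and no further buffers, i.e. two levels allocated in total.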
void preallocBlockSumsInt (unsigned int maxNumElements)
{
	assert(g_numEltsAllocated == 0); // shouldn't be called twice without deallocBlockSumsInt() in between
g_numEltsAllocated = maxNumElements;
unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks
unsigned int numElts = maxNumElements;
int level = 0;
do {
unsigned int numBlocks = max(1, (int)ceil((float)numElts / (2.f * blockSize)));
if (numBlocks > 1) level++;
numElts = numBlocks;
} while (numElts > 1);
g_scanBlockSumsInt = (int**) malloc(level * sizeof(int*));
g_numLevelsAllocated = level;
numElts = maxNumElements;
level = 0;
do {
unsigned int numBlocks = max(1, (int)ceil((float)numElts / (2.f * blockSize)));
if (numBlocks > 1) checkCudaErrors ( hipMalloc((void**) &g_scanBlockSumsInt[level++], numBlocks * sizeof(int)) );
numElts = numBlocks;
} while (numElts > 1);
}
void deallocBlockSumsInt()
{
for (unsigned int i = 0; i < g_numLevelsAllocated; i++) hipFree(g_scanBlockSumsInt[i]);
free( (void**)g_scanBlockSumsInt );
g_scanBlockSumsInt = 0;
g_numEltsAllocated = 0;
g_numLevelsAllocated = 0;
}
//Copy buffers
void CopyToCUDA ( float* pos, float* vel, float* veleval, float* force, float* pressure, float* density, uint* cluster, uint* gnext, char* clr)
{
// Send particle buffers
int numPoints = fcuda.pnum;
checkCudaErrors( hipMemcpy ( fbuf.mpos, pos, numPoints*sizeof(float)*3, hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy ( fbuf.maccel, vel, numPoints*sizeof(float)*3, hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy ( fbuf.mveleval, veleval, numPoints*sizeof(float)*3, hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy ( fbuf.mforce, force, numPoints*sizeof(float)*3, hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy ( fbuf.mpress, pressure, numPoints*sizeof(float), hipMemcpyHostToDevice ) );
//checkCudaErrors( hipMemcpy ( fbuf.mpress_pre, pressure, numPoints * sizeof(float), hipMemcpyHostToDevice));
checkCudaErrors( hipMemcpy ( fbuf.mdensity, density, numPoints*sizeof(float), hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy ( fbuf.mclr, clr, numPoints*sizeof(uint), hipMemcpyHostToDevice ) );
hipDeviceSynchronize ();
}
void CopyMfToCUDA ( float* alpha, float* alpha_pre, float* pressure_modify, float* vel_phrel, float* restmass, float* restdensity, float* visc, float* velxcor, float* alphagrad)
{
// Send particle buffers
int numPoints = fcuda.pnum;
checkCudaErrors( hipMemcpy ( fbuf.mf_alpha, alpha, numPoints*MAX_FLUIDNUM*sizeof(float), hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy ( fbuf.mf_alpha_next, alpha, numPoints*MAX_FLUIDNUM*sizeof(float), hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy ( fbuf.mf_vel_phrel, vel_phrel, numPoints*MAX_FLUIDNUM*sizeof(float)*3, hipMemcpyHostToDevice ) );
//checkCudaErrors( hipMemcpy ( fbuf.mf_alphagrad, alphagrad, numPoints*MAX_FLUIDNUM*sizeof(float)*3, hipMemcpyHostToDevice ) );
//checkCudaErrors( hipMemcpy ( fbuf.mf_pressure_modify, pressure_modify, numPoints*sizeof(float), hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy ( fbuf.mf_restmass, restmass, numPoints*sizeof(float), hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy ( fbuf.mf_restdensity, restdensity, numPoints*sizeof(float), hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy ( fbuf.mf_visc, visc, numPoints*sizeof(float), hipMemcpyHostToDevice ) );
//checkCudaErrors( hipMemcpy ( fbuf.mf_velxcor, velxcor, numPoints*sizeof(float)*3, hipMemcpyHostToDevice ) );
//checkCudaErrors( hipMemcpy ( fbuf.MFtype, mftype, numPoints*sizeof(int), hipMemcpyHostToDevice ) );
hipDeviceSynchronize ();
}
void CopyBoundToCUDA (int* isbound )
{
int numPoints = fcuda.pnum;
checkCudaErrors( hipMemcpy ( fbuf.misbound, isbound, numPoints*sizeof(int), hipMemcpyHostToDevice ) );
hipDeviceSynchronize ();
}
void CopyToCUDA_Uproject(int* mftype)
{
int numPoints = fcuda.pnum;
checkCudaErrors( hipMemcpy( fbuf.MFtype, mftype, numPoints*sizeof(int), hipMemcpyHostToDevice));
hipDeviceSynchronize ();
}
void CopyToCUDA_elastic(uint* elasticID,float* porosity,float*signDistance)
{
int numPoints = fcuda.pnum;
int numElasticPoints = fcuda.numElasticPoints;
checkCudaErrors(hipMemcpy(fbuf.elasticID, elasticID, numPoints * sizeof(uint), hipMemcpyHostToDevice));
//checkCudaErrors(hipMemcpy(fbuf.porosity, porosity, numElasticPoints * sizeof(float), hipMemcpyHostToDevice));
//checkCudaErrors(hipMemcpy(fbuf.colorField, signDistance, numElasticPoints * sizeof(float), hipMemcpyHostToDevice));
hipDeviceSynchronize();
}
void CopyFromCUDA ( float* pos, float* vel, float* veleval, float* force, float* pressure, float* density, uint* cluster, uint* gnext, char* clr, int mode)
{
// Return particle buffers
int numPoints = fcuda.pnum;
//printf("sizeof(float3) is %d and sizeof(float) is %d\n", sizeof(float3), sizeof(float));
//printf("fbuf.mpos address : OX%p\n", fbuf.mpos);
//printf("numPoints is %d\n", numPoints);
if ( pos != 0x0 ) checkCudaErrors( hipMemcpy ( pos, fbuf.mpos, numPoints*sizeof(float)*3, hipMemcpyDeviceToHost ) );
if ( clr != 0x0 ) checkCudaErrors( hipMemcpy ( clr, fbuf.mclr, numPoints*sizeof(uint), hipMemcpyDeviceToHost ) );
if( mode == 2){
checkCudaErrors( hipMemcpy ( vel, fbuf.maccel, numPoints*sizeof(float)*3, hipMemcpyDeviceToHost ) );
checkCudaErrors( hipMemcpy ( veleval, fbuf.mveleval, numPoints*sizeof(float)*3, hipMemcpyDeviceToHost ) );
checkCudaErrors( hipMemcpy ( force, fbuf.mforce, numPoints*sizeof(float)*3, hipMemcpyDeviceToHost ) );
checkCudaErrors( hipMemcpy ( pressure, fbuf.mpress, numPoints*sizeof(float), hipMemcpyDeviceToHost ) );
checkCudaErrors( hipMemcpy ( density, fbuf.mdensity, numPoints*sizeof(float), hipMemcpyDeviceToHost ) );
}
hipDeviceSynchronize ();
}
void CopyMfFromCUDA ( float* alpha, float* alpha_pre, float* pressure_modify, float* vel_phrel, float* restmass, float* restdensity, float* visc, float* velxcor, float* alphagrad, int mode)
{
int numPoints = fcuda.pnum;
checkCudaErrors( hipMemcpy ( alpha, fbuf.mf_alpha, numPoints*MAX_FLUIDNUM*sizeof(float), hipMemcpyDeviceToHost ) );
checkCudaErrors( hipMemcpy ( restmass, fbuf.mf_restmass, numPoints*sizeof(float), hipMemcpyDeviceToHost ) );
checkCudaErrors( hipMemcpy ( restdensity, fbuf.mf_restdensity, numPoints*sizeof(float), hipMemcpyDeviceToHost ) );
if( mode == 2){
// Send particle buffers
checkCudaErrors( hipMemcpy ( alpha_pre, fbuf.mf_alpha_next, numPoints*MAX_FLUIDNUM*sizeof(float), hipMemcpyDeviceToHost ) );
//checkCudaErrors( hipMemcpy ( pressure_modify, fbuf.mf_pressure_modify, numPoints*sizeof(float), hipMemcpyDeviceToHost ) );
checkCudaErrors( hipMemcpy ( vel_phrel, fbuf.mf_vel_phrel, numPoints*MAX_FLUIDNUM*sizeof(float)*3, hipMemcpyDeviceToHost ) );
checkCudaErrors( hipMemcpy ( visc, fbuf.mf_visc, numPoints*sizeof(float), hipMemcpyDeviceToHost ) );
//checkCudaErrors( hipMemcpy ( velxcor, fbuf.mf_velxcor, numPoints*sizeof(float)*3, hipMemcpyDeviceToHost ) );
//checkCudaErrors( hipMemcpy ( alphagrad, fbuf.mf_alphagrad, numPoints*MAX_FLUIDNUM*sizeof(float)*3, hipMemcpyDeviceToHost ) );
}
}
void CopyBoundFromCUDA (int* isbound )
{
int numPoints = fcuda.pnum;
if ( isbound != 0x0 ) checkCudaErrors( hipMemcpy ( isbound, fbuf.misbound, numPoints*sizeof(int), hipMemcpyDeviceToHost ) );
hipDeviceSynchronize ();
}
void CopyFromCUDA_Uproject(int* mftype, float*beta)
{
int numPoints = fcuda.pnum;
checkCudaErrors( hipMemcpy( mftype, fbuf.MFtype, numPoints*sizeof(int), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(beta, fbuf.mf_beta, numPoints * sizeof(float)*MAX_FLUIDNUM*MAX_SOLIDNUM, hipMemcpyDeviceToHost));
hipDeviceSynchronize ();
}
//Called when particles emitted
void CopyEmitToCUDA ( float* pos, float* vel, float* veleval, float* force, float* pressure, float* density, uint* cluster, uint* gnext, char* clr, int startnum, int numcount,int* isbound )
{
// Send particle buffers
checkCudaErrors( hipMemcpy ( fbuf.mpos+startnum, pos+startnum*3, numcount*sizeof(float)*3, hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy ( fbuf.maccel+startnum, vel+startnum*3, numcount*sizeof(float)*3, hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy ( fbuf.mveleval+startnum, veleval+startnum*3, numcount*sizeof(float)*3, hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy ( fbuf.mforce+startnum, force+startnum*3, numcount*sizeof(float)*3, hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy ( fbuf.mpress+startnum, pressure+startnum, numcount*sizeof(float), hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy ( fbuf.mdensity+startnum, density+startnum, numcount*sizeof(float), hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy ( fbuf.mclr+startnum, clr+startnum, numcount*sizeof(uint), hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy ( fbuf.misbound + startnum, isbound + startnum, numcount*sizeof(int), hipMemcpyHostToDevice ) );
hipDeviceSynchronize ();
}
void CopyEmitMfToCUDA ( float* alpha, float* alpha_pre, float* pressure_modify, float* vel_phrel, float* restmass, float* restdensity, float* visc, float* velxcor, float* alphagrad,int startnum, int numcount)
{
// Send particle buffers
int mulstartnum = startnum*MAX_FLUIDNUM;
checkCudaErrors( hipMemcpy ( fbuf.mf_alpha + mulstartnum, alpha + mulstartnum, numcount*MAX_FLUIDNUM*sizeof(float), hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy ( fbuf.mf_alpha_next + mulstartnum, alpha_pre + mulstartnum, numcount*MAX_FLUIDNUM*sizeof(float), hipMemcpyHostToDevice ) );
//checkCudaErrors( hipMemcpy ( fbuf.mf_pressure_modify+startnum, pressure_modify+startnum, numcount*sizeof(float), hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy ( fbuf.mf_vel_phrel + mulstartnum, vel_phrel + mulstartnum*3, numcount*MAX_FLUIDNUM*sizeof(float)*3, hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy ( fbuf.mf_restmass+startnum, restmass+startnum, numcount*sizeof(float), hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy ( fbuf.mf_restdensity+startnum, restdensity+startnum, numcount*sizeof(float), hipMemcpyHostToDevice ) );
checkCudaErrors( hipMemcpy ( fbuf.mf_visc+startnum, visc+startnum, numcount*sizeof(float), hipMemcpyHostToDevice ) );
//checkCudaErrors( hipMemcpy ( fbuf.mf_velxcor+startnum, velxcor+startnum*3, numcount*sizeof(float)*3, hipMemcpyHostToDevice ) );
//checkCudaErrors( hipMemcpy ( fbuf.mf_alphagrad + mulstartnum, alphagrad + mulstartnum*3, numcount*MAX_FLUIDNUM*sizeof(float)*3, hipMemcpyHostToDevice ) );
hipDeviceSynchronize ();
}
void UpdatePNumCUDA( int newPnum)
{
fcuda.pnum = newPnum;
computeNumBlocks ( fcuda.pnum, 384, fcuda.numBlocks, fcuda.numThreads); //threads changed!
fcuda.szPnts = (fcuda.numBlocks * fcuda.numThreads); //szPnts changed!
checkCudaErrors( hipMemcpyToSymbol ( simData, &fcuda, sizeof(FluidParams) ) );
hipDeviceSynchronize ();
}
int MfGetPnum(){
return fcuda.pnum;
}
//Called in RunSimulateCudaFull
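//Neighbor-grid construction pipeline (per simulation step):
//  1. InitialSortCUDA       - InitialSort assigns each particle its grid-cell id (mgcell) and seeds midsort.
//  2. SortGridCUDA          - thrust::sort_by_key orders particles by cell id; CalcFirstCnt/GetCnt derive
//                             per-cell offsets (mgridoff) and counts (mgridcnt) from the sorted keys.
//  3. CountingSortFullCUDA_ - stages all particle attributes in msortbuf and scatters them into cell-sorted order.
//A hypothetical host-side call order (the real caller lives in RunSimulateCudaFull) would be:
//  InitialSortCUDA(0x0, 0x0, 0x0); SortGridCUDA(0x0); CountingSortFullCUDA_(0x0);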
void InitialSortCUDA( uint* gcell, uint* ccell, int* gcnt )
{
hipMemset ( fbuf.mgridcnt, 0, fcuda.gridTotal * sizeof(int));
hipMemset ( fbuf.mgridoff, 0, fcuda.gridTotal * sizeof(int));
hipMemset ( fbuf.mgcell, 0, fcuda.pnum * sizeof(uint));
hipLaunchKernelGGL(( InitialSort), dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, fbuf, fcuda.pnum );
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
fprintf ( stderr, "CUDA ERROR: InsertParticlesCUDA: %s\n", hipGetErrorString(error) );
}
hipDeviceSynchronize ();
// Transfer data back if requested (for validation)
if (gcell != 0x0) {
checkCudaErrors( hipMemcpy ( gcell, fbuf.mgcell, fcuda.pnum*sizeof(uint), hipMemcpyDeviceToHost ) );
checkCudaErrors( hipMemcpy ( gcnt, fbuf.mgridcnt, fcuda.gridTotal*sizeof(int), hipMemcpyDeviceToHost ) );
//checkCudaErrors( hipMemcpy ( ccell, fbuf.mcluster, fcuda.pnum*sizeof(uint), hipMemcpyDeviceToHost ) );
}
}
void SortGridCUDA( int* goff )
{
thrust::device_ptr<uint> dev_keysg(fbuf.mgcell);
thrust::device_ptr<uint> dev_valuesg(fbuf.midsort);
thrust::sort_by_key(dev_keysg,dev_keysg+fcuda.pnum,dev_valuesg);
hipDeviceSynchronize ();
hipLaunchKernelGGL(( CalcFirstCnt) , dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, fbuf, fcuda.pnum );
// hipDeviceSynchronize ();
hipDeviceSynchronize ();
hipLaunchKernelGGL(( GetCnt) , dim3(fcuda.numBlocks),dim3(fcuda.numThreads), 0, 0, fbuf,fcuda.pnum);
hipDeviceSynchronize ();
/*
uint* test,*test1;
test = (uint*)malloc(sizeof(uint)*fcuda.pnum);
test1 = (uint*)malloc(sizeof(uint)*fcuda.gridTotal);
hipMemcpy(test,fbuf.mgcell,sizeof(uint)*fcuda.pnum,hipMemcpyDeviceToHost);
hipMemcpy(test1,fbuf.mgridoff,sizeof(uint)*fcuda.gridTotal,hipMemcpyDeviceToHost);
for (int i = 0;i<fcuda.pnum;i++)
if (test[i]!=GRID_UNDEF)
printf("%u %u %u\n",test[i],test1[test[i]]);
*/
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR:SortGridCUDA: %s\n", hipGetErrorString(error));
}
}
void CountingSortFullCUDA_( uint* ggrid )
{
// Transfer particle data to temp buffers
int n = fcuda.pnum;
hipMemcpy ( fbuf.msortbuf + n*BUF_POS, fbuf.mpos, n*sizeof(float)*3, hipMemcpyDeviceToDevice );
hipMemcpy ( fbuf.msortbuf + n*BUF_VELEVAL, fbuf.mveleval, n*sizeof(float)*3, hipMemcpyDeviceToDevice );
hipMemcpy ( fbuf.msortbuf + n*BUF_PRESS, fbuf.mpress, n*sizeof(float), hipMemcpyDeviceToDevice );
hipMemcpy(fbuf.msortbuf + n*BUF_GCELL, fbuf.mgcell, n * sizeof(uint), hipMemcpyDeviceToDevice);
hipMemcpy(fbuf.msortbuf + n*BUF_GNDX, fbuf.mgndx, n * sizeof(uint), hipMemcpyDeviceToDevice);
hipMemcpy(fbuf.msortbuf + n*BUF_ISINSIDE, fbuf.isInside, n * sizeof(bool), hipMemcpyDeviceToDevice);
#ifdef NEW_BOUND
hipMemcpy(fbuf.msortbuf + n*BUF_ISBOUND, fbuf.misbound, n * sizeof(int), hipMemcpyDeviceToDevice);
#endif
//multi fluid
hipMemcpy(fbuf.msortbuf + n*BUF_ALPHA, fbuf.mf_alpha, n*MAX_FLUIDNUM * sizeof(float), hipMemcpyDeviceToDevice);
hipMemcpy(fbuf.msortbuf + n*BUF_ALPHAPRE, fbuf.mf_alpha_next, n*MAX_FLUIDNUM * sizeof(float), hipMemcpyDeviceToDevice);
hipMemcpy(fbuf.msortbuf + n*BUF_RMASS, fbuf.mf_restmass, n * sizeof(float), hipMemcpyDeviceToDevice);
//porous
hipMemcpy ( fbuf.msortbuf + n*BUF_INDICATOR, fbuf.MFtype, n*sizeof(int), hipMemcpyDeviceToDevice );
//an implicit SPH formulation for elastic body
hipMemcpy ( fbuf.msortbuf + n*BUF_ELASTICID, fbuf.elasticID, n * sizeof(uint), hipMemcpyDeviceToDevice);
hipMemcpy ( fbuf.msortbuf + n*BUF_ABSORBEDPERCENT, fbuf.mf_beta, n * MAX_FLUIDNUM * sizeof(float) * MAX_SOLIDNUM, hipMemcpyDeviceToDevice);
hipMemcpy(fbuf.msortbuf + n*BUF_BETANEXT, fbuf.mf_beta_next, n * MAX_FLUIDNUM * sizeof(float) * MAX_SOLIDNUM, hipMemcpyDeviceToDevice);
//hipMemcpy(fbuf.msortbuf + n*BUF_POROVEL, fbuf.poroVel, n *MAX_FLUIDNUM * sizeof(float3), hipMemcpyDeviceToDevice);
// Counting Sort - pass one, determine grid counts
hipMemset ( fbuf.mgrid, GRID_UCHAR, fcuda.pnum * sizeof(int) );
hipLaunchKernelGGL(( CountingSortFull_) , dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, fbuf, fcuda.pnum);
hipDeviceSynchronize ();
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
fprintf ( stderr, "CUDA ERROR:Sorting Failed: %s\n", hipGetErrorString(error) );
}
////checkCudaErrors(hipMemcpyFromSymbol(&(fcuda.pnum), pNumFT, sizeof(int))); //total pnum changed!
////computeNumBlocks ( fcuda.pnum, 384, fcuda.numBlocks, fcuda.numThreads); //threads changed!
////fcuda.szPnts = (fcuda.numBlocks * fcuda.numThreads); //szPnts changed!
//// printf("pnum:%d,Blocknum:%d,Threadnum:%d\n",fcuda.pnum,fcuda.numBlocks,fcuda.numThreads);
////hipDeviceSynchronize ();
}
void initSPH(float* restdensity,int* mftype)
{
hipLaunchKernelGGL(( initDensity), dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, fbuf, fcuda.pnum);
hipDeviceSynchronize();
}
void TestFunc()
{
	hipLaunchKernelGGL(( testFunc ), dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, fbuf, fcuda.pnum);
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: MfFindNearestVelCUDA: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
}
void MfComputePressureCUDA ()
{
//mfFindNearest<<< fcuda.numBlocks, fcuda.numThreads>>> (fbuf, fcuda.pnum);
//hipError_t error = hipGetLastError();
//if (error != hipSuccess) {
// fprintf ( stderr, "CUDA ERROR: MfFindNearestVelCUDA: %s\n", hipGetErrorString(error) );
//}
//hipDeviceSynchronize ();
hipLaunchKernelGGL(( mfPreComputeDensity), dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, fbuf, fcuda.pnum );
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
fprintf ( stderr, "CUDA ERROR: MfPreComputeDensityVelCUDA: %s\n", hipGetErrorString(error) );
}
hipDeviceSynchronize ();
hipLaunchKernelGGL(( mfComputePressure), dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, fbuf, fcuda.pnum );
error = hipGetLastError();
if (error != hipSuccess) {
fprintf ( stderr, "CUDA ERROR: MfComputePressureVelCUDA: %s\n", hipGetErrorString(error) );
}
hipDeviceSynchronize ();
}
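//Predict-advection pass of the pressure solver (an IISPH-like structure inferred from the kernel names):
//commit the advected alpha/beta fractions, locate nearby solids and their pore pressure, evaluate density,
//accumulate all non-pressure forces, integrate a mid-step velocity, then build the rest volumes, the source
//term and the diagonal coefficients (aii) consumed by the PressureSolve iteration below.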
void MfPredictAdvection(float time)
{
	hipLaunchKernelGGL(( applyAlphaAndBeta ), dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, fbuf, fcuda.pnum);
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: MfComputeDriftVelCUDA: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
	hipLaunchKernelGGL(( FindNearbySolid ), dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, fbuf, fcuda.pnum);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: compute fluid percent change CUDA: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
	hipLaunchKernelGGL(( ComputeSolidPorePressure ), dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, fbuf, fcuda.pnum);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: compute pore pressure CUDA: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
//step1:compute density
	hipLaunchKernelGGL(( mfPreComputeDensity ), dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, fbuf, fcuda.pnum);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: MfPreComputeDensityVelCUDA: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
	hipLaunchKernelGGL(( ComputeOtherForce ), dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, fbuf, fcuda.pnum, time);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: MfComputeOtherForceCUDA: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
//step2:compute intermediate velocity
	hipLaunchKernelGGL(( computeMidVel ), dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, fbuf, fcuda.pnum);
//updateVelocity << <fcuda.numBlocks, fcuda.numThreads >> >(time, fbuf, fcuda.pnum);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: Compute mid vel: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
	hipLaunchKernelGGL(( ComputeBRestVolume ), dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, fbuf, fcuda.pnum);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: Compute rest volume: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
	hipLaunchKernelGGL(( ComputeVolume ), dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, fbuf, fcuda.pnum);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: Compute volume: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
	hipLaunchKernelGGL(( ComputeSource ), dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, fbuf, fcuda.pnum);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: Compute source: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
	hipLaunchKernelGGL(( ComputeAII ), dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, fbuf, fcuda.pnum);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: ComputeAII: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
}
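//Iterative pressure solve: repeatedly apply the current pressure forces, re-evaluate the density deviation
//(delta_density) and average it over the fluid index range with thrust::reduce. Iteration stops when the
//average error drops below eta (after at least 3 iterations), stagnates (relative change < 0.1%), or 100
//iterations are reached; the converged pressure forces are then applied once at the end.
//Hedged usage sketch (hypothetical indices): call MfPredictAdvection(time) first, then
//PressureSolve(fluidBegin, fluidEnd) for the contiguous index range of fluid particles.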
void PressureSolve(int fluid_beginIndex,int fluid_endIndex)
{
int l = 0;
float averror;
float sum, length = fluid_endIndex - fluid_beginIndex;
float eta = 0.1;
hipError_t error;
float last_error = 1;
do {
//iterate compute pressure
l++;
//upgrade force to compute the error
		hipLaunchKernelGGL(( ComputePressureForce ), dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, fbuf, fcuda.pnum);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: ComputePressureForce: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
		hipLaunchKernelGGL(( ComputeCriterion ), dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, fbuf, fcuda.pnum);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: Compute Criterion: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
thrust::device_ptr<float> dev_deltadens(fbuf.delta_density);
thrust::device_vector<float> deltadens(dev_deltadens + fluid_beginIndex, dev_deltadens + fluid_endIndex);
//averror = thrust::reduce(deltadens.begin(), deltadens.end()) / thrust::reduce(dens.begin(), dens.end());
averror = thrust::reduce(deltadens.begin(), deltadens.end()) / (fluid_endIndex - fluid_beginIndex);
//printf("the %dth iteration over.\n", l);
//if (l > 10)
// break;
		if (fabs(averror - last_error) / fabs(last_error) < 0.001 || l > 100)
			break;
		last_error = averror;
	} while (l < 3 || fabs(averror) > eta);
	hipLaunchKernelGGL(( ApplyPressureForce ), dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, fbuf, fcuda.pnum);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: ComputePressureForce: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
}
void MfComputeDriftVelCUDA ()
{
hipError_t error;
	hipLaunchKernelGGL(( ComputeSolidPorePressure ), dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, fbuf, fcuda.pnum);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: compute pore pressure CUDA: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
hipLaunchKernelGGL(( mfComputeDriftVel), dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, fbuf, fcuda.pnum );
error = hipGetLastError();
if (error != hipSuccess) {
fprintf ( stderr, "CUDA ERROR: MfComputeDriftVelCUDA: %s\n", hipGetErrorString(error) );
}
hipDeviceSynchronize ();
}
void MfComputeAlphaAdvanceCUDA ()
{
hipError_t error;
//mfComputeDriftVel << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
//error = hipGetLastError();
//if (error != hipSuccess) {
// fprintf(stderr, "CUDA ERROR: MfComputeDriftVelCUDA: %s\n", hipGetErrorString(error));
//}
//hipDeviceSynchronize();
//mfComputeTDM << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
//error = hipGetLastError();
//if (error != hipSuccess) {
// fprintf(stderr, "CUDA ERROR: MfComputeTDM CUDA: %s\n", hipGetErrorString(error));
//}
//hipDeviceSynchronize();
//mfComputeAlphaAdvance << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
//error = hipGetLastError();
//if (error != hipSuccess) {
// fprintf(stderr, "CUDA ERROR: MfComputeAlphaAdvanceCUDA: %s\n", hipGetErrorString(error));
//}
//hipDeviceSynchronize();
//ComputeFluidAdvance << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
//error = hipGetLastError();
//if (error != hipSuccess) {
// fprintf(stderr, "CUDA ERROR: compute fluid advance CUDA: %s\n", hipGetErrorString(error));
//}
//hipDeviceSynchronize();
	hipLaunchKernelGGL(( mfComputeCorrection ), dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, fbuf, fcuda.pnum);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: MfComputeCorrectionCUDA: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
}
void MfComputeCorrectionCUDA ()
{
/*if(fcuda.example == 5)
mfComputeCorrection5<<< fcuda.numBlocks, fcuda.numThreads>>> ( fbuf, fcuda.pnum );
else*/
hipLaunchKernelGGL(( mfComputeCorrection), dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, fbuf, fcuda.pnum );
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
fprintf ( stderr, "CUDA ERROR: MfComputeCorrectionCUDA: %s\n", hipGetErrorString(error) );
}
hipDeviceSynchronize ();
}
//void ComputeForceCUDA_ProjectU(float time)
//{
// ////(8)T_Sm
// //ComputeForce_projectu<<< fcuda.numBlocks, fcuda.numThreads>>> ( fbuf, fcuda.pnum );
// //hipError_t error = hipGetLastError();
// //if (error != hipSuccess)
// // fprintf ( stderr, "CUDA ERROR: MfComputeForceCUDA: %s\n", hipGetErrorString(error) );
// //hipDeviceSynchronize ();
//
// //hipDeviceSynchronize();
//
// //AddSPHtensorForce<<<fcuda.numBlocks, fcuda.numThreads>>>(fbuf, fcuda.pnum, time);
// //error = hipGetLastError();
// //if (error != hipSuccess)
// // fprintf ( stderr, "CUDA ERROR: Adding SPH tensor Force: %s\n", hipGetErrorString(error) );
// //hipDeviceSynchronize ();
//
//
//}
//Mathematics
__device__ inline double RxPythag(const double a, const double b)
{
double absa = abs(a), absb = abs(b);
return (absa > absb ? absa*(double)sqrt((double)(1.0+(absb/absa)*(absb/absa))) :
(absb == 0.0 ? 0.0 : absb*(double)sqrt((double)(1.0+(absa/absb)*(absa/absb)))));
}
__device__ inline double RXD_MIN(const double &a, const double &b){ return ((a < b) ? a : b); }
__device__ inline double RXD_MAX(const double &a, const double &b){ return ((a > b) ? a : b); }
__device__ inline double RXD_SIGN2(const double &a, const double &b){ return b >= 0 ? (a >= 0 ? a : -a) : (a >= 0 ? -a : a); }
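//3x3 singular value decomposition: on entry u holds the input matrix (row-major); on exit u and v hold the
//left/right singular vectors and w the singular values, sorted in descending order with signs fixed up at the
//end. The structure (Householder bidiagonalization, implicit QR sweeps via RxPythag, the 30-iteration cap)
//closely follows the classic Numerical Recipes svdcmp routine specialized to n = 3 - an inference from the
//code, not stated here. Returns 0 if the QR sweep fails to converge, 1 otherwise.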
__device__ int svdecomp3(float w[3], float u[9], float v[9], float eps)
{
bool flag;
int i, its, j, jj, k, l, nm;
float anorm, c, f, g, h, s, scale, x, y, z;
float rv1[3];
g = scale = anorm = 0.0;
for(i = 0; i < 3; ++i){
l = i+2;
rv1[i] = scale*g;
g = s = scale = 0.0;
for(k = i; k < 3; ++k) scale += abs(u[k*3+i]);
if(scale != 0.0){
for(k = i; k < 3; ++k){
u[k*3+i] /= scale;
s += u[k*3+i]*u[k*3+i];
}
f = u[i*3+i];
g = -RXD_SIGN2(sqrt(s), f);
h = f*g-s;
u[i*3+i] = f-g;
for(j = l-1; j < 3; ++j){
for(s = 0.0, k = i; k < 3; ++k) s += u[k*3+i]*u[k*3+j];
f = s/h;
for(k = i; k < 3; ++k) u[k*3+j] += f*u[k*3+i];
}
for(k = i; k < 3; ++k) u[k*3+i] *= scale;
}
w[i] = scale*g;
g = s = scale = 0.0;
if(i+1 <= 3 && i+1 != 3){
for(k = l-1; k < 3; ++k) scale += abs(u[i*3+k]);
if(scale != 0.0){
for(k = l-1; k < 3; ++k){
u[i*3+k] /= scale;
s += u[i*3+k]*u[i*3+k];
}
f = u[i*3+l-1];
g = -RXD_SIGN2(sqrt(s), f);
h = f*g-s;
u[i*3+l-1] = f-g;
for(k = l-1; k < 3; ++k) rv1[k] = u[i*3+k]/h;
for(j = l-1; j < 3; ++j){
for(s = 0.0,k = l-1; k < 3; ++k) s += u[j*3+k]*u[i*3+k];
for(k = l-1; k < 3; ++k) u[j*3+k] += s*rv1[k];
}
for(k = l-1; k < 3; ++k) u[i*3+k] *= scale;
}
}
anorm = RXD_MAX(anorm, (abs(w[i])+abs(rv1[i])));
}
for(i = 2; i >= 0; --i){
if(i < 2){
if(g != 0.0){
for(j = l; j < 3; ++j){
v[j*3+i] = (u[i*3+j]/u[i*3+l])/g;
}
for(j = l; j < 3; ++j){
for(s = 0.0, k = l; k < 3; ++k) s += u[i*3+k]*v[k*3+j];
for(k = l; k < 3; ++k) v[k*3+j] += s*v[k*3+i];
}
}
for(j = l; j < 3; ++j) v[i*3+j] = v[j*3+i] = 0.0;
}
v[i*3+i] = 1.0;
g = rv1[i];
l = i;
}
for(i = 2; i >= 0; --i){
l = i+1;
g = w[i];
for(j = l; j < 3; ++j) u[i*3+j] = 0.0;
if(g != 0.0){
g = 1.0/g;
for(j = l; j < 3; ++j){
for(s = 0.0, k = l; k < 3; ++k) s += u[k*3+i]*u[k*3+j];
f = (s/u[i*3+i])*g;
for(k = i; k < 3; ++k) u[k*3+j] += f*u[k*3+i];
}
for(j = i; j < 3; ++j) u[j*3+i] *= g;
}
else{
for(j = i; j < 3; ++j) u[j*3+i] = 0.0;
}
++u[i*3+i];
}
for(k = 2; k >= 0; --k){
for(its = 0; its < 30; ++its){
flag = true;
for(l = k; l >= 0; --l){
nm = l-1;
if(l == 0 || abs(rv1[l]) <= eps*anorm){
flag = false;
break;
}
if(abs(w[nm]) <= eps*anorm) break;
}
if(flag){
c = 0.0;
s = 1.0;
for(i = l; i < k+1; ++i){
f = s*rv1[i];
rv1[i] = c*rv1[i];
if(abs(f) <= eps*anorm) break;
g = w[i];
h = RxPythag(f, g);
w[i] = h;
h = 1.0/h;
c = g*h;
s = -f*h;
for(j = 0; j < 3; ++j){
y = u[j*3+nm];
z = u[j*3+i];
u[j*3+nm] = y*c+z*s;
u[j*3+i] = z*c-y*s;
}
}
}
z = w[k];
if(l == k){
if(z < 0.0){
w[k] = -z;
for(j = 0; j < 3; ++j) v[j*3+k] = -v[j*3+k];
}
break;
}
if(its == 29){
//printf("no convergence in 30 svdcmp iterations");
return 0;
}
x = w[l];
nm = k-1;
y = w[nm];
g = rv1[nm];
h = rv1[k];
f = ((y-z)*(y+z)+(g-h)*(g+h))/(2.0*h*y);
g = RxPythag(f, 1.0f);
f = ((x-z)*(x+z)+h*((y/(f+RXD_SIGN2(g, f)))-h))/x;
c = s = 1.0;
for(j = l; j <= nm; ++j){
i = j+1;
g = rv1[i];
y = w[i];
h = s*g;
g = c*g;
z = RxPythag(f, h);
rv1[j] = z;
c = f/z;
s = h/z;
f = x*c+g*s;
g = g*c-x*s;
h = y*s;
y *= c;
for(jj = 0; jj < 3; ++jj){
x = v[jj*3+j];
z = v[jj*3+i];
v[jj*3+j] = x*c+z*s;
v[jj*3+i] = z*c-x*s;
}
z = RxPythag(f, h);
w[j] = z;
if(z){
z = 1.0/z;
c = f*z;
s = h*z;
}
f = c*g+s*y;
x = c*y-s*g;
for(jj = 0; jj < 3; ++jj){
y = u[jj*3+j];
z = u[jj*3+i];
u[jj*3+j] = y*c+z*s;
u[jj*3+i] = z*c-y*s;
}
}
rv1[l] = 0.0;
rv1[k] = f;
w[k] = x;
}
}
// reorder
int inc = 1;
float sw;
float su[3], sv[3];
do{
inc *= 3;
inc++;
}while(inc <= 3);
do{
inc /= 3;
for(i = inc; i < 3; ++i){
sw = w[i];
for(k = 0; k < 3; ++k) su[k] = u[k*3+i];
for(k = 0; k < 3; ++k) sv[k] = v[k*3+i];
j = i;
while (w[j-inc] < sw){
w[j] = w[j-inc];
for(k = 0; k < 3; ++k) u[k*3+j] = u[k*3+j-inc];
for(k = 0; k < 3; ++k) v[k*3+j] = v[k*3+j-inc];
j -= inc;
if (j < inc) break;
}
w[j] = sw;
for(k = 0; k < 3; ++k) u[k*3+j] = su[k];
for(k = 0; k < 3; ++k) v[k*3+j] = sv[k];
}
}while(inc > 1);
for(k = 0; k < 3; ++k){
s = 0;
for(i = 0; i < 3; ++i) if(u[i*3+k] < 0.) s++;
for(j = 0; j < 3; ++j) if(v[j*3+k] < 0.) s++;
if(s > 3){
for(i = 0; i < 3; ++i) u[i*3+k] = -u[i*3+k];
for(j = 0; j < 3; ++j) v[j*3+k] = -v[j*3+k];
}
}
return 1;
}
__device__ void multiply_matrix3(float* a, float* b, float* c){
float d[9];
for(int i=0; i<3; i++)
for(int j=0; j<3; j++)
d[i*3+j] = a[i*3+0]*b[0*3+j]+a[i*3+1]*b[1*3+j]+a[i*3+2]*b[2*3+j];
for(int k=0; k<9; k++)
c[k] = d[k];
}
__device__ float3 multiply_mv3(float*m,float3 v)
{
float3 a;
a.x = m[0] * v.x + m[1] * v.y + m[2] * v.z;
a.y = m[3] * v.x + m[4] * v.y + m[5] * v.z;
a.z = m[6] * v.x + m[7] * v.y + m[8] * v.z;
return a;
}
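//Note: transmit3 computes the transpose of the 3x3 row-major matrix a into b (the name is kept unchanged).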
__device__ void transmit3(float* a,float* b){
float c[9];
c[0]=a[0]; c[1]=a[3]; c[2]=a[6];
c[3]=a[1]; c[4]=a[4]; c[5]=a[7];
c[6]=a[2]; c[7]=a[5]; c[8]=a[8];
for(int k=0; k<9; k++)
b[k]=c[k];
}
//__device__ float3 cross(const float3 v1,const float3 v2)
//{
// float3 result;
// result.x = v1.y*v2.z - v1.z*v2.y;
// result.y = v1.z*v2.x - v1.x*v2.z;
// result.z = v1.x*v2.y - v1.y*v2.x;
// return result;
//}
__device__ float3 col(const float* matrix,int col)
{
float3 result = make_float3(matrix[col], matrix[col + 3], matrix[col + 6]);
return result;
}
////convert quaternion q to rotation matrix R (commented-out reference implementation)
//__device__ void QuaternionToMatrix(const float*q, float*R)
//{
// R[0] = 1 - 2 * q[1] * q[1] - 2 * q[2] * q[2];
// R[1] = 2 * q[0] * q[1] - 2 * q[3] * q[2];
// R[2] = 2 * q[0] * q[2] + 2 * q[3] * q[1];
// R[3] = 2 * q[0] * q[1] + 2 * q[3] * q[2];
// R[4] = 1 - 2 * q[0] * q[0] - 2 * q[2] * q[2];
// R[5] = 2 * q[1] * q[2] - 2 * q[3] * q[0];
// R[6] = 2 * q[0] * q[2] - 2 * q[3] * q[1];
// R[7] = 2 * q[1] * q[2] + 2 * q[3] * q[0];
// R[8] = 1 - 2 * q[0] * q[0] - 2 * q[1] * q[1];
//}
////quaternion q is stored as (x, y, z, w): vector part first, scalar w last
//__device__ void extractRotation(int i,const float* A, float *q, const unsigned int maxIter)
//{
// float R[9];
// float temp_q[4];
// float norm;
// for (unsigned int iter = 0; iter < maxIter; iter++)
// {
// //translate q to matrix R
// QuaternionToMatrix(q, R);
// /*if (i == 37000)
// printf("R is (%f,%f,%f)(%f,%f,%f)(%f,%f,%f)\n",
// R[0], R[1], R[2], R[3], R[4], R[5], R[6], R[7], R[8]);
// if (i == 37000)
// printf("A is (%f,%f,%f)(%f,%f,%f)(%f,%f,%f)\n",
// A[0], A[1], A[2], A[3], A[4], A[5], A[6], A[7], A[8]);*/
// /*for (int i = 0; i < 9; ++i)
// R[i] = q[i];*/
// //Matrix3d R = q.matrix();
// float3 omega =
// (cross(col(R, 0),col(A,0))
// + cross(col(R, 1),col(A,1))
// + cross(col(R, 2),col(A,2)))
// * (1.0 / fabs(dot(col(R, 0),col(A,0))
// + dot(col(R, 1),col(A,1)) + dot(col(R, 2),col(A,2))) + 1.0e-9);
// if (i == 37000 && iter == 0)
// printf("omega is (%f,%f,%f)\n", omega.x, omega.y, omega.z);
// float w = sqrt(dot(omega,omega));
// if (w < 1.0e-9)
// break;
// omega /= w;
// temp_q[3] = w*q[3] - omega.x*q[0] - omega.y*q[1] - omega.z*q[2];
// temp_q[0] = w*q[0] + omega.x*q[3] + omega.y*q[2] - omega.z*q[1];
// temp_q[1] = w*q[1] + omega.y*q[3] + omega.z*q[0] - omega.x*q[2];
// temp_q[2] = w*q[2] + omega.z*q[3] + omega.x*q[1] - omega.y*q[0];
// //if (i == 37000)
// // printf("omega is (%f,%f,%f,%f)\n", omega.x, omega.y, omega.z, w);
// /*a.w() * b.w() - a.x() * b.x() - a.y() * b.y() - a.z() * b.z(),
// a.w() * b.x() + a.x() * b.w() + a.y() * b.z() - a.z() * b.y(),
// a.w() * b.y() + a.y() * b.w() + a.z() * b.x() - a.x() * b.z(),
// a.w() * b.z() + a.z() * b.w() + a.x() * b.y() - a.y() * b.x()*/
// norm = sqrt(temp_q[0] * temp_q[0] + temp_q[1] * temp_q[1] + temp_q[2] * temp_q[2] + temp_q[3] * temp_q[3]);
// //if (norm < 1.0e-9)
// // break;
// for (int i = 0; i < 4; ++i)
// q[i] = temp_q[i] / (norm + 1.0e-9);
//
// }
//}
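//Rodrigues' rotation formula: build the 3x3 rotation matrix R for a rotation of `angle` radians about the
//unit-length `axis`.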
__device__ void AxisToRotation(float* R,const float3 axis,const float angle)
{
float co = cos(angle), si = sin(angle);
R[0] = co + (1 - co)*axis.x*axis.x; R[1] = (1 - co)*axis.x*axis.y - si*axis.z; R[2] = (1 - co)*axis.x*axis.z + si*axis.y;
R[3] = (1 - co)*axis.y*axis.x + si*axis.z; R[4] = co + (1 - co)*axis.y*axis.y; R[5] = (1 - co)*axis.y*axis.z - si*axis.x;
R[6] = (1 - co)*axis.z*axis.x - si*axis.y; R[7] = (1 - co)*axis.z*axis.y + si*axis.x; R[8] = co + (1 - co)*axis.z*axis.z;
}
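//Iteratively extract the rotational part of matrix A, warm-started from the current rotation estimate q
//(both 3x3, row-major). Each sweep computes an angular correction
//    omega = sum_i r_i x a_i / (|sum_i r_i . a_i| + eps)
//from the columns r_i of q and a_i of A, then rotates q about omega by |omega|. This appears to follow the
//rotation-extraction iteration of Müller et al. 2016 ("A Robust Method to Extract the Rotational Part of
//Deformations"); the attribution is an inference from the code, not something stated in this file.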
__device__ void extractRotation(const float*A, float*q, const unsigned int maxIter)
{
float R[9];
float norm;
float3 sum = make_float3(0, 0, 0);
float sum2 = 0;
//float error = 100000,error2;
for (unsigned int iter = 0; iter < maxIter; iter++)
//while(true)
{
sum = make_float3(0, 0, 0);
sum2 = 0;
for (int i = 0; i < 3; ++i)
{
sum += cross(col(q, i), col(A, i));
sum2 += dot(col(q, i), col(A, i));
}
sum2 = fabs(sum2) + 1.0e-9;
sum /= sum2;
sum2 = sqrt(dot(sum, sum));
if (sum2 < 1.0e-9)
break;
sum /= sum2;
AxisToRotation(R, sum, sum2);
multiply_matrix3(R, q, q);
/*error2 = 0;
for (int k = 0; k < 3; ++k)
error2 += dot(col(q, k), col(A, k));
if (fabs(error - error2) < 1 || fabs((error - error2) / error) < 0.001)
break;*/
}
}
__device__ float det(const float* a){
float det = a[0]*a[4]*a[8] + a[1]*a[5]*a[6] + a[2]*a[3]*a[7];
det -= (a[2]*a[4]*a[6] + a[1]*a[3]*a[8] + a[5]*a[7]*a[0]);
return det;
}
__device__ void tensorProduct(const float3 a,const float3 b,float* r)
{
r[0] = a.x * b.x; r[1] = a.x * b.y; r[2] = a.x * b.z;
r[3] = a.y * b.x; r[4] = a.y * b.y; r[5] = a.y * b.z;
r[6] = a.z * b.x; r[7] = a.z * b.y; r[8] = a.z * b.z;
}
//In-place Gauss-Jordan inversion of a 3x3 row-major matrix (no pivoting; assumes non-zero pivots)
__device__ void InverseMatrix3(float * B)
{
float E[9];
for (int i = 0; i<3; ++i)
{
for (int j = 0; j<3; ++j)
E[i*3 + j] = 0;
E[i*3 + i] = 1;
}
for (int k = 0; k<3; ++k)
{
		//normalize row k by the pivot B[k][k]
for (int j = k + 1; j<3; ++j)
B[k*3 + j] = B[k*3 + j] / B[k*3 + k];
for (int j = 0; j<3; ++j)
E[k*3 + j] /= B[k*3 + k];
B[k*3 + k] = 1.0;
		//eliminate column k below the pivot: B[i][j] -= B[i][k] * B[k][j]
for (int i = k + 1; i<3; ++i)
{
for (int j = k + 1; j<3; ++j)
{
B[i*3 + j] = B[i*3 + j] - B[i*3 + k] * B[k*3 + j];
}
for (int j = 0; j<3; ++j)
E[i*3 + j] -= B[i*3 + k] * E[k*3 + j];
B[i*3 + k] = 0;
}
}
for (int k = 2; k >= 0; --k)
{
		//back-substitution: eliminate column k above the pivot, updating E accordingly
for (int i = k - 1; i >= 0; --i)
{
for (int j = 0; j<3; ++j)
E[i*3 + j] -= B[i*3 + k] * E[k*3 + j];
B[i*3 + k] = 0;
}
}
for (int i = 0; i < 9; ++i)
B[i] = E[i];
}
//Change density if needed
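//Note: this kernel writes fields of simData from device code, which only takes effect if simData is declared
//as a writable __device__ variable; device-side writes to __constant__ memory are not allowed. Whether that
//holds depends on how simData is declared elsewhere in this project.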
__global__ void mfChangeDensity (bufList buf,int pnum,const float scale)
{
simData.mf_dens[1] *= scale;
simData.mf_up = 1;
simData.mf_visc[1] = simData.mf_visc[0];
simData.VL = 0.3;
simData.VL2 = 0.3*0.3;
}
//The forces of boundary to fluid
__device__ float3 nor(float3 p)
{
float n1 = 0,n2 = 0,n3 = 0;
if (p.y<(int)simData.pboundmin.y) n2 = 1.0;
if (p.x<(int)simData.pboundmin.x) n1 = 1.0;
if (p.x>(int)simData.pboundmax.x) n1 = -1.0;
if (p.z<(int)simData.pboundmin.z) n3 = 1.0;
if (p.z>(int)simData.pboundmax.z) n3 = -1.0;
return make_float3(n1,n2,n3);
}
__device__ double flushData ( int i, float3 p, int cell, bufList buf )
{
float3 dist;
float dsq, c, sum;
//float massj;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2/d2;
int j;
//float maxdis = 88888;
// register float cmterm;
sum = 0.0;
if ( buf.mgridcnt[cell] == 0 ) return 0;
int cfirst = buf.mgridoff[ cell ];
int clast = cfirst + buf.mgridcnt[ cell ];
for ( int cndx = cfirst; cndx < clast; cndx++ ){
if (buf.misbound[buf.mgrid[cndx]] == 0)
{
j = buf.mgrid[cndx];
dist = p - buf.mpos[ buf.mgrid[cndx] ];
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if ( dsq < r2 && dsq > 0.0)
{
c = (r2 - dsq)*d2;
sum += c * c * c * buf.mf_restmass[j]*dot(buf.mveleval[j],nor(buf.mpos[i]));
}
}
}
//c = r2*d2;
//sum += c*c*c*buf.mf_restmass[i];
return sum;
}
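//For a boundary particle, record in buf.midsort the index of the nearest non-boundary particle found in one
//grid cell; mfFindNearest below uses that index so boundary particles inherit the rest mass of their closest
//fluid neighbor.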
__device__ void findNearest ( int i, float3 p, int cell, bufList buf )
{
float3 dist;
float dsq;
// float massj;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2/d2;
int j;
float maxdis = 88888;
// register float cmterm;
//register float3 alphagrad[MAX_FLUIDNUM];
//sum = 0.0;
if ( buf.mgridcnt[cell] == 0 ) return ;
int cfirst = buf.mgridoff[ cell ];
int clast = cfirst + buf.mgridcnt[ cell ];
for ( int cndx = cfirst; cndx < clast; cndx++ ) {
#ifdef NEW_BOUND
if (buf.misbound[buf.mgrid[cndx]] == 0)
{
j = buf.mgrid[cndx];
dist = p - buf.mpos[ buf.mgrid[cndx] ];
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if ( dsq < r2 && dsq > 0.0 && dsq*d2<maxdis)
{
maxdis = dsq*d2;
buf.midsort[i] = j;
}
}
#else
j = buf.mgrid[cndx];
dist = p - buf.mpos[ buf.mgrid[cndx] ];
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if ( dsq < r2 && dsq > 0.0 && dsq*d2<maxdis)
{
maxdis = dsq*d2;
buf.midsort[i] = j;
}
#endif
}
return ;
}
__global__ void mfFindNearest (bufList buf,int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if ( i >= pnum ) return;
// Get search cell
int nadj = (1*simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[ i ];
if ( gc == GRID_UNDEF ) return; // particle out-of-range
gc -= nadj;
// Sum Pressures
float3 pos = buf.mpos[ i ];
#ifdef NEW_BOUND
if (buf.misbound[i]==1)
{
buf.midsort[i] = i;
buf.mf_restmass[i] = simData.pmass;
for (int c = 0; c<simData.gridAdjCnt; c++)
{
findNearest(i,pos,gc+simData.gridAdj[c],buf);
}
if (buf.midsort[i]!=i)
buf.mf_restmass[i] = buf.mf_restmass[buf.midsort[i]];
}
#endif
}
//Sorting
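//InitialSort maps each particle position to a linear cell index
//    gs = (gc.y * gridRes.z + gc.z) * gridRes.x + gc.x
//or GRID_UNDEF when out of range. Neighbor-search kernels later subtract
//nadj = (gridRes.z + 1) * gridRes.x + 1 (one step in each of x, y and z) from the stored cell id, so that
//adding the precomputed gridAdj[] offsets enumerates the 3x3x3 block of cells centered on the particle's cell.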
__global__ void InitialSort ( bufList buf, int pnum )
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if ( i >= pnum ) return;
register float3 gridMin = simData.gridMin;
register float3 gridDelta = simData.gridDelta;
register int3 gridRes = simData.gridRes;
register int3 gridScan = simData.gridScanMax;
// register float poff = simData.psmoothradius / simData.psimscale;
register int gs;
register float3 gcf;
register int3 gc;
gcf = (buf.mpos[i] - gridMin) * gridDelta;
gc = make_int3( int(gcf.x), int(gcf.y), int(gcf.z) );
gs = (gc.y * gridRes.z + gc.z)*gridRes.x + gc.x;
if ( gc.x >= 1 && gc.x <= gridScan.x && gc.y >= 1 && gc.y <= gridScan.y && gc.z >= 1 && gc.z <= gridScan.z ) {
buf.mgcell[i] = gs; // Grid cell insert.
buf.midsort[i] = i;
// buf.mgndx[i] = atomicAdd ( &buf.mgridcnt[ gs ], 1 ); // Grid counts.
// gcf = (-make_float3(poff,poff,poff) + buf.mpos[i] - gridMin) * gridDelta;
// gc = make_int3( int(gcf.x), int(gcf.y), int(gcf.z) );
// gs = ( gc.y * gridRes.z + gc.z)*gridRes.x + gc.x;
//buf.mcluster[i] = gs; -- make sure it is allocated!
} else {
buf.mgcell[i] = GRID_UNDEF;
buf.midsort[i] = i;
//buf.mcluster[i] = GRID_UNDEF; -- make sure it is allocated!
}
}
__global__ void CalcFirstCnt ( bufList buf, int pnum )
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
if (i>=pnum) return;
if ((i == 0 || buf.mgcell[i]!=buf.mgcell[i-1]))
{
if (buf.mgcell[i]!=GRID_UNDEF)buf.mgridoff[buf.mgcell[i]] = i;
}
__syncthreads();
if (i!=0 && buf.mgcell[i]!=buf.mgcell[i-1] && buf.mgcell[i-1]!=GRID_UNDEF)
buf.mgridcnt[buf.mgcell[i-1]] = i;
if (i == pnum-1 && buf.mgcell[i]!=GRID_UNDEF)
buf.mgridcnt[buf.mgcell[i]] = i + 1;
/*
__shared__ uint scell[512]; // [blockDim.x+1}
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
uint cel;
if (i<pnum && buf.mgcell[i] != GRID_UNDEF)
{
cel=buf.mgcell[i];
scell[threadIdx.x+1]=cel;
if(i&&!threadIdx.x)scell[0]=buf.mgcell[i-1];
}
__syncthreads();
if(i<pnum && buf.mgcell[i] != GRID_UNDEF)
{
if(!i||cel!=scell[threadIdx.x])
{
buf.mgridoff[cel]=i;
if (i)
{
buf.mgridcnt[scell[threadIdx.x]] = i;
}
if (i == pnum - 1)
buf.mgridcnt[scell[threadIdx.x]] = i+1;
}
}
else if (i<pnum)
{
if (buf.mgcell[i] != scell[threadIdx.x])
{
buf.mgridcnt[scell[threadIdx.x]] = i;
}
}
*/
}
__global__ void GetCnt ( bufList buf, int pnum )
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i>=pnum) return ;
if (buf.mgcell[i]!=GRID_UNDEF)
{
buf.mgndx[i] = i - buf.mgridoff[buf.mgcell[i]];
if (buf.mgndx[i] == 0)
buf.mgridcnt[buf.mgcell[i]] -= buf.mgridoff[buf.mgcell[i]];
}
}
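//CountingSortFull_ scatters every per-particle attribute from the staging buffer msortbuf into cell-sorted
//order. The BUF_* constants (defined elsewhere) are per-particle byte strides, so msortbuf + pnum*BUF_X is
//the base address of attribute X for all pnum particles; midsort supplies the pre-sort particle index and
//mgridoff[cell] + mgndx gives the destination slot.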
__global__ void CountingSortFull_ ( bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if ( i >= pnum ) return;
uint icell = *(uint*) (buf.msortbuf + pnum*BUF_GCELL + i*sizeof(uint) );
uint indx = *(uint*) (buf.msortbuf + pnum*BUF_GNDX + i*sizeof(uint) );
		int sort_ndx = buf.mgridoff[ icell ] + indx;				// global_ndx = grid_cell_offset + particle_offset
// uint j = i;
i = buf.midsort[i];
if ( icell != GRID_UNDEF ) {
buf.mgrid[ sort_ndx ] = sort_ndx; // full sort, grid indexing becomes identity
char* bpos = buf.msortbuf + i*sizeof(float3);
buf.mpos[ sort_ndx ] = *(float3*) (bpos);
buf.mveleval[ sort_ndx ] = *(float3*) (bpos + pnum*BUF_VELEVAL );
buf.mpress[ sort_ndx ] = *(float*) (buf.msortbuf + pnum*BUF_PRESS + i*sizeof(float) );
#ifdef NEW_BOUND
buf.misbound[ sort_ndx ] = *(int*) (buf.msortbuf + pnum*BUF_ISBOUND+ i*sizeof(int) ); // ((uint) 255)<<24; -- dark matter
#endif
buf.isInside[sort_ndx] = *(bool*)(buf.msortbuf + pnum*BUF_ISINSIDE + i * sizeof(bool));
buf.mgcell[ sort_ndx ] = icell;
buf.mgndx[ sort_ndx ] = indx;
//multi fluid
int mul_sort_ndx = sort_ndx*MAX_FLUIDNUM;
for( uint fcount = 0; fcount < simData.mf_catnum; fcount++)
{
//char* bmul = buf.msortbuf + i*sizeof(float)*MAX_FLUIDNUM + fcount * sizeof(float);
buf.mf_alpha[mul_sort_ndx+fcount] = *(float*)(buf.msortbuf + pnum*BUF_ALPHA + i*sizeof(float)*MAX_FLUIDNUM + fcount * sizeof(float));
buf.mf_alpha_next[mul_sort_ndx+fcount] = *(float*)(buf.msortbuf + pnum*BUF_ALPHAPRE+ i*sizeof(float)*MAX_FLUIDNUM + fcount * sizeof(float));
//porous
for (int l = 0; l < MAX_SOLIDNUM; ++l)
{
buf.mf_beta[mul_sort_ndx*MAX_SOLIDNUM + fcount*MAX_SOLIDNUM+l]
= *(float*)(buf.msortbuf + pnum*BUF_ABSORBEDPERCENT + i * sizeof(float)*MAX_FLUIDNUM*MAX_SOLIDNUM + fcount * sizeof(float)*MAX_SOLIDNUM + l*sizeof(float));
buf.mf_beta_next[mul_sort_ndx*MAX_SOLIDNUM + fcount*MAX_SOLIDNUM + l]
= *(float*)(buf.msortbuf + pnum*BUF_BETANEXT + i * sizeof(float)*MAX_FLUIDNUM*MAX_SOLIDNUM + fcount * sizeof(float)*MAX_SOLIDNUM + l * sizeof(float));
}
//buf.capillaryPotentials[mul_sort_ndx + fcount] = *(float*)(buf.msortbuf + pnum*BUF_CP + i * sizeof(float)*MAX_FLUIDNUM + fcount * sizeof(float));
}
//buf.mf_pressure_modify[ sort_ndx ] = *(float*) (buf.msortbuf + pnum*BUF_PRESSMODI + i*sizeof(float));
buf.mf_restmass[ sort_ndx ] = *(float*) (buf.msortbuf + pnum*BUF_RMASS + i*sizeof(float));
//buf.mf_velxcor[sort_ndx] = *(float3*)(buf.msortbuf + pnum*BUF_VELXCOR + i*sizeof(float3));
buf.MFtype[sort_ndx] = *(int*)(buf.msortbuf+ pnum*BUF_INDICATOR + i*sizeof(int));
//elastic information
buf.elasticID[sort_ndx] = *(uint*)(buf.msortbuf + pnum*BUF_ELASTICID + i * sizeof(uint));
if(buf.MFtype[sort_ndx] == 2)
buf.particleID[buf.elasticID[sort_ndx]] = sort_ndx;
if(_example == 2 && buf.MFtype[sort_ndx] >= 2)
buf.particleID[buf.elasticID[sort_ndx]] = sort_ndx;
}
}
//compute pressure
__device__ float mfContributePressure ( int i, float3 p, int cell, bufList buf, float& sum_solid, float& sum_fluid)
{
float3 dist;
float dsq, c, sum;
float massj;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2/d2;
sum = 0.0;
int j;
if ( buf.mgridcnt[cell] == 0 )
return 0.0;
int cfirst = buf.mgridoff[ cell ];
int clast = cfirst + buf.mgridcnt[ cell ];
for ( int cndx = cfirst; cndx < clast; cndx++ ) {
j = buf.mgrid[cndx];
dist = p - buf.mpos[ buf.mgrid[cndx] ];
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if ( dsq < r2 && dsq > 0.0) {
c = (r2 - dsq)*d2;
sum += c * c * c * buf.mf_restmass[i];
if (buf.MFtype[i] == buf.MFtype[j])
{
if (buf.MFtype[i] == 0)
sum_fluid += c * c * c * buf.mf_restmass[i];
else
sum_solid += c * c * c * buf.mf_restmass[i];
}
if (buf.MFtype[i] + buf.MFtype[j] == 9)
sum_solid += c * c * c * buf.mf_restmass[i];
}
}
return sum;
}
__device__ float mfContributePressureInit ( int i, float3 p, int cell, bufList buf )
{
float3 dist;
float dsq, c, sum;
float massj;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2/d2;
sum = 0.0;
int j;
if ( buf.mgridcnt[cell] == 0 )
return 0.0;
int cfirst = buf.mgridoff[ cell ];
int clast = cfirst + buf.mgridcnt[ cell ];
for ( int cndx = cfirst; cndx < clast; cndx++ ) {
j = buf.mgrid[cndx];
//if( buf.MFtype[i] == 2 && buf.MFtype[j]!=2)
if(buf.MFtype[i]!=buf.MFtype[j])
continue;
dist = p - buf.mpos[ buf.mgrid[cndx] ];
massj = buf.mf_restmass[ buf.mgrid[cndx] ];
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if ( dsq < r2 && dsq > 0.0) {
c = (r2 - dsq)*d2;
sum += c * c * c * massj;
}
}
return sum;
}
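//Density evaluation with the poly6 kernel: every neighbor inside the support radius contributes
//(r2 - dsq)^3 weighted (as written) by the particle's own rest mass, and the r2^3 term added afterwards is
//the self-contribution at r = 0. mdensity stores the inverse density; sum_solid restricts the sum to
//same-type neighbors (plus the one type pairing singled out by the "== 9" check) and feeds density_solid,
//presumably for the elastic/porous solver.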
__global__ void mfPreComputeDensity ( bufList buf, int pnum )
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if ( i >= pnum ) return;
// Get search cell
int nadj = (1*simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[ i ];
if ( gc == GRID_UNDEF ) return; // particle out-of-range
gc -= nadj;
// Sum Pressures
float3 pos = buf.mpos[ i ];
float dens = buf.mf_restdensity[i];
float sum = 0.0;
float sum_solid = 0.0;
float sum_fluid = 0.0;
for (int c=0; c < simData.gridAdjCnt; c++) {
sum += mfContributePressure ( i, pos, gc + simData.gridAdj[c], buf, sum_solid, sum_fluid);
//__syncthreads();
}
// Compute Density & Pressure
sum += simData.r2 * simData.r2 * simData.r2 * buf.mf_restmass[i];
sum_solid += simData.r2 * simData.r2 * simData.r2 * buf.mf_restmass[i];
//sum_fluid += simData.r2 * simData.r2 * simData.r2 * buf.mf_restmass[i];
sum = sum * simData.poly6kern;
sum_solid = sum_solid * simData.poly6kern;
sum_fluid = sum_fluid * simData.poly6kern;
if ( sum == 0.0 ) sum = 1.0;
#ifdef NEW_BOUND
buf.mdensity[ i ] = 1.0f / sum;
if (buf.MFtype[i] != 0)
{
buf.density_solid[i] = 1.0f / sum_solid;
//if (i % 10 == 0)
// printf("solid density is %f\n", buf.density_solid[i]);0.0026
}
#else
buf.mpress[ i ] = ( sum - dens ) * simData.pintstiff;
//buf.mpress[ i ] = (pow( sum/dens,7.0f )-1) * simData.pintstiff;
//buf.mpress[ i ] = simData.pintstiff * dens * (pow( sum/dens,7.0f )-1);
buf.mdensity[ i ] = 1.0f / sum;
#endif
}
__global__ void mfComputePressure ( bufList buf, int pnum )
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if ( i >= pnum ) return;
// Get search cell
int nadj = (1*simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[ i ];
if ( gc == GRID_UNDEF ) return; // particle out-of-range
gc -= nadj;
// Sum Pressures
float3 pos = buf.mpos[ i ];
float dens = buf.mf_restdensity[i];
float sum = 0.0;
float sum_solid = 0;
float sum_fluid = 0;
for(uint fcount = 0; fcount<simData.mf_catnum;fcount++)
{
//buf.mf_alphagrad[i*MAX_FLUIDNUM+fcount] = make_float3(0,0,0);
buf.mf_alpha_next[i*MAX_FLUIDNUM+fcount] = buf.mf_alpha[i*MAX_FLUIDNUM+fcount];
buf.mf_beta[i*MAX_FLUIDNUM + fcount] = 0;
buf.mf_beta_next[i*MAX_FLUIDNUM + fcount] = 0;
}
/*if (buf.MFtype[i] == 0 && buf.mpos[i].y < 30)
{
buf.mf_alpha_next[i*MAX_FLUIDNUM + 2] = buf.mf_alpha[i*MAX_FLUIDNUM + 2] = 0;
buf.mf_beta[i*MAX_FLUIDNUM + 2] = 0.5;
}*/
for (int c=0; c < simData.gridAdjCnt; c++) {
sum += mfContributePressure ( i, pos, gc + simData.gridAdj[c], buf, sum_solid, sum_fluid );
//__syncthreads();
}
buf.isInside[i] = false;
// Compute Density & Pressure
sum += simData.r2 * simData.r2 * simData.r2 * buf.mf_restmass[i];
sum = sum * simData.poly6kern;
if ( sum == 0.0 ) sum = 1.0;
#ifdef NEW_BOUND
if (buf.misbound[i] ==1)
{
//buf.mpress[i] = ( sum - dens ) * simData.pextstiff;
//buf.mpress[ i ] = (pow( sum/dens,7.0f )-1) * simData.pintstiff;
//buf.mpress[ i ] += simData.pintstiff * dens * (pow( sum/dens,7.0f )-1);
buf.mpress[ i ] = ( sum - dens ) * simData.pintstiff;
//if (buf.mpress[i]<0) buf.mpress[i] = 0;
}
else
{
//buf.mpress[ i ] = ( sum - dens ) * simData.pintstiff;
//buf.mpress[ i ] = (pow( sum/dens,7.0f )-1) * simData.pintstiff;
//buf.mpress[ i ] = ( sum - dens ) * simData.pintstiff;
if( buf.MFtype[i]>=2)
buf.mpress[ i ] = simData.solid_pfactor * dens * (pow( sum/dens,7.0f )-1);
if( buf.MFtype[i]==0){
buf.mpress[ i ] = simData.fluid_pfactor * dens * (pow( sum/dens,7.0f )-1);
if(buf.mpress[i]<0)
buf.mpress[i]=0;
}
// buf.mdensity[ i ] = 1.0f / sum;
}
#else
buf.mpress[ i ] = ( sum - dens ) * simData.pintstiff;
//buf.mpress[ i ] = (pow( sum/dens,7.0f )-1) * simData.pintstiff;
//buf.mpress[ i ] = simData.pintstiff * dens * (pow( sum/dens,7.0f )-1);
buf.mdensity[ i ] = 1.0f / sum;
#endif
//buf.mpress[ i ] = (pow( sum/dens,7.0f )-1) * simData.pintstiff;
//buf.mpress[ i ] = simData.pintstiff * dens * (pow( sum/dens,7.0f )-1);
buf.vel_mid[i] = buf.mveleval[i];
}
__global__ void initDensity(bufList buf,int pnum){
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if ( i >= pnum ) return;
if(buf.MFtype[i] == 0) //no need for fluid particles
return;
// Get search cell
int nadj = (1*simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[ i ];
if ( gc == GRID_UNDEF ) return; // particle out-of-range
gc -= nadj;
float3 pos = buf.mpos[ i ];
float sum = 0.0;
for (int c=0; c < simData.gridAdjCnt; c++) {
sum += mfContributePressureInit ( i, pos, gc + simData.gridAdj[c], buf );
}
sum += simData.r2 * simData.r2 * simData.r2 * buf.mf_restmass[i];
sum = sum * simData.poly6kern;
//now sum is density
buf.mf_restdensity[i] = sum;
//if (i == 0)
// printf("rest density is %f\n", buf.mf_restdensity[i]);
buf.mveleval[i] = make_float3(0, 0, 0);
buf.vel_mid[i] = make_float3(0, 0, 0);
}
//compute drift velocity
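//Mixture-model drift velocity: for each fluid phase k, accumulate a term proportional to the difference
//between the phase-k pressure gradient and the mass-fraction-weighted mixture gradient, scaled by relax_coef;
//solid neighbors add an analogous capillary term built from pressure_water, additionally scaled by
//simData.relax2. All gradients use the spiky kernel derivative folded into cmterm.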
__device__ void contributeDriftVel( int i, int muli, float3 ipos, float idens, float ipress, int cell, bufList buf, float* ialpha, float* imassconcen, float3* idriftvelterm, float relax_coef, float3*ialphagrad){
float dsq, c;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2/d2;
float3 dist;
float cmterm;
float pmterm;
int j, mulj;
if ( buf.mgridcnt[cell] == 0 ) return;
int cfirst = buf.mgridoff[ cell ];
int clast = cfirst + buf.mgridcnt[ cell ];
float3 force = make_float3(0,0,0);
float3 pgrad[MAX_FLUIDNUM];
float3 pgradsum;
float3 cpgrad[MAX_FLUIDNUM];
float3 cpgradsum;
for ( int cndx = cfirst; cndx < clast; cndx++ ) {
j = buf.mgrid[ cndx ];
mulj = j * MAX_FLUIDNUM;
dist = ( ipos - buf.mpos[ j ] ); // dist in cm
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
dist *= simData.psimscale;
if ( dsq < r2 && dsq > 0) {
//cx = (r2-dsq)*d2;
dsq = sqrt(dsq*d2);
c = ( simData.psmoothradius - dsq );
cmterm = simData.spikykern * c * c / dsq * buf.mf_restmass[j] * buf.mdensity[j];
if (buf.MFtype[j] == 0)
{
if (buf.mf_alpha_sum[j] < 0.000001)
continue;
//pressure
pgradsum = make_float3(0, 0, 0);
for (uint fcount = 1; fcount < simData.mf_catnum; fcount++)
{
float jalphaprecount = buf.mf_alpha[mulj + fcount] / buf.mf_alpha_sum[j];
//float ialphaprecount = ialpha_pre[fcount];
pmterm = cmterm * (-ialpha[fcount] * ipress + jalphaprecount*buf.mpress[j]);
//pmterm = cmterm * (-ialpha_pre[fcount]*ipress + buf.mf_alpha_pre[mulj+fcount]*buf.mpress[j]);
pgrad[fcount] = pmterm * dist;
					if (isnan(dot(pgrad[fcount], pgrad[fcount])))	// guard the term just computed (was cpgrad, which is not set in this branch)
continue;
pgradsum += pgrad[fcount] * imassconcen[fcount];
//grad alpha
ialphagrad[fcount] += (jalphaprecount - ialpha[fcount]) * cmterm * dist;
}
for (uint fcount = 1; fcount < simData.mf_catnum; fcount++)
{
					if (isnan(dot(pgrad[fcount], pgrad[fcount])))	// was cpgrad: this loop accumulates pgrad terms
continue;
idriftvelterm[fcount] -= relax_coef * (pgrad[fcount] - pgradsum);
}
}
if(buf.MFtype[j] >= 2)//capillary term
{
cpgradsum = make_float3(0, 0, 0);
for(int k=1;k<simData.mf_catnum;++k)
{
//float jalphaprecount = buf.mf_alpha[mulj + k] / buf.mf_alpha_sum[j];
pmterm = cmterm * (-buf.pressure_water[i*simData.mf_catnum*MAX_SOLIDNUM+k*MAX_SOLIDNUM+buf.MFtype[j]-2] + buf.pressure_water[j*simData.mf_catnum*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[j] - 2]);
cpgrad[k] = pmterm * dist;
if (isnan(dot(cpgrad[k], cpgrad[k])))
{
//printf("cpgrad %d is (%f,%f,%f)\n", k, cpgrad[k].x, cpgrad[k].y, cpgrad[k].z);
continue;
}
cpgradsum += cpgrad[k] * imassconcen[k];
}
for (uint fcount = 1; fcount < simData.mf_catnum; fcount++)
{
if (isnan(dot(cpgrad[fcount], cpgrad[fcount])))
continue;
idriftvelterm[fcount] -= relax_coef*simData.relax2* (cpgrad[fcount] - cpgradsum);
}
}
}
}
}
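//Commit the advected phase fractions: copy mf_alpha_next/mf_beta_next into mf_alpha/mf_beta and rebuild the
//per-particle rest density and viscosity as the fraction-weighted mixture over the free (alpha) and absorbed
//(beta, per solid type) phases.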
__global__ void applyAlphaAndBeta(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum)
return;
if (buf.MFtype[i] != 0)
return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
//if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
register float3 accel = -buf.mforce[i]; // final accel (g-a) of last step was stored in here cf. advance,
register uint muloffseti = i * MAX_FLUIDNUM;
float alphasum = 0;
for (uint fcount = 0; fcount < simData.mf_catnum; fcount++)
{
//float temp = buf.mf_alpha[muloffseti+fcount];
//buf.mf_alpha_pre[muloffseti+fcount] = temp; //alpha->alpha_pre
buf.mf_alpha[muloffseti + fcount] = buf.mf_alpha_next[muloffseti + fcount];
alphasum += buf.mf_alpha_next[muloffseti + fcount];
//buf.mf_alphagrad[i*MAX_FLUIDNUM + fcount] = make_float3(0, 0, 0);
}
for (uint fcount = 0; fcount < MAX_FLUIDNUM*MAX_SOLIDNUM; fcount++)
buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + fcount] = buf.mf_beta_next[i*MAX_FLUIDNUM*MAX_SOLIDNUM + fcount];
float newdens, newvisc, newmass, newdensout;
//Restdensity Update
newdens = 0.0;
newvisc = 0.0;
	newdensout = 0.0;	// initialize: previously left uninitialized before being written to mf_restdensity_out below
//newmass = 0.0;
for (uint fcount = 1; fcount < simData.mf_catnum; fcount++)
{
newdens += buf.mf_alpha[i*MAX_FLUIDNUM + fcount] * simData.mf_dens[fcount];
newvisc += buf.mf_alpha[i*MAX_FLUIDNUM + fcount] * simData.mf_visc[fcount];
}
float betasum = 0;
for (uint fcount = 1; fcount < simData.mf_catnum; fcount++)
for(int l=0;l<MAX_SOLIDNUM;++l)
{
newdens += buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + fcount*MAX_SOLIDNUM + l] * simData.mf_dens[fcount];
newvisc += buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + fcount*MAX_SOLIDNUM + l] * simData.mf_visc[fcount];
betasum += buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + fcount*MAX_SOLIDNUM + l];
}
//if (buf.MFtype[i] == 0)
{
buf.mf_restdensity[i] = newdens;
//buf.mf_restmass[i] = newmass;
buf.mf_visc[i] = newvisc;
buf.mf_restdensity_out[i] = newdensout;
}
if (buf.mf_restdensity[i] <= 10)
printf("rest den is %f, alpha is (%f,%f,%f), betasum is %f\n",
buf.mf_restdensity[i], buf.mf_alpha[i*MAX_FLUIDNUM + 1], buf.mf_alpha[i*MAX_FLUIDNUM + 2], buf.mf_alpha[i*MAX_FLUIDNUM + 3],
betasum);
}
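//--- illustrative sketch (not called by the solver) ---------------------------
// applyAlphaAndBeta() above rebuilds the mixture rest density (and viscosity)
// as a fraction-weighted sum over the free phases alpha_k and the absorbed
// fractions beta_{k,l}:  rho_rest = sum_k alpha_k*rho_k + sum_k sum_l beta_{k,l}*rho_k.
// A minimal stand-alone version of that weighting; the function name and the
// flat beta layout beta[k*solidNum + l] mirror the buffers used above.
__host__ __device__ inline float sketchMixtureRestDensity(const float* alpha,
	const float* beta, const float* phaseDens, int catNum, int solidNum)
{
	float dens = 0.0f;
	for (int k = 1; k < catNum; k++)
	{
		dens += alpha[k] * phaseDens[k];                    // free fluid part
		for (int l = 0; l < solidNum; l++)
			dens += beta[k * solidNum + l] * phaseDens[k];  // absorbed part
	}
	return dens;
}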
__global__ void mfComputeDriftVel( bufList buf, int pnum )
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if ( i >= pnum)
return;
//if (i % 1000 == 0)
// for (uint fcount = 0; fcount < simData.mf_catnum; fcount++)
// printf("particle %d's pressure is %f\n",
// i, buf.mpress[i]);
if (buf.MFtype[i] != 0)
return;
if (buf.mf_alpha_sum[i] <= 0.01)
{
for (uint fcount = 1; fcount < simData.mf_catnum; fcount++)
buf.mf_vel_phrel[i*MAX_FLUIDNUM + fcount] = make_float3(0, 0, 0);
return;
}
// Get search cell
int nadj = (1*simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[ i ];
if ( gc == GRID_UNDEF ) return; // particle out-of-range
gc -= nadj;
float relax_coef = simData.relax; // temporary relax time related coefficient
//register float relax_coef = 0;
float sigma = 0.001f;//0.001f; //diffusion&tension coefficient
float cont, conts, contr;
cont = simData.cont;
conts = simData.cont1;
contr = simData.cont2;
float3 accel = -buf.mforce[i]; // final accel (g-a) of last step was stored in here cf. advance,
//register float massFrack[MAX_FLUIDNUM];
uint muloffseti = i * MAX_FLUIDNUM;
float invdens = 1.0/buf.mf_restdensity_out[i];
float dsum;
float vrx, vry, vrz;
float tdiff;
float3 ssum;
float alpha[MAX_FLUIDNUM],mass_concen[MAX_FLUIDNUM];
float ipress = buf.mpress[ i ];
float3 ipos = buf.mpos[ i ];
float idens = buf.mdensity[ i ];
float3 driftVelterm[MAX_FLUIDNUM],alphaGradterm[MAX_FLUIDNUM];
float3 sterm[MAX_FLUIDNUM];
//various viscosity
relax_coef /= buf.mf_visc[i];
//relax_coef *= (99*buf.mf_alpha_pre[i*MAX_FLUIDNUM+2]+1);
//third term
for(uint fcount = 1;fcount < simData.mf_catnum; fcount++)
{
//float temp = buf.mf_alpha[muloffseti+fcount];
//buf.mf_alpha_pre[muloffseti+fcount] = temp; //alpha->alpha_pre
if (buf.mf_alpha_sum[i] > 0.0001)
alpha[fcount] = buf.mf_alpha[muloffseti + fcount] / buf.mf_alpha_sum[i];
else
alpha[fcount] = 0;
//mass_concen[fcount] = alpha[fcount]*simData.mf_dens[fcount]*invdens;
mass_concen[fcount] = alpha[fcount] * simData.mf_dens[fcount] * invdens;
//if (isnan(mass_concen[fcount]))
// printf("alpha pre is %f, invdens is %f\n",
// alpha_pre[fcount], invdens);
driftVelterm[fcount] = make_float3(0,0,0);
alphaGradterm[fcount] = make_float3(0,0,0);
}
for (int c=0; c < simData.gridAdjCnt; c++) {
contributeDriftVel ( i, muloffseti, ipos, idens, ipress, gc + simData.gridAdj[c], buf, alpha, mass_concen, driftVelterm, relax_coef, alphaGradterm);
}
for( uint fcount = 1; fcount < simData.mf_catnum; fcount++)
{
//buf.mf_vel_phrel[muloffseti+fcount] = cont * contr * driftVelterm[fcount];
float3 vel = cont * contr * driftVelterm[fcount];
buf.mf_vel_phrel[muloffseti+fcount] = vel;
}
//first term & second term
dsum = 0;
ssum = make_float3(0,0,0);
if(buf.mf_alpha_sum[i] > 0.01)
for( uint fcount = 1; fcount < simData.mf_catnum; fcount++)
{
float temp = buf.mf_alpha[muloffseti+fcount] / buf.mf_alpha_sum[i];
dsum += temp * simData.mf_dens[fcount] * simData.mf_dens[fcount] * invdens;
if (temp > 0.0001)
//sterm[fcount] = buf.mf_alphagrad[muloffseti+fcount]/temp;
sterm[fcount] = alphaGradterm[fcount] / temp;
else
sterm[fcount] = make_float3(0,0,0);
//sterm[fcount] = alphaGradterm[fcount];
ssum += sterm[fcount] * temp * simData.mf_dens[fcount] * invdens;
}
for( uint fcount = 1; fcount < simData.mf_catnum; fcount++)
{
tdiff = simData.mf_dens[fcount]-dsum;
tdiff *= relax_coef;
vrx = accel.x * tdiff;
vry = accel.y * tdiff;
vrz = accel.z * tdiff;
buf.mf_vel_phrel[muloffseti+fcount] += make_float3(vrx,vry,vrz);
buf.mf_vel_phrel[muloffseti+fcount] -=
cont * conts * sigma * (sterm[fcount]-ssum);
if (isnan(dot(buf.mf_vel_phrel[muloffseti + fcount], buf.mf_vel_phrel[muloffseti + fcount])))
//if(i%1000 ==0)
printf("particle %d phase %d's vel is (%f,%f,%f),accel is (%f,%f,%f),alpha is %f, sterm is (%f,%f,%f), driftVelterm is (%f,%f,%f), press is %f, mass concern is (%f,%f,%f), alphaSum is %f, densityout is %f, pressure water is (%f,%f,%f,%f), visco is %f, relax_coef is %f\n",
i, fcount, buf.mf_vel_phrel[muloffseti + fcount].x, buf.mf_vel_phrel[muloffseti + fcount].y,
buf.mf_vel_phrel[muloffseti + fcount].z, accel.x, accel.y, accel.z,
buf.mf_alpha[muloffseti + fcount], sterm[fcount].x, sterm[fcount].y, sterm[fcount].z,
driftVelterm[fcount].x, driftVelterm[fcount].y, driftVelterm[fcount].z, buf.mpress[i],
mass_concen[1], mass_concen[2], mass_concen[3], buf.mf_alpha_sum[i],buf.mf_restdensity_out[i],
buf.pressure_water[muloffseti*MAX_SOLIDNUM+fcount*MAX_SOLIDNUM+0], buf.pressure_water[muloffseti*MAX_SOLIDNUM + fcount*MAX_SOLIDNUM + 1],
buf.pressure_water[muloffseti*MAX_SOLIDNUM + fcount*MAX_SOLIDNUM + 2], buf.pressure_water[muloffseti*MAX_SOLIDNUM + fcount*MAX_SOLIDNUM + 3], buf.mf_visc[i], relax_coef);
}
}
__device__ float3 contributeTDM(int i, int muli, float idens, float3 pos, int cell, bufList buf, float* ialpha_pre, float3* ivmk)
{
float3 force = make_float3(0, 0, 0);
if (buf.mgridcnt[cell] == 0) return force;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist, sf;
float c, dsq2, dsq, q;
int j, mulj;
float cmterm;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.MFtype[j] != 0)
continue;
mulj = j * MAX_FLUIDNUM;
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale;
dsq2 = dot(dist, dist);
q = sqrt(dsq2 / r2);
if (!(dsq2 < r2&&dsq2>0))
continue;
dsq = sqrt(dsq2);
if (q <= 0.5)
cmterm = simData.gradCubicSplineKern * (3 * q*q - 2 * q);
else
cmterm = -simData.gradCubicSplineKern * pow(1 - q, 2);
cmterm *= buf.mf_restmass[j] * buf.mdensity[j] / dsq;
//T_dm
for (uint fcount = 0; fcount < simData.mf_catnum; fcount++)
{
float3 dtermj = cmterm * dot(buf.mf_vel_phrel[mulj + fcount], dist) * buf.mf_alpha[mulj + fcount] * buf.mf_vel_phrel[mulj + fcount];
float3 dtermi = cmterm * dot(ivmk[fcount], dist) * ialpha_pre[fcount] * ivmk[fcount];
//example 2 doesn't have this term
force += (dtermj + dtermi) * simData.mf_dens[fcount] * idens;
}
}
return force;
}
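//--- illustrative sketch (not called by the solver) ---------------------------
// contributeTDM() above evaluates the radial derivative of the cubic spline
// kernel in two branches (q <= 0.5 and q <= 1), scaled by
// simData.gradCubicSplineKern. The same piecewise evaluation as a stand-alone
// helper, with the normalization passed in explicitly; sketch only, assuming
// q = r/h normalized to the unit support used above.
__device__ float sketchCubicSplineGradMagnitude(float q, float gradNorm)
{
	if (q <= 0.0f || q > 1.0f)
		return 0.0f;                                   // outside the kernel support
	if (q <= 0.5f)
		return gradNorm * (3.0f * q * q - 2.0f * q);   // inner branch
	return -gradNorm * (1.0f - q) * (1.0f - q);        // outer branch
}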
__global__ void mfComputeTDM(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (buf.MFtype[i] != 0) {
return;
}
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
bool error = false;
// Sum Pressures
float3 pos = buf.mpos[i];
float dens = buf.mf_restdensity[i];
float3 force = make_float3(0, 0, 0);
register uint muloffseti = i * MAX_FLUIDNUM;
register float alpha[MAX_FLUIDNUM];
register float3 ivmk[MAX_FLUIDNUM];
for (uint fcount = 1; fcount < simData.mf_catnum; fcount++)
{
alpha[fcount] = buf.mf_alpha[muloffseti + fcount];
ivmk[fcount] = buf.mf_vel_phrel[muloffseti + fcount];
}
for (int c = 0; c < simData.gridAdjCnt; c++)
{
force += contributeTDM(i, muloffseti, buf.mdensity[i], pos, gc + simData.gridAdj[c], buf, alpha, ivmk);
}
if (isnan(dot(force,force)))
//if(i%1000 ==0)
printf("particle %d tdm is nan, press is %f, alphaSum is %f, densityout is %f\n",
i, buf.mpress[i],
buf.mf_alpha_sum[i], buf.mf_restdensity_out[i]);
//bound force and gravity
buf.mforce[i] += force;
//buf.fluidForce[i] += force;
buf.maccel[i] = buf.mforce[i];
}
//compute alpha advance
__device__ void contributeAlphaChange( int i, int muli, float3 ipos, float3 iveleval, float ipress, float idens, int cell, bufList buf, float* ialpha, float* ialphachange, float3* ivmk)
{
float dsq, c;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2/d2;
float3 dist, vmr, vkr;
float cmterm;
int j, mulj;
//float3 jvmk[MAX_FLUIDNUM];
float jalpha_prek;
//float alphachange = 0.0;
if ( buf.mgridcnt[cell] == 0 ) return;// make_float3(0,0,0);
int cfirst = buf.mgridoff[ cell ];
int clast = cfirst + buf.mgridcnt[ cell ];
//force = make_float3(0,0,0);
//vterm = simData.lapkern * simData.pvisc;
for ( int cndx = cfirst; cndx < clast; cndx++ ) {
j = buf.mgrid[ cndx ];
#ifdef NEW_BOUND
if (buf.misbound[j] ==1) continue;
#endif
if(buf.MFtype[j] != buf.MFtype[i])
continue;
mulj = j * MAX_FLUIDNUM;
dist = ( ipos - buf.mpos[ j ] ); // dist in cm
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
dist *= simData.psimscale;
if ( dsq < r2 && dsq > 0) {
dsq = sqrt(dsq*d2);
c = ( simData.psmoothradius - dsq );
cmterm = simData.spikykern * c * c / dsq * buf.mf_restmass[j] * buf.mdensity[j];
vmr = buf.mveleval[j] - iveleval;
for(uint fcount = 1; fcount < simData.mf_catnum; fcount++)
{
jalpha_prek = buf.mf_alpha[mulj+fcount];
//-alpha_k * (nabla cdot v_m)
ialphachange[fcount] -= 0.5 * cmterm * (jalpha_prek+ialpha[fcount]) * (vmr.x * dist.x + vmr.y * dist.y + vmr.z * dist.z);
//buf.mf_alpha[muli+fcount] -= 0.5 * cmterm * (jalpha_prek+ialpha_pre[fcount]) * (vmr.x * dist.x + vmr.y * dist.y + vmr.z * dist.z);
//-nabla cdot (alpha_k * u_mk)
vkr = make_float3((jalpha_prek * buf.mf_vel_phrel[mulj+fcount].x + ialpha[fcount] * ivmk[fcount].x),
(jalpha_prek * buf.mf_vel_phrel[mulj+fcount].y + ialpha[fcount] * ivmk[fcount].y),
(jalpha_prek * buf.mf_vel_phrel[mulj+fcount].z + ialpha[fcount] * ivmk[fcount].z));
ialphachange[fcount] -= cmterm * (vkr.x * dist.x + vkr.y * dist.y + vkr.z * dist.z);
//buf.mf_alpha[muli+fcount] -= cmterm * (vkr.x * dist.x + vkr.y * dist.y + vkr.z * dist.z);
}
//pterm = simData.psimscale * -0.5f * c * simData.spikykern * ( ipress + buf.mpress[ j ] ) / dsq;
//dterm = c * idens * (buf.mdensity[ j ] );
//force += ( pterm * dist + vterm * ( buf.mveleval[ j ] - iveleval )) * dterm;
}
}
//return force;
//return alphachange;
}
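//--- illustrative sketch (not called by the solver) ---------------------------
// contributeAlphaChange() above discretizes the volume-fraction transport
//     d(alpha_k)/dt = -alpha_k * div(v_m) - div(alpha_k * u_mk)
// with a symmetrized SPH divergence. A minimal single-neighbor version of that
// contribution; cmterm is assumed to already hold m_j/rho_j * dW/dr / r as in
// the loop above, velDiff = v_j - v_i, and driftTerm = alpha_j*u_mk_j + alpha_i*u_mk_i.
__device__ float sketchAlphaChangePair(float cmterm, float alpha_i, float alpha_j,
	float3 velDiff, float3 driftTerm, float3 dist)
{
	// -alpha_k * div(v_m) term (symmetrized over the pair)
	float change = -0.5f * cmterm * (alpha_j + alpha_i) * dot(velDiff, dist);
	// -div(alpha_k * u_mk) term
	change -= cmterm * dot(driftTerm, dist);
	return change;
}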
__global__ void mfComputeAlphaAdvance( bufList buf, int pnum )
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if ( i >= pnum)
return ;
if (buf.MFtype[i] != 0)
return;
if (buf.mf_alpha_sum[i] < 0.01)
return;
// Get search cell
int nadj = (1*simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[ i ];
if ( gc == GRID_UNDEF ) return; // particle out-of-range
gc -= nadj;
register uint muloffseti = i * MAX_FLUIDNUM;
register float3 ipos = buf.mpos[ i ];
register float3 iveleval = buf.mveleval[ i ];
register float ipress = buf.mpress[ i ];
register float idens = buf.mdensity[ i ];
register float alpha[MAX_FLUIDNUM],alphachange[MAX_FLUIDNUM];
register float3 ivmk[MAX_FLUIDNUM];
for(uint fcount = 1;fcount < simData.mf_catnum; fcount++)
{
alpha[fcount] = buf.mf_alpha[muloffseti+fcount];
alphachange[fcount] = 0.0f;
ivmk[fcount] = buf.mf_vel_phrel[muloffseti+fcount];
//buf.mf_alpha[muloffseti+fcount] = 0.0f;
}
for (int c=0; c < simData.gridAdjCnt; c++) {
contributeAlphaChange ( i, muloffseti, ipos, iveleval, ipress, idens, gc + simData.gridAdj[c], buf, alpha, alphachange, ivmk);
}
for(uint fcount = 1;fcount < simData.mf_catnum; fcount++)
{
//buf.mf_alpha[muloffseti+fcount] += alphachange[fcount] * simData.mf_dt;
alphachange[fcount] *= simData.mf_dt;
//alphachange limit
if(alphachange[fcount]<-0.99)
{
alphachange[fcount] = -0.99;// * ((int)(buf.mf_alpha[muloffseti+fcount]>0)-(int)(buf.mf_alpha[muloffseti+fcount]<0));
}
buf.mf_alphachange[i*MAX_FLUIDNUM + fcount] = alphachange[fcount];
//if (abs(alphachange[fcount]) >= 0.001)
// printf("particle %d's phase %d's alpha change is %f\n", i, fcount, alphachange[fcount]);
buf.mf_alpha_next[muloffseti+fcount] = alphachange[fcount] + alpha[fcount];
//buf.mf_alpha_next[muloffseti + fcount] = alpha[fcount];
if (isnan(alphachange[fcount]) || isnan(alpha[fcount]))
printf("particle %d phase %d's alpha change is %f, pre alpha is %f, vmk is (%f,%f,%f)\n",
i, fcount, alphachange[fcount], alpha[fcount],
buf.mf_vel_phrel[i*MAX_FLUIDNUM + fcount].x,
buf.mf_vel_phrel[i*MAX_FLUIDNUM + fcount].y, buf.mf_vel_phrel[i*MAX_FLUIDNUM + fcount].z);
//buf.mf_alpha[muloffseti+fcount] *= simData.mf_dt;
//if(buf.mf_alpha[muloffseti+fcount]<-0.99)
//{
// buf.mf_alpha[muloffseti+fcount] = -0.99;// * ((int)(buf.mf_alpha[muloffseti+fcount]>0)-(int)(buf.mf_alpha[muloffseti+fcount]<0));
//}
//buf.mf_alpha[muloffseti+fcount] += alpha_pre[fcount];
}
}
//compute correction
__global__ void mfComputeCorrection( bufList buf, int pnum )
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if ( i >= pnum)
return;
if (buf.MFtype[i] != 0)
return;
// Get search cell
int nadj = (1*simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[ i ];
if ( gc == GRID_UNDEF ) return; // particle out-of-range
gc -= nadj;
uint muloffseti = i * MAX_FLUIDNUM;
float sum, alphasum = 0, betasum = 0, alphaPercent, betaPercent;
int flag;
sum = 0.0f;
for (uint fcount = 1; fcount < simData.mf_catnum; fcount++)
{
for (int l = 0; l<MAX_SOLIDNUM; ++l)
{
if (buf.mf_beta_next[muloffseti*MAX_SOLIDNUM + fcount*MAX_SOLIDNUM + l] < 0.01)
buf.mf_beta_next[muloffseti*MAX_SOLIDNUM + fcount*MAX_SOLIDNUM + l] = 0;
//if (buf.mf_beta_next[muloffseti*MAX_SOLIDNUM + fcount*MAX_SOLIDNUM + l] > 0.99)
// buf.mf_beta_next[muloffseti*MAX_SOLIDNUM + fcount*MAX_SOLIDNUM + l] = 1.0f;
betasum += buf.mf_beta_next[muloffseti*MAX_SOLIDNUM + fcount*MAX_SOLIDNUM + l];
}
if (buf.mf_alpha_next[muloffseti + fcount] < 0.01)
buf.mf_alpha_next[muloffseti + fcount] = 0.0f;
//if (buf.mf_alpha_next[muloffseti + fcount] > 0.99)
// buf.mf_alpha_next[muloffseti + fcount] = 1.0f;
alphasum += buf.mf_alpha_next[muloffseti + fcount];
}
sum = alphasum + betasum;
flag = (sum>0.0f);
sum = flag*sum + (1 - flag)*1.0f;
sum = 1.0 / sum;
alphaPercent = alphasum * sum;
betaPercent = betasum * sum;
if (betaPercent == 0)
betasum = 1;
else
betasum = 1 / betasum;
if (alphaPercent == 0)
alphasum = 1;
else
alphasum = 1 / alphasum;
//int cat = findMaxCat(alpha_pre, simData.mf_catnum, idx, idxlen);
int maxcat = 3*MAX_SOLIDNUM + 3;
for (uint fcount = 1; fcount < simData.mf_catnum; fcount++)
{
buf.mf_alpha_next[muloffseti + fcount] = (flag)*buf.mf_alpha_next[muloffseti + fcount] * alphaPercent * alphasum + (1 - flag)*(fcount == maxcat ? 1 : 0);
for (int l = 0; l<MAX_SOLIDNUM; ++l)
buf.mf_beta_next[muloffseti*MAX_SOLIDNUM + fcount*MAX_SOLIDNUM + l] =
(flag)*buf.mf_beta_next[muloffseti*MAX_SOLIDNUM + fcount*MAX_SOLIDNUM + l] * betaPercent * betasum
+ (1 - flag)*(fcount*MAX_SOLIDNUM + l == maxcat ? 1 : 0);
}
//sum = 0;
//for (uint fcount = 1; fcount < simData.mf_catnum; fcount++)
//{
// sum += buf.mf_alpha_next[muloffseti + fcount];
// for (int l = 0; l < MAX_SOLIDNUM; ++l)
// sum += buf.mf_beta_next[muloffseti*MAX_SOLIDNUM + fcount*MAX_SOLIDNUM + l];
//}
//if (abs(sum - 1) > 0.001)
// printf("correction lose function, sum is %f\n", sum);
}
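//--- illustrative sketch (not called by the solver) ---------------------------
// mfComputeCorrection() above clamps small fractions to zero and rescales so
// that sum(alpha) + sum(beta) returns to 1 while keeping the alpha:beta split.
// The same idea reduced to the two aggregate sums; sketch only (the real kernel
// falls back to one specific beta entry when everything vanishes).
__device__ void sketchRenormalizeFractions(float& alphaSum, float& betaSum)
{
	float total = alphaSum + betaSum;
	if (total <= 0.0f) { alphaSum = 1.0f; betaSum = 0.0f; return; } // degenerate fallback
	alphaSum /= total;
	betaSum /= total;
}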
__device__ float gamma(float q)
{
if (q<2.0/3.0 && q>0)
return 2.0/3.0;
if (q>=2.0/3.0 && q<1)
return 2*q-3.0/2.0*q*q;
if (q>=1 && q<2)
return (2-q)*(2-q)/2.0;
return 0;
}
////compute force
//__device__ float3 contributeMfForce( int i, int muli, float3 ipos, float3 iveleval, float ipress, float idens, int cell, bufList buf, float* ialpha_pre, float ipressure_modify, float3* ivmk, float3* ivelxcor, float ivisc)
//{
// float dsq, c;
// register float d2 = simData.psimscale * simData.psimscale;
// register float r2 = simData.r2/d2;
//
// float3 dist, vmr;
// float cmterm;
// float pmterm, vmterm;
// int j, mulj;
// float aveDenij,cx,xterm;
// //float3 jvmk[MAX_FLUIDNUM];
// //float jalpha_prek;
//
// if ( buf.mgridcnt[cell] == 0 ) return make_float3(0,0,0);
//
// int cfirst = buf.mgridoff[ cell ];
// int clast = cfirst + buf.mgridcnt[ cell ];
//
// float3 force = make_float3(0,0,0);
// //massi = buf.mf_restmass[i];
// for ( int cndx = cfirst; cndx < clast; cndx++ ) {
// j = buf.mgrid[ cndx ];
//// massj = buf.mf_restmass[j];
// mulj = j * MAX_FLUIDNUM;
// dist = ( ipos - buf.mpos[ j ] ); // dist in cm
// dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
// dist *= simData.psimscale;
// if ( dsq < r2 && dsq > 0) {
// cx = (r2-dsq)*d2;
// dsq = sqrt(dsq*d2);
// c = ( simData.psmoothradius - dsq );
// cmterm = simData.spikykern * c * c / dsq * buf.mf_restmass[j] * buf.mdensity[j];
// //pressure
// if (buf.misbound[j] != 1)
// {
// pmterm = -0.5f * cmterm * (ipress + ipressure_modify + buf.mpress[j] + buf.mf_pressure_modify[j])*idens;
// //pmterm = -0.5f * cmterm * (ipress + buf.mpress[j])*idens;
// force += pmterm * dist;
// //viscosity
// vmr = iveleval - buf.mveleval[j]; //This is different from that in contributeAlphaChange()
// vmterm = cmterm * (ivisc+buf.mf_visc[j]) * idens;
// force += vmterm * vmr;
// }
// else
// {
// pmterm = -0.5f * cmterm * (ipress + ipressure_modify + buf.mpress[j])*idens;
// //pmterm = -0.5f * cmterm * (ipress + buf.mpress[j])*idens;
// force += pmterm * dist*0.03;
// //viscosity
// vmr = iveleval - buf.mveleval[j]; //This is different from that in contributeAlphaChange()
// vmterm = cmterm * (ivisc+buf.mf_visc[j]) * idens;
// force += vmterm * vmr*0.03;
// }
// /*
// else pmterm = -0.5f * cmterm * (ipress + ipressure_modify + buf.mpress[j])*idens/30.0;
// if (buf.misbound[j] ==1)
// vmterm/= 30.0;
// */
// if (buf.misbound[j] != 1)
// //T_dm
// for(uint fcount = 0; fcount < simData.mf_catnum; fcount++)
// {
// float3 dtermj = cmterm * (buf.mf_vel_phrel[mulj+fcount].x * dist.x + buf.mf_vel_phrel[mulj+fcount].y * dist.y + buf.mf_vel_phrel[mulj+fcount].z * dist.z) * buf.mf_alpha_next[mulj+fcount] * buf.mf_vel_phrel[mulj+fcount];
// float3 dtermi = cmterm * (ivmk[fcount].x * dist.x + ivmk[fcount].y * dist.y + ivmk[fcount].z * dist.z) * ialpha_pre[fcount] * ivmk[fcount];
// force += (dtermj + dtermi) * simData.mf_dens[fcount] * idens;
// }
//#ifndef _nXSPH
// //XSPH correction
// aveDenij = 2/(1/buf.mdensity[j]+1/idens);
// xterm = cx*cx*cx*buf.mf_restmass[j]*aveDenij*simData.poly6kern*0.5; //0.5=epsilon
// ivelxcor->x += -vmr.x * xterm;
// ivelxcor->y += -vmr.y * xterm;
// ivelxcor->z += -vmr.z * xterm;
// }
//#endif
// }
// return force;
//}
//chemical reaction: rebalance volume fractions between two reactant phases and a product phase
__device__ void mfChRebalance(int i, int muli, bufList buf, int firstReactor, int secondReactor, int product)
{
float chGamma = 0.01;
register float alpha1 = buf.mf_alpha[muli+firstReactor];
register float alpha2 = buf.mf_alpha[muli+secondReactor];
//register float alphap;
register float massTrans1, massTrans2;
//register float V0 = buf.mf_restmass[i] * buf.mdensity[i];
register float Vp;
register float rhop1 = simData.mf_dens[firstReactor];
register float rhop2 = simData.mf_dens[secondReactor];
register float rhopp = simData.mf_dens[product];
register float deltaAlphaP;
//chGamma *= (alpha1*alpha2);
chGamma *= (alpha1+alpha2);
if(chGamma == 0)return;
if(chGamma > alpha1)chGamma = alpha1;
if(chGamma > alpha2)chGamma = alpha2;
massTrans1 = chGamma * rhop1;
massTrans2 = chGamma * rhop2;
deltaAlphaP = (massTrans1 + massTrans2) / rhopp;
Vp = 1 + deltaAlphaP - 2 * chGamma;
Vp = 1/Vp;
buf.mf_alpha[muli+firstReactor] -= chGamma;
buf.mf_alpha[muli+secondReactor] -= chGamma;
buf.mf_alpha[muli+product] += deltaAlphaP;
for(uint fcount = 0; fcount<simData.mf_catnum; fcount++)
{
buf.mf_alpha[muli+fcount] *= Vp;
}
buf.mf_restdensity[i] *= Vp;
}
//**** shadow functions *******
__global__ void mfComputeDriftVelVelLimit( bufList buf, int pnum )
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if ( i >= pnum)
return ;
#ifdef NEW_BOUND
if(buf.misbound[i]==1)
return;
#endif
// Get search cell
int nadj = (1*simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[ i ];
if ( gc == GRID_UNDEF ) return; // particle out-of-range
gc -= nadj;
register float relax_coef = simData.relax; // temporary relax time related coefficient
register float sigma = 0.001f;//0.001f; //diffusion&tension coefficient
register float cont, conts, contr;
cont = simData.cont;
conts = simData.cont1;
contr = simData.cont2;
register float3 accel = buf.mforce[i]; // final accel (g-a) of last step was stored in here cf. advance,
//register float massFrack[MAX_FLUIDNUM];
register uint muloffseti = i * MAX_FLUIDNUM;
register float invdens = 1.0/buf.mf_restdensity[i];
register float dsum;
register float vrx, vry, vrz;
register float tdiff;
register float3 ssum;
register float alpha_pre[MAX_FLUIDNUM],mass_concen[MAX_FLUIDNUM];
register float ipress = buf.mpress[ i ];
register float3 ipos = buf.mpos[ i ];
register float idens = buf.mdensity[ i ];
register float3 driftVelterm[MAX_FLUIDNUM],alphaGradterm[MAX_FLUIDNUM];
register float3 sterm[MAX_FLUIDNUM];
//various viscosity
relax_coef /= buf.mf_visc[i];
//relax_coef *= (99*buf.mf_alpha_pre[i*MAX_FLUIDNUM+2]+1);
//third term
for(uint fcount = 0;fcount < simData.mf_catnum; fcount++)
{
//float temp = buf.mf_alpha[muloffseti+fcount];
//buf.mf_alpha_pre[muloffseti+fcount] = temp; //alpha->alpha_pre
alpha_pre[fcount] = buf.mf_alpha_next[muloffseti+fcount];
mass_concen[fcount] = alpha_pre[fcount]*simData.mf_dens[fcount]*invdens;
driftVelterm[fcount] = make_float3(0,0,0);
alphaGradterm[fcount] = make_float3(0,0,0);
}
for (int c=0; c < simData.gridAdjCnt; c++) {
contributeDriftVel ( i, muloffseti, ipos, idens, ipress, gc + simData.gridAdj[c], buf, alpha_pre, mass_concen, driftVelterm, relax_coef, alphaGradterm);
}
for( uint fcount = 0; fcount < simData.mf_catnum; fcount++)
{
//buf.mf_vel_phrel[muloffseti+fcount] = cont * contr * driftVelterm[fcount];
float3 vel = cont * contr * driftVelterm[fcount];
float speed = vel.x*vel.x + vel.y*vel.y + vel.z*vel.z;
if ( speed > simData.VL2 ) {
vel *= simData.VL / sqrt(speed);
}
buf.mf_vel_phrel[muloffseti+fcount] = vel;
}
//first term & second term
dsum = 0;
ssum = make_float3(0,0,0);
for( uint fcount = 0; fcount < simData.mf_catnum; fcount++)
{
//float temp = buf.mf_alpha[muloffseti+fcount];
//dsum += temp * simData.mf_dens[fcount] * simData.mf_dens[fcount] * invdens;
//buf.mf_alpha_pre[muloffseti+fcount] = temp; //alpha->alpha_pre
float temp = buf.mf_alpha_next[muloffseti+fcount];
dsum += temp * simData.mf_dens[fcount] * simData.mf_dens[fcount] * invdens;
if(temp>0.0001)
//sterm[fcount] = buf.mf_alphagrad[muloffseti+fcount]/temp;
sterm[fcount] = alphaGradterm[fcount]/temp;
else
sterm[fcount] = make_float3(0,0,0);
ssum += sterm[fcount] * temp * simData.mf_dens[fcount] * invdens;
}
for( uint fcount = 0; fcount < simData.mf_catnum; fcount++)
{
tdiff = simData.mf_dens[fcount]-dsum;
tdiff *= relax_coef;
vrx = accel.x * tdiff;
vry = accel.y * tdiff;
vrz = accel.z * tdiff;
buf.mf_vel_phrel[muloffseti+fcount] += make_float3(vrx,vry,vrz);
buf.mf_vel_phrel[muloffseti+fcount] -= cont * conts * sigma * (sterm[fcount]-ssum);
}
}
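//--- illustrative sketch (not called by the solver) ---------------------------
// The shadow kernel above clamps each drift velocity to simData.VL whenever its
// squared magnitude exceeds simData.VL2; updateVelocity() applies the same clamp
// to the mixture velocity. The clamp as a stand-alone helper (sketch only):
__device__ float3 sketchClampSpeed(float3 v, float maxSpeed)
{
	float speed2 = dot(v, v);
	if (speed2 > maxSpeed * maxSpeed)
		v = v * (maxSpeed / sqrtf(speed2));   // rescale onto the speed limit
	return v;
}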
//***** End Shadow Functions *******
// ********** Project-u Functions *********
//__device__ float3 contributeForce_projectu(int i, int muli, float3 ipos, float3 iveleval, float ipress, float idens, int cell, bufList buf, float* ialpha_pre, float ipressure_modify, float3* ivmk, float3* ivelxcor, float ivisc)
//{
// //Force here represents the acceleration
// float dsq, c;
// register float d2 = simData.psimscale * simData.psimscale;
// register float r2 = simData.r2/d2;
//
// float3 dist, vmr ;
// float cmterm,cmterm1;
//// float massj;
// float pmterm, vmterm;
//// float q;
// int j, mulj;
// float aveDenij,cx,xterm;
//
// if ( buf.mgridcnt[cell] == 0 ) return make_float3(0,0,0);
//
// int cfirst = buf.mgridoff[ cell ];
// int clast = cfirst + buf.mgridcnt[ cell ];
//
// float3 force = make_float3(0,0,0);
// //massi = buf.mf_restmass[i];
//
// for ( int cndx = cfirst; cndx < clast; cndx++ )
// {
// j = buf.mgrid[ cndx ];
//
// //massj = buf.mf_restmass[j];
// mulj = j * MAX_FLUIDNUM;
// dist = ( ipos - buf.mpos[ j ] ); // dist in cm
// dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
// dist *= simData.psimscale;
//
// if ( dsq < r2 && dsq > 0) {
// cx = (r2-dsq)*d2;
// dsq = sqrt(dsq*d2);
// c = ( simData.psmoothradius - dsq );
//
// cmterm1 = simData.spikykern * c * c / dsq;
// cmterm = simData.spikykern * c * c / dsq * buf.mf_restmass[j] * buf.mdensity[j];
// //pressure
//#ifdef NEW_BOUND
// if (buf.misbound[j] != 1) //force between fluid and solid, force within fluid
// {
//
// if( buf.MFtype[j]==0)
// pmterm = -0.5f * cmterm * (ipress + buf.mpress[j] + buf.mf_pressure_modify[j] )*idens;
// else
// pmterm = -0.5f * cmterm * (ipress + buf.mpress[j])*idens;
//
// if(buf.MFtype[i]==0 && buf.MFtype[j]==1 && buf.mpress[j]<0)
// pmterm = -0.5f * cmterm * (ipress + 0)*idens;
//
// //pmterm = -0.5f * cmterm * (ipress + buf.mpress[j])*idens;
// //if( (buf.MFtype[i]==0 && buf.MFtype[j]==0))
// // force += pmterm * dist;
// ////
// //if(! (buf.MFtype[i]==1 && buf.MFtype[j]==1)){
// // force += pmterm * dist;
// //}
// if(buf.MFtype[i] == 0 && buf.MFtype[j] == 0)
// {
// force += pmterm * dist;
// }
//
// }
// else if(buf.MFtype[i]==0) //force from boundary particles to fluid particles
// {
// //pmterm = -0.5f * cmterm * (ipress + ipressure_modify + buf.mpress[j])*idens;
// //pmterm = -0.5f * cmterm * (ipress + buf.mpress[j])*idens;
//
// ////
// ////pressure
// //pmterm = - cmterm1 * buf.mf_restdensity[i] * buf.mf_restmass[j] /buf.mf_restdensity[j] *ipress *buf.mdensity[i]*buf.mdensity[i];
// //force += pmterm * dist * simData.omega;
//
// ////viscosity
// //vmr = iveleval - buf.mveleval[j]; //This is different from that in contributeAlphaChange()
// //float pi_ij = vmr.x*dist.x + vmr.y*dist.y + vmr.z*dist.z;
// //if(pi_ij < 0){
// // pi_ij = pi_ij / (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z + r2 * 0.01);
// // pi_ij = pi_ij * 2 * simData.psmoothradius * (ivisc + buf.mf_visc[j]) * idens /2;
// // pi_ij = - cmterm1 * buf.mf_restdensity[i] * buf.mf_restmass[j]/buf.mf_restdensity[j] * pi_ij;
// // force += pi_ij * dist * simData.visc_factor;
// //
// //}
//
// //vmterm = cmterm * (ivisc+buf.mf_visc[j]) * idens;
// //force += vmterm * vmr*0.03;
// }
// else{ //force from boundary particles to deformable/rigid particles
// /*
// pmterm = -0.5f * cmterm * (ipress + buf.mpress[j])*idens;
// force += pmterm * dist*0.03;
// vmr = iveleval - buf.mveleval[j];
// vmterm = cmterm * (ivisc+buf.mf_visc[j]) * idens;
// force += vmterm * vmr*0.03;*/
//
// //pressure
// pmterm = - cmterm1 * buf.mf_restdensity[i] * buf.mf_restmass[j] / buf.mf_restdensity[j] * (ipress) *buf.mdensity[i]*buf.mdensity[i];
// force += pmterm * dist * simData.omega;
//
// //viscosity
// vmr = iveleval - buf.mveleval[j]; //This is different from that in contributeAlphaChange()
// float pi_ij = vmr.x*dist.x + vmr.y*dist.y + vmr.z*dist.z;
// if(pi_ij < 0){
// pi_ij = pi_ij / (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z + r2 * 0.01);
// pi_ij = pi_ij * 2 * simData.psmoothradius * (ivisc + buf.mf_visc[j]) * idens /2;
// pi_ij = - cmterm1 * buf.mf_restdensity[i] * buf.mf_restmass[j]/buf.mf_restdensity[j] * pi_ij;
// force += pi_ij * dist * simData.visc_factor;
// }
// }
//
// if (buf.misbound[j] != 1)
// //T_dm
// for(uint fcount = 0; fcount < simData.mf_catnum; fcount++)
// {
// float3 dtermj = cmterm * (buf.mf_vel_phrel[mulj+fcount].x * dist.x + buf.mf_vel_phrel[mulj+fcount].y * dist.y + buf.mf_vel_phrel[mulj+fcount].z * dist.z) * buf.mf_alpha_next[mulj+fcount] * buf.mf_vel_phrel[mulj+fcount];
// float3 dtermi = cmterm * (ivmk[fcount].x * dist.x + ivmk[fcount].y * dist.y + ivmk[fcount].z * dist.z) * ialpha_pre[fcount] * ivmk[fcount];
// force += (dtermj + dtermi) * simData.mf_dens[fcount] * idens;
// }
//
//#else
// pmterm = -0.5f * cmterm * (ipress + ipressure_modify + buf.mpress[j] + buf.mf_pressure_modify[j])*idens;
// //pmterm = -0.5f * cmterm * (ipress + buf.mpress[j])*idens;
// force += pmterm * dist;
// //viscosity
// vmr = iveleval - buf.mveleval[j]; //This is different from that in contributeAlphaChange()
// vmterm = cmterm * (ivisc+buf.mf_visc[j]) * idens;
// force += vmterm * vmr;
// for(uint fcount = 0; fcount < simData.mf_catnum; fcount++)
// {
// float3 dtermj = cmterm * (buf.mf_vel_phrel[mulj+fcount].x * dist.x + buf.mf_vel_phrel[mulj+fcount].y * dist.y + buf.mf_vel_phrel[mulj+fcount].z * dist.z) * buf.mf_alpha_pre[mulj+fcount] * buf.mf_vel_phrel[mulj+fcount];
// float3 dtermi = cmterm * (ivmk[fcount].x * dist.x + ivmk[fcount].y * dist.y + ivmk[fcount].z * dist.z) * ialpha_pre[fcount] * ivmk[fcount];
// force += (dtermj + dtermi) * simData.mf_dens[fcount] * idens;
// }
//
//#endif
//#ifndef _nXSPH
// //XSPH correction
// aveDenij = 2/(1/buf.mdensity[j]+1/idens);
// xterm = cx*cx*cx*buf.mf_restmass[j]*aveDenij*simData.poly6kern*0.5; //0.5=epsilon
// ivelxcor->x += -vmr.x * xterm;
// ivelxcor->y += -vmr.y * xterm;
// ivelxcor->z += -vmr.z * xterm;
// }
//#endif
//
// }
// return force;
//}
//__global__ void ComputeForce_projectu ( bufList buf, int pnum)
//{
// uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
// if ( i >= pnum)
// return;
//#ifdef NEW_BOUND
// if(buf.misbound[i]==1)
// return;
//#endif
// // Get search cell
// int nadj = (1*simData.gridRes.z + 1)*simData.gridRes.x + 1;
// uint gc = buf.mgcell[ i ];
// if ( gc == GRID_UNDEF ) return; // particle out-of-range
// gc -= nadj;
//
// register uint muloffseti = i * MAX_FLUIDNUM;
// register float3 ipos = buf.mpos[ i ];
// register float3 iveleval = buf.mveleval[ i ];
//
// register float idens = buf.mdensity[ i ];
// register float alpha_pre[MAX_FLUIDNUM];
// register float3 ivmk[MAX_FLUIDNUM];
// register float pressure_modify = buf.mf_pressure_modify[i];
// register float3 *ivelxcor = buf.mf_velxcor+i;
// register float ivisc = buf.mf_visc[i];
//
// register float ipress;
// if(buf.MFtype[i]==0)
// ipress = buf.mpress[i] + buf.mf_pressure_modify[i];
// else
// ipress = buf.mpress[i];
//
// register float3 force = make_float3(0,0,0);
// *ivelxcor = make_float3(0,0,0);
//
// for(uint fcount = 0;fcount < simData.mf_catnum; fcount++)
// {
// alpha_pre[fcount] = buf.mf_alpha_next[muloffseti+fcount];
// ivmk[fcount] = buf.mf_vel_phrel[muloffseti+fcount];
// }
//
// for (int c=0; c < simData.gridAdjCnt; c++) {
// force += contributeForce_projectu (i, muloffseti, ipos, iveleval, ipress, idens, gc + simData.gridAdj[c], buf, alpha_pre, pressure_modify, ivmk, ivelxcor, ivisc);
// }
// /*if (buf.MFtype[i] == 0 && i % 1000 == 0)
// printf("fluid force is (%f,%f,%f)\n", force.x, force.y, force.z);*/
// //if (buf.MFtype[i] == 1 && buf.elasticID[i] == 6)
// // printf("fluid force is (%f,%f,%f)\n", force.x, force.y, force.z);
// buf.mforce[ i ] = force;
//}
//__device__ void contributeVelocityGradient(float* result, int i, float3 ipos, float3 iveleval, int cell, bufList buf)
//{
// float dsq, c;
// register float d2 = simData.psimscale * simData.psimscale;
// register float r2 = simData.r2/d2;
//
// float3 dist, jveleval;
// float cmterm;
//// float massj,massi;
//
//// float q;
// int j;
//// float aveDenij,cx,xterm;
//
// if ( buf.mgridcnt[cell] == 0 ) return;
//
// int cfirst = buf.mgridoff[ cell ];
// int clast = cfirst + buf.mgridcnt[ cell ];
//
// //massi = buf.mf_restmass[i];
// for ( int cndx = cfirst; cndx < clast; cndx++ )
// {
// j = buf.mgrid[ cndx ];
// if( buf.MFtype[j] != 2)
// continue;
//
// //massj = buf.mf_restmass[j];
// //jveleval = buf.mveleval[j]*buf.mdensity[j]*buf.mdensity[j] + iveleval*buf.mdensity[i]*buf.mdensity[i];
// jveleval = buf.mveleval[j]-iveleval;
//
// dist = ( ipos - buf.mpos[ j ] ); // dist in cm
// dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
// dist *= simData.psimscale;
//
// if ( dsq < r2 && dsq > 0) {
// dsq = sqrt(dsq * d2);
// c = ( simData.psmoothradius - dsq );
// cmterm = simData.spikykern * c * c / dsq * buf.mf_restmass[j] * buf.mdensity[j];
// //cmterm = simData.spikykern * c * c / dsq;
// jveleval = jveleval * cmterm;
// result[0] += jveleval.x * dist.x; result[1] += jveleval.x * dist.y; result[2] += jveleval.x * dist.z;
// result[3] += jveleval.y * dist.x; result[4] += jveleval.y * dist.y; result[5] += jveleval.y * dist.z;
// result[6] += jveleval.z * dist.x; result[7] += jveleval.z * dist.y; result[8] += jveleval.z * dist.z;
// }
// }
//}
__device__ void print9(const char* string, float* buf){
printf("%s\n%f %f %f\n%f %f %f\n%f %f %f\n",string,buf[0],buf[1],buf[2],
buf[3],buf[4],buf[5],buf[6],buf[7],buf[8]);
return;
}
__device__ float3 getBoundForce(int i,bufList buf, float3 force, float time){
register float3 accel, norm;
register float diff, adj, speed;
register float3 pos = buf.mpos[i];
register float3 veval = buf.mveleval[i];
accel = force;
// if (buf.MFtype[i] == 1)
// {
// // Boundaries
// // Y-axis
// diff = simData.pradius - (pos.y - (simData.pboundmin.y + (pos.x - simData.pboundmin.x)*simData.pground_slope)) * simData.psimscale;
// // if (diff>simData.pradius) diff += simData.pradius*1000;
// if (diff > EPSILON) {
// norm = make_float3(-simData.pground_slope, 1.0 - simData.pground_slope, 0);
// adj = simData.pextstiff * diff - simData.pdamp * dot(norm, veval);
// norm *= adj; accel += norm;//*scale_dens;
//
// //float3 veldamp=make_float3(veval.x, 0, veval.z);
// //buf.mveleval[i] -= veldamp * simData.omega;
// //veldamp=make_float3(vel.x, 0, vel.z);
// //buf.mvel[i] -= veldamp * simData.omega;
// }
//
// diff = simData.pradius - (simData.pboundmax.y - pos.y)*simData.psimscale;
// // if (diff>simData.pradius) diff += simData.pradius*1000;
// if (diff > EPSILON) {
// norm = make_float3(0, -1, 0);
// adj = simData.pextstiff * diff - simData.pdamp * dot(norm, veval);
// norm *= adj; accel += norm;//*scale_dens;
// }
//
//#ifdef _xzsoftmargin
// // X-axis
// diff = simData.pradius - (pos.x - (simData.pboundmin.x + (sin(time*simData.pforce_freq) + 1)*0.5 * simData.pforce_min))*simData.psimscale;
// // if (diff>simData.pradius) diff += simData.pradius*1000;
// if (diff > EPSILON) {
// norm = make_float3(1, 0, 0);
// adj = (simData.pforce_min + 1) * simData.pextstiff * diff - simData.pdamp * dot(norm, veval);
// norm *= adj; accel += norm;//*scale_dens;
// }
// diff = simData.pradius - ((simData.pboundmax.x - (sin(time*simData.pforce_freq) + 1)*0.5*simData.pforce_max) - pos.x)*simData.psimscale;
// // if (diff>simData.pradius) diff += simData.pradius*1000;
// if (diff > EPSILON) {
// norm = make_float3(-1, 0, 0);
// adj = (simData.pforce_max + 1) * simData.pextstiff * diff - simData.pdamp * dot(norm, veval);
// norm *= adj; accel += norm;//*scale_dens;
// }
//
// // Z-axis
// diff = simData.pradius - (pos.z - simData.pboundmin.z) * simData.psimscale;
// // if (diff>simData.pradius) diff += simData.pradius*1000;
// if (diff > EPSILON) {
// norm = make_float3(0, 0, 1);
// adj = simData.pextstiff * diff - simData.pdamp * dot(norm, veval);
// norm *= adj; accel += norm;//*scale_dens;
// }
// diff = simData.pradius - (simData.pboundmax.z - pos.z)*simData.psimscale;
// // if (diff>simData.pradius) diff += simData.pradius*1000;
// if (diff > EPSILON) {
// norm = make_float3(0, 0, -1);
// adj = simData.pextstiff * diff - simData.pdamp * dot(norm, veval);
// norm *= adj; accel += norm;//*scale_dens;
// }
//#endif
// }
//if (i % 500 == 0&&buf.misbound[i]!=1)
// printf("particle %d's accel is (%f,%f,%f)\n", i, accel.x, accel.y, accel.z);
// Accel Limit
/*speed = accel.x*accel.x + accel.y*accel.y + accel.z*accel.z;
if ( speed > simData.AL2 ) {
accel *= simData.AL / sqrt(speed);
}*/
// Gravity
//accel += simData.pgravity;
return accel;
}
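//--- illustrative sketch (not called by the solver) ---------------------------
// The commented-out body of getBoundForce() above applies, per boundary plane,
// a penalty acceleration  a += (k_stiff*diff - k_damp*(n . v)) * n  whenever the
// penetration depth diff exceeds EPSILON. A minimal single-plane version with
// illustrative parameter names (the solver uses simData.pextstiff / simData.pdamp):
__device__ float3 sketchPlanePenalty(float diff, float3 planeNormal,
	float3 particleVel, float stiff, float damp)
{
	if (diff <= 0.0f)
		return make_float3(0, 0, 0);          // no penetration, no response
	float adj = stiff * diff - damp * dot(planeNormal, particleVel);
	return adj * planeNormal;                 // push back along the plane normal
}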
//__global__ void AddSPHtensorForce( bufList buf, int pnum, float time)
//{
// uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
// if ( i >= pnum) return;
// //if(buf.MFtype[i] != 1)
// // return;
//
// // Get search cell
// int nadj = (1*simData.gridRes.z + 1)*simData.gridRes.x + 1;
// uint gc = buf.mgcell[ i ];
// if ( gc == GRID_UNDEF ) return; // particle out-of-range
// gc -= nadj;
//
//// register float3 ipos = buf.mpos[ i ];
//// float *itensor = buf.MFtemptensor + i*9;
//// float3 tensorForce = make_float3(0,0,0);
////
////
//
//// /*if(i%1000==0&&buf.misbound[i]!=1)
//// printf("%d tensorforce: %f %f %f\n",i, tensorForce.x, tensorForce.y, tensorForce.z);
////*/
//// buf.mforce[i] = buf.mforce[i] + tensorForce;
//// if (buf.MFtype[i] == 1 && buf.elasticID[i] == 1600)
//// printf("tensor force is (%f,%f,%f)\n", tensorForce.x, tensorForce.y, tensorForce.z);
// //Get Other force!
// buf.maccel[i] = buf.mforce[i];
// //if (buf.MFtype[i] == 1 && (buf.elasticID[i] == 6 || buf.elasticID[i] == 31))
// // printf("final force %d's is %f,%f,%f\n", buf.elasticID[i], buf.mvel[i].x, buf.mvel[i].y, buf.mvel[i].z);
// buf.mforce[i] = make_float3(0,0,0);
//}
//********************** end project-u ************************
void floatup_cuda(int mode){
fcuda.gravityfree = mode;
checkCudaErrors ( hipMemcpyToSymbol ( simData, &fcuda, sizeof(FluidParams) ) );
return;
}
__global__ void updatePosition(float time, bufList buf, int pnum){
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if ( i >= pnum ) return;
if ( buf.mgcell[i] == GRID_UNDEF ) {
buf.mpos[i] = make_float3(-1000,-1000,-1000);
buf.maccel[i] = make_float3(0,0,0);
return;
}
// Get particle vars
register float3 accel, norm;
register float diff, adj, speed;
register float3 pos = buf.mpos[i];
register float3 veval = buf.mveleval[i];
float3 vel = buf.maccel[i];
register float newdens,newvisc, newmass;
// Leapfrog integration
accel = buf.maccel[i];
float beta[MAX_FLUIDNUM];
if (buf.misbound[i] != 1)
{
//float3 vnext = accel*simData.mf_dt + vel; // v(t+1/2) = v(t-1/2) + a(t) dt
//float3 tmpdeltaPos = (vnext + buf.mf_velxcor[i]) * (simData.mf_dt/simData.psimscale);
//float3 tmpPos = buf.mpos[i] + tmpdeltaPos;
buf.mforce[i] = accel; //use mvel to restore the first acceleration
float3 dPos = (buf.mveleval[i]*simData.mf_dt + 0.5* accel* simData.mf_dt* simData.mf_dt)/simData.psimscale;
buf.mpos[i] = buf.mpos[i] + dPos;
//Color Setting
//buf.mclr[i] = COLORA(buf.mf_alpha[i*MAX_FLUIDNUM+2],buf.mf_alpha[i*MAX_FLUIDNUM+1],buf.mf_alpha[i*MAX_FLUIDNUM+0],1);
//if(buf.MFtype[i]==0)
// buf.mclr[i] = COLORA(1,1,1,1);
//else
if (buf.MFtype[i] == 2 || (_example == 2&&buf.MFtype[i] >= 2))
{
//buf.mclr[i] = COLORA(1, 1, 0, 0.6);
int index = buf.elasticID[i];
for (int k = 1; k < MAX_FLUIDNUM; ++k)
beta[k] = buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k * MAX_SOLIDNUM + buf.MFtype[i] - 2];
if (_example == 2)
{
//float ratio = 3;
if (buf.MFtype[i] == 5)
buf.mclr[i] =
COLORA(1 / (1 + sqrt(beta[2] + beta[3])), 1 / (1 + sqrt(beta[1] + beta[3])), sqrt(beta[3])/(1+sqrt(beta[3])), !simData.HideSolid);
else
{
buf.mclr[i] =
COLORA(0, 1, 0, !simData.HideSolid);
}
}
else
buf.mclr[i] =
COLORA(1 - (beta[2] + beta[3]), 1 - (beta[1] + beta[3]), 1 - (beta[1] + beta[2]), !simData.HideSolid);
}
else
{
//for (int k = 1; k < MAX_FLUIDNUM; ++k)
// beta[k] = buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k * MAX_SOLIDNUM + 3];
//if(!simData.HideFluid)
buf.mclr[i] = COLORA(buf.mf_alpha[i*MAX_FLUIDNUM + 1], buf.mf_alpha[i*MAX_FLUIDNUM + 2],
buf.mf_alpha[i*MAX_FLUIDNUM + 3],!simData.HideFluid*0.55*
(buf.mf_alpha[i*MAX_FLUIDNUM + 1] + buf.mf_alpha[i*MAX_FLUIDNUM + 2] +
buf.mf_alpha[i*MAX_FLUIDNUM + 3]));
//else
// buf.mclr[i] = COLORA(buf.mf_alpha[i*MAX_FLUIDNUM + 1] + beta[3], buf.mf_alpha[i*MAX_FLUIDNUM + 2] + beta[2],
// buf.mf_alpha[i*MAX_FLUIDNUM + 3]+beta[1], buf.isInside[i]*0.55);
//else
// buf.mclr[i] = COLORA(beta[3], beta[2], beta[1], beta[3] + beta[2] + beta[1]);
//buf.mclr[i] = COLORA(buf.mf_alpha[i*MAX_FLUIDNUM + 2] + buf.mf_beta[i*MAX_FLUIDNUM + 2],
// buf.mf_alpha[i*MAX_FLUIDNUM + 1] + buf.mf_beta[i*MAX_FLUIDNUM + 1],
// buf.mf_alpha[i*MAX_FLUIDNUM + 0] + buf.mf_beta[i*MAX_FLUIDNUM + 0], !simData.HideFluid);
//buf.mclr[i] = COLORA(buf.mf_alpha[i*MAX_FLUIDNUM + 2],
// buf.mf_alpha[i*MAX_FLUIDNUM + 1],
// buf.mf_alpha[i*MAX_FLUIDNUM + 0], !simData.HideFluid*(buf.mf_alpha[i*MAX_FLUIDNUM + 1]+ buf.mf_alpha[i*MAX_FLUIDNUM + 2]));
}
//buf.mclr[i] = COLORA(buf.mf_alpha[i*MAX_FLUIDNUM + 2], buf.mf_alpha[i*MAX_FLUIDNUM + 1], buf.mf_alpha[i*MAX_FLUIDNUM + 0], 0);
}
else if (buf.misbound[i] == 1)
{
buf.mveleval[i] = make_float3(0,0,0); // v(t+1) = [v(t-1/2) + v(t+1/2)] * 0.5
buf.maccel[i] = make_float3(0,0,0);
buf.mforce[i] = make_float3(0,0,0);
if (buf.MFtype[i] > 2)
{
for (int k = 1; k < MAX_FLUIDNUM; ++k)
beta[k] = buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k * MAX_SOLIDNUM + buf.MFtype[i] - 2];
float sum = beta[1] + beta[2] + beta[3] + 1;
buf.mclr[i] =
COLORA(1 - (beta[2] + beta[3]), 1 - (beta[1] + beta[3]), 1 - (beta[1] + beta[2]), !simData.HideRigid);
//buf.mclr[i] = COLORA((sqrt(beta[1]))/sum, (sqrt(beta[2]))/sum, (sqrt(beta[3]))/sum, !simData.HideRigid*(beta[1]+beta[2]+beta[3]));
//buf.mclr[i] = COLORA((1+beta[1])/sum, (1+beta[2])/sum, (1+beta[3])/sum, !simData.HideRigid);
//buf.mclr[i] = COLORA(1, 1, 1, !simData.HideBound);
}
else
{
buf.mclr[i] = COLORA(1, 1, 1, !simData.HideBound);
}
}
buf.mforce[i] = make_float3(0, 0, 0);
}
__global__ void updateVelocity(float time, bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if ( i >= pnum ) return;
//if (buf.MFtype[i] == 3)return;
if ( buf.mgcell[i] == GRID_UNDEF ) {
buf.mpos[i] = make_float3(-1000,-1000,-1000);
buf.maccel[i] = make_float3(0,0,0);
return;
}
// Get particle vars
register float3 accel, accel1, accel2;
register float speed;
// Leapfrog integration
accel = buf.maccel[i];
if (isnan(dot(accel, accel)))
printf("particle %d's type is %d, accel is nan\n",
i, buf.MFtype[i]);
//if (buf.MFtype[i] == 0 && i % 10000 == 0)
// printf("particle %d's mixture vel is (%f,%f,%f), fluid vel is (%f,%f,%f)\n",
// i, buf.mveleval[i].x, buf.mveleval[i].y, buf.mveleval[i].z,
// buf.fluidVel[i*MAX_FLUIDNUM + 1].x, buf.fluidVel[i*MAX_FLUIDNUM + 1].y,
// buf.fluidVel[i*MAX_FLUIDNUM + 1].z);
speed = accel.x*accel.x + accel.y*accel.y + accel.z*accel.z;
if (speed > simData.AL2) {
accel *= simData.AL / sqrt(speed);
}
accel += simData.pgravity;
buf.maccel[i] = accel;
////int index;
//if(simData.example == 1 || simData.example == 2)
// if (buf.MFtype[i] == 1)
// {
// int index = buf.elasticID[i];
// if(buf.frame[index] > 1200 && buf.frame[index] < 1600)
// accel -= 3 * simData.pgravity;
// if (buf.frame[index] == 1600)
// {
// buf.mveleval[i] = make_float3(0, 0, 0);
// accel -= simData.pgravity;
// }
// if (buf.frame[index] >= 1600)
// {
// accel -= simData.pgravity;
// if (buf.isSurface[index] && buf.frame[index] <= 2000 && buf.frame[index] >= 1800 && simData.example == 1)
// accel += -300 * buf.normal[index];
// }
// }
if (buf.misbound[i] != 1)
{
buf.mveleval[i] = buf.mveleval[i] + simData.mf_dt*accel;
{
//buf.mveleval[i] += (1-buf.fluidPercent[i])*simData.mf_dt*buf.poroForce[i];
float vm = dot(buf.mveleval[i], buf.mveleval[i]);// .x*buf.mveleval[i].x + buf.mveleval[i].y*buf.mveleval[i].y + buf.mveleval[i].z*buf.mveleval[i].z;
vm = sqrt(vm);
if (vm > simData.VL)
{
buf.mveleval[i] *= simData.VL / vm;
}
}
}
else if (buf.misbound[i] == 1)
{
buf.mveleval[i] = make_float3(0,0,0); // v(t+1) = [v(t-1/2) + v(t+1/2)] * 0.5
buf.maccel[i] = make_float3(0,0,0);
buf.mforce[i] = make_float3(0,0,0);
//buf.mclr[i] = COLORA(1,1,1,0.8);
}
//buf.vel_mid[i] = buf.mveleval[i];
}
__global__ void computeMidVel(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
//if (buf.MFtype[i] == 3)return;
if (buf.mgcell[i] == GRID_UNDEF) {
buf.mpos[i] = make_float3(-1000, -1000, -1000);
buf.maccel[i] = make_float3(0, 0, 0);
return;
}
// Get particle vars
register float3 accel, norm, pos = buf.mpos[i];
register float speed;
//buf.vel_mid[i] = buf.mveleval[i];
//if (dot(buf.vel_mid[i], buf.vel_mid[i])!=dot(buf.mveleval[i], buf.mveleval[i]))
// printf("particle %d's type is %d, vel is (%f,%f,%f), vel_mid is (%f,%f,%f)\n",
// i, buf.MFtype[i], buf.mveleval[i].x, buf.mveleval[i].y, buf.mveleval[i].z,
// buf.vel_mid[i].x, buf.vel_mid[i].y, buf.vel_mid[i].z);
// float scale_dens = 1000.0/buf.mf_restdensity[i];
accel = buf.maccel[i];
speed = accel.x*accel.x + accel.y*accel.y + accel.z*accel.z;
if (speed > simData.AL2) {
accel *= simData.AL / sqrt(speed);
}
buf.mforce[i] = accel;
buf.fluidForce[i] = accel;
buf.maccel[i] = buf.mforce[i];
if (buf.misbound[i] != 1)
{
buf.vel_mid[i] = buf.mveleval[i] + simData.mf_dt*accel;
}
else
{
buf.mveleval[i] = make_float3(0, 0, 0); // v(t+1) = [v(t-1/2) + v(t+1/2)] * 0.5
buf.maccel[i] = make_float3(0, 0, 0);
buf.mforce[i] = make_float3(0, 0, 0);
buf.vel_mid[i] = make_float3(0, 0, 0);
//buf.mclr[i] = COLORA(1,1,1,0.8);
}
//buf.maccel[i] = make_float3(0, 0, 0);
//buf.mforce[i] = make_float3(0, 0, 0);
}
void LeapFrogIntegration(float time){
	hipLaunchKernelGGL(( updateVelocity), dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, time, fbuf, fcuda.pnum);
	hipDeviceSynchronize();
	hipLaunchKernelGGL(( updatePosition), dim3(fcuda.numBlocks), dim3(fcuda.numThreads), 0, 0, time, fbuf, fcuda.pnum);
	hipDeviceSynchronize();
}
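//--- illustrative sketch (not called by the solver) ---------------------------
// updateVelocity()/updatePosition() launched above amount to a semi-implicit
// update per particle: v += a*dt (with acceleration/velocity limits), then
// x += (v*dt + 0.5*a*dt*dt)/psimscale. A minimal single-particle version with
// the limits omitted; invSimScale stands for 1/psimscale.
__device__ void sketchIntegrateParticle(float3& pos, float3& vel, float3 accel,
	float dt, float invSimScale)
{
	vel = vel + accel * dt;                                         // velocity update
	pos = pos + (vel * dt + 0.5f * accel * dt * dt) * invSimScale;  // position update
}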
//****An Implicit SPH Formulation for Incompressible Linearly Elastic Solids*************
__global__ void ComputeMap(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (_example != 2)
{
if (buf.MFtype[i] != 2) return;
}
else
if (buf.MFtype[i] < 2)
return;
int gc = buf.mgcell[i];
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
if (gc == GRID_UNDEF) {
buf.mpos[i] = make_float3(-1000, -1000, -1000);
buf.maccel[i] = make_float3(0, 0, 0);
return;
}
int elasticIndex = buf.elasticID[i];
int j = 0;
for(int l=0;l<buf.neighborNum[elasticIndex];++l)
{
j = buf.neighborID[elasticIndex * simData.maxNeighborNum + l];
for(int k=0;k<buf.neighborNum[j];++k)
if(elasticIndex == buf.neighborID[j*simData.maxNeighborNum +k])
{
//if (elasticIndex == 1600)
//{
// printf("elastic id: %d,neighborID:%d\n", buf.elasticID[i], j);
//}
buf.neighborIndex[elasticIndex * simData.maxNeighborNum + l] = k;
break;
}
}
//if (elasticIndex == 1600)
// printf("particle %d's elasticID is %d\n", i, buf.elasticID[i]);
}
//compute only once
__global__ void ComputeCorrectL(bufList buf,int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (_example != 2)
{
if (buf.MFtype[i] != 2) return;
}
else
if (buf.MFtype[i] < 2)
return;
int gc = buf.mgcell[i];
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
//if(i%100 == 0)
//printf("particle %d's elasticID is %d\n", i, buf.elasticID[i]);
if (gc == GRID_UNDEF) {
buf.mpos[i] = make_float3(-1000, -1000, -1000);
buf.maccel[i] = make_float3(0, 0, 0);
return;
}
gc -= nadj;
float correctL[9];
for (int l = 0; l < 9; ++l)
correctL[l] = 0;
int index = 0;
int jndex, j;
int elasticIndex = buf.elasticID[i];
float dsq, c;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2 / d2;
float pmterm;
float3 dist, vmr;
//if(elasticIndex == 1600)
//printf("particle %d's elasticIndex is %d\n", i, elasticIndex);
//if (elasticIndex >= simData.numElasticPoints)
//printf("elasticIndex = %d and limit %d\n", elasticIndex, simData.numElasticPoints);
//fbuf.elasticID[elasticIndex] = elasticIndex;
//buf.initialVolume[elasticIndex] = buf.mf_restmass[i] * buf.mdensity[i];
for (int l = 0; l < buf.neighborNum[elasticIndex]; l++)
{
jndex = buf.neighborID[elasticIndex * simData.maxNeighborNum + l];
j = buf.particleID[jndex];
dist = (buf.mpos[i] - buf.mpos[j]); // dist in cm
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
dsq = sqrt(dsq*d2);
c = simData.psmoothradius - dsq;
dist *= simData.psimscale;
pmterm = buf.initialVolume[jndex] * simData.spikykern * c * c / dsq;
//pmterm = buf.initialVolume[jndex] * simData.spikykern * c * c;//v_j 0
correctL[0] += -pmterm * dist.x*dist.x; correctL[1] += -pmterm * dist.x*dist.y; correctL[2] += -pmterm * dist.x*dist.z;
correctL[3] += -pmterm * dist.y*dist.x; correctL[4] += -pmterm * dist.y*dist.y; correctL[5] += -pmterm * dist.y*dist.z;
correctL[6] += -pmterm * dist.z*dist.x; correctL[7] += -pmterm * dist.z*dist.y; correctL[8] += -pmterm * dist.z*dist.z;
}
if (det(correctL) != 0) {
/*if (i % 1000 == 0)
printf("particle %d's L is (%f,%f,%f)(%f,%f,%f)(%f,%f,%f)\n", i,
correctL[0], correctL[1], correctL[2],
correctL[3], correctL[4], correctL[5],
correctL[6], correctL[7], correctL[8]);*/
InverseMatrix3(correctL);
/*if (elasticIndex == 0)
printf("particle %d's inverseL is (%f,%f,%f)(%f,%f,%f)(%f,%f,%f)\n", i,
correctL[0], correctL[1], correctL[2],
correctL[3], correctL[4], correctL[5],
correctL[6], correctL[7], correctL[8]);*/
}
else
printf("ERROR:particle %d's correctL cannot be inversed! neighbor num is %d, correctL is (%f,%f,%f)(%f,%f,%f)(%f,%f,%f)\n",
i, buf.neighborNum[elasticIndex], correctL[0], correctL[1],correctL[2],correctL[3],
correctL[4],correctL[5],correctL[6],correctL[7],correctL[8]);
// float3 dist;
// float c;
// int jndex;
for(int l=0;l<buf.neighborNum[elasticIndex];++l)
{
dist = buf.neighborDistance[elasticIndex * simData.maxNeighborNum + l];
dsq = sqrt(dot(dist, dist));
c = simData.psmoothradius - dsq;
buf.kernelGrad[elasticIndex * simData.maxNeighborNum + l].x = correctL[0] * dist.x + correctL[1] * dist.y + correctL[2] * dist.z;
buf.kernelGrad[elasticIndex * simData.maxNeighborNum + l].y = correctL[3] * dist.x + correctL[4] * dist.y + correctL[5] * dist.z;
buf.kernelGrad[elasticIndex * simData.maxNeighborNum + l].z = correctL[6] * dist.x + correctL[7] * dist.y + correctL[8] * dist.z;
buf.kernelGrad[elasticIndex * simData.maxNeighborNum + l].x *= simData.spikykern *c *c/dsq;
buf.kernelGrad[elasticIndex * simData.maxNeighborNum + l].y *= simData.spikykern *c *c/dsq;
buf.kernelGrad[elasticIndex * simData.maxNeighborNum + l].z *= simData.spikykern *c *c/dsq;
//jndex = buf.neighborID[elasticIndex];
//buf.initialVolume[elasticIndex] += simData.poly6kern * pow(c, 3) * buf.mf_restmass[i] * buf.mdensity[buf.particleID[jndex]];
}
buf.frame[elasticIndex] = 0;
//if (i % 1000 == 0)
// printf("initial volume is %f\n", 1000000*buf.initialVolume[elasticIndex]);
}
__global__ void CheckCorrectedKernelGradientError(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (_example != 2)
{
if (buf.MFtype[i] != 2) return;
}
else
if (buf.MFtype[i] < 2)
return;
int gc = buf.mgcell[i];
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
//if(i%100 == 0)
//printf("particle %d's elasticID is %d\n", i, buf.elasticID[i]);
if (gc == GRID_UNDEF) {
buf.mpos[i] = make_float3(-1000, -1000, -1000);
buf.maccel[i] = make_float3(0, 0, 0);
return;
}
int index = buf.elasticID[i];
int jndex, j;
float3 dist;
float check[9] = {0,0,0,0,0,0,0,0,0};
float temp[9];
//printf("particle %d's elasticIndex is %d\n", i, index);
//if(index == 1600)
// printf("initial neighbor num is %d\n", buf.neighborNum[index]);
for(int l=0;l<buf.neighborNum[index];++l)
{
jndex = buf.neighborID[index * simData.maxNeighborNum + l];
dist = -buf.neighborDistance[index * simData.maxNeighborNum + l];
//if (index == 100)
// printf("initial dist with %d is (%f,%f,%f)\n", jndex,dist.x, dist.y, dist.z);
/* if (index == 100 && jndex == 99)
printf("initial dist is %f,%f,%f\n", dist.x, dist.y, dist.z);*/
dist *= buf.initialVolume[jndex];
/*if (index == 100 && jndex == 99)
printf("initial kernel is %f,%f,%f\n", elasticInfo.kernelGrad[index * 600 + l].x, elasticInfo.kernelGrad[index * 600 + l].y, elasticInfo.kernelGrad[index * 600 + l].z);
*/
/*if (index == 100 && elasticInfo.neighborID[index * 600 + l] == 99)
printf("initial volume is %.15f\n", elasticInfo.initialVolume[jndex]);*/
tensorProduct(dist, buf.kernelGrad[index * simData.maxNeighborNum + l], temp);
for (int k = 0; k < 9; ++k)
check[k] += temp[k];
}
if (index == 1600)
printf("checkError is (%f,%f,%f)(%f,%f,%f)(%f,%f,%f)\n",
check[0], check[1], check[2],
check[3], check[4], check[5],
check[6], check[7], check[8]);
}
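//--- illustrative sketch (not called by the solver) ---------------------------
// With the corrected gradients from ComputeCorrectL(), the first-order
// consistency condition  sum_j V_j (x_j - x_i) (x) gradW~_ij = I  should hold;
// CheckCorrectedKernelGradientError() above prints that 3x3 sum for one
// particle. A small helper measuring its deviation from the identity
// (sketch only; m is row-major 3x3):
__device__ float sketchIdentityError(const float* m)
{
	float err = 0.0f;
	for (int r = 0; r < 3; r++)
		for (int c = 0; c < 3; c++)
		{
			float target = (r == c) ? 1.0f : 0.0f;
			float d = m[3 * r + c] - target;
			err += d * d;
		}
	return sqrtf(err);                        // Frobenius norm of (M - I)
}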
__device__ void contributeVolume(int i, int cell, bufList buf, int& index, float& volume)
{
float dsq, c;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2 / d2;
float3 dist, vmr;
int j, jndex;
if (buf.mgridcnt[cell] == 0) return;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
int elasticIndex = buf.elasticID[i];
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.MFtype[j] == 2 || (_example == 2 && buf.MFtype[j] >= 2))
{
dist = (buf.mpos[i] - buf.mpos[j]); // dist in cm
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if (!(dsq < r2 && dsq > 0))
continue;
if (index >= simData.maxNeighborNum)
return;
dsq = sqrt(dsq*d2);
c = simData.psmoothradius - dsq;
jndex = buf.elasticID[j];
buf.neighborID[elasticIndex * simData.maxNeighborNum + index] = jndex;
dist *= simData.psimscale;
buf.neighborDistance[elasticIndex * simData.maxNeighborNum + index] = dist;
volume += pow(buf.mf_restmass[j] * buf.density_solid[j], 2)
* simData.poly6kern * pow((r2*d2 - dsq*dsq), 3);
index++;
}
}
}
__global__ void ComputeInitialVolume(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (_example != 2)
{
if (buf.MFtype[i] != 2) return;
}
else
if (buf.MFtype[i] < 2)
return;
int gc = buf.mgcell[i];
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
//if(i%100 == 0)
//printf("particle %d's elasticID is %d\n", i, buf.elasticID[i]);
if (gc == GRID_UNDEF) {
buf.mpos[i] = make_float3(-1000, -1000, -1000);
buf.maccel[i] = make_float3(0, 0, 0);
return;
}
gc -= nadj;
int index = 0;
int elasticIndex = buf.elasticID[i];
buf.initialVolume[elasticIndex] = 0;
buf.particleID[elasticIndex] = i;
for (int c = 0; c < simData.gridAdjCnt; c++)
{
contributeVolume(i, gc + simData.gridAdj[c], buf, index, buf.initialVolume[elasticIndex]);
if (index >= simData.maxNeighborNum)
printf("ERROR:Neighbor space is not enough!\n");
}
//buf.initialVolume[elasticIndex] = pow(simData.psmoothradius / 2, 3);
//buf.initialVolume[elasticIndex] +=
// pow(buf.mf_restmass[i] * buf.density_solid[elasticIndex], 2)*pow(simData.r2, 3)*simData.poly6kern;
//if(elasticIndex%1000==0)
//printf("elastic particle %d's initial volume is %.10f\n", elasticIndex, buf.initialVolume[elasticIndex]);
buf.neighborNum[elasticIndex] = index;
//if (buf.mpos[i].y > 20)
// buf.isHead[elasticIndex] = 1;
//else
// buf.isHead[elasticIndex] = 0;
//if (elasticIndex % 1000 == 0)
// printf("elastic particle %d's rest mass is %f, solid density is %f\n", elasticIndex, buf.mf_restmass[i], buf.density_solid[elasticIndex]);
}
void ComputeCorrectLCUDA()
{
ComputeInitialVolume << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: computeInitialVolume: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
ComputeCorrectL << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: computeCorrectL: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
ComputeMap << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: computeMap: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
CheckCorrectedKernelGradientError << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: checkCKGradError: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
//testFunc << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
//error = hipGetLastError();
//if (error != hipSuccess) {
// fprintf(stderr, "CUDA ERROR: checkCKGradError: %s\n", hipGetErrorString(error));
//}
//hipDeviceSynchronize();
}
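//--- illustrative sketch (not used by the functions above) --------------------
// ComputeCorrectLCUDA() repeats the same launch / hipGetLastError /
// hipDeviceSynchronize sequence for every kernel. The same pattern factored
// into a small host helper; reportLastHipError is an illustrative name and is
// not referenced elsewhere in this file.
inline void reportLastHipError(const char* kernelName)
{
	hipError_t error = hipGetLastError();
	if (error != hipSuccess)
		fprintf(stderr, "CUDA ERROR: %s: %s\n", kernelName, hipGetErrorString(error));
	hipDeviceSynchronize();
}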
__device__ float contributeTest(int i, int cell, bufList buf)
{
float dsq, c;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2 / d2;
float3 dist, vmr;
float cmterm, cmterm1;
// float massj;
float pmterm, vmterm;
// float q;
int j, mulj;
float aveDenij, cx, xterm;
//if (i % 100 == 0)
// printf("particle %d's gridcnt is %d\n", i,buf.mgridcnt[cell]);
if (buf.mgridcnt[cell] == 0) return 0;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
int elasticIndex = buf.elasticID[i];
int jndex;
float sum = 0;
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
dist = (buf.mpos[i] - buf.mpos[j]); // dist in cm
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if (!(dsq < r2 && dsq > 0))
continue;
c = (r2 - dsq)*d2;
sum += buf.mf_restmass[j] / buf.mf_restdensity[j]* simData.poly6kern * pow(c, 3);
}
return sum;
}
__global__ void testFunc(bufList buf,int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
//if (buf.MFtype[i] != 1) return;
int gc = buf.mgcell[i];
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
if (gc == GRID_UNDEF) {
//buf.mpos[i] = make_float3(-1000, -1000, -1000);
//buf.mvel[i] = make_float3(0, 0, 0);
return;
}
gc -= nadj;
float sum = 0;
for (int c = 0; c < simData.gridAdjCnt; c++)
{
sum += contributeTest(i, gc + simData.gridAdj[c], buf);
}
if (i % 1000 == 0)
printf("test sum is %f\n", sum);
//if (buf.MFtype[i] != 1) return;
//printf("particle %d is an elastic particle,ID is %d\n", i,buf.elasticID[i]);
}
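// ComputeDeformGrad: assemble each elastic particle's deformation gradient from the
// volume-weighted outer products of current neighbor offsets and the precomputed
// kernel gradients, extract its rotational part with extractRotation, and store the
// rotation together with the rotated kernel gradients for the corotated formulation.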
__global__ void ComputeDeformGrad(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (_example != 2)
{
if (buf.MFtype[i] != 2) return;
}
else
if (buf.MFtype[i] < 2)
return;
int gc = buf.mgcell[i];
//if(i%100 == 0)
//printf("particle %d's elasticID is %d\n", i, buf.elasticID[i]);
if (gc == GRID_UNDEF) {
buf.mpos[i] = make_float3(-1000, -1000, -1000);
buf.maccel[i] = make_float3(0, 0, 0);
return;
}
for (int l = 0; l < 9; ++l)
buf.gradDeform[i*9+l] = 0;
float3 dist,grad;
int elasticIndex = buf.elasticID[i];
if (buf.particleID[elasticIndex] != i)
printf("map error!id is %d, i is %d\n", buf.particleID[elasticIndex], i);
//elasticInfo.particleID[elasticIndex] = i;
float tempDG[9];
int jndex, j;
//if(elasticIndex == 100)
// printf("now neighbor num is %d\n", elasticInfo.neighborNum[elasticIndex]);
for (int l = 0; l<buf.neighborNum[elasticIndex]; ++l)
{
jndex = buf.neighborID[elasticIndex * simData.maxNeighborNum + l];
j = buf.particleID[jndex];
if(buf.elasticID[j]!=jndex)
{
printf("map error!\n");
continue;
}
dist = (buf.mpos[j] - buf.mpos[i]) * simData.psimscale;
//if (elasticIndex == 100)
// printf("now dist with %d is (%f,%f,%f)\n", jndex, dist.x, dist.y, dist.z);
dist *= buf.initialVolume[buf.neighborID[elasticIndex * simData.maxNeighborNum + l]];
/* if (elasticIndex == 100 && elasticInfo.neighborID[elasticIndex * 600 + l] == 99)
printf("now dist is %f,%f,%f\n", dist.x, dist.y, dist.z);*/
/*if (elasticIndex == 100 && elasticInfo.neighborID[elasticIndex * 600 + l] == 99)
printf("now kernel is %f,%f,%f\n", elasticInfo.kernelGrad[elasticIndex * 600 + l].x, elasticInfo.kernelGrad[elasticIndex * 600 + l].y, elasticInfo.kernelGrad[elasticIndex * 600 + l].z);*/
/*if (elasticIndex == 100 && elasticInfo.neighborID[elasticIndex * 600 + l] == 99)
printf("now volume is %.15f\n", elasticInfo.initialVolume[jndex]);*/
grad = buf.kernelGrad[elasticIndex * simData.maxNeighborNum + l];
tensorProduct(dist, grad, tempDG);
for (int k = 0; k < 9; ++k)
buf.gradDeform[i*9+k] += tempDG[k];
}
//if (buf.elasticID[i] == 1600)
// printf("particle %d's deform grad is (%f,%f,%f)(%f,%f,%f)(%f,%f,%f)\n", elasticIndex,
// buf.gradDeform[i * 9],
// buf.gradDeform[i * 9 + 1], buf.gradDeform[i * 9 + 2], buf.gradDeform[i * 9 + 3],
// buf.gradDeform[i * 9 + 4], buf.gradDeform[i * 9 + 5], buf.gradDeform[i * 9 + 6],
// buf.gradDeform[i * 9 + 7], buf.gradDeform[i * 9 + 8]);
float q[9] = { 1,0,0,0,1,0,0,0,1 };
float error = 0;
float3 t;
extractRotation(&buf.gradDeform[i * 9], q, 100);
//if (i == 37000)
// printf("q is (%f,%f,%f,%f)\n", q[0], q[1], q[2], q[3]);
for (int l = 0; l < 9; ++l)
buf.Rotation[i * 9 + l] = q[l];
for (int l = 0; l<buf.neighborNum[elasticIndex]; ++l)
{
buf.kernelRotate[elasticIndex * simData.maxNeighborNum + l] =
multiply_mv3(&buf.Rotation[i * 9], buf.kernelGrad[elasticIndex * simData.maxNeighborNum + l]);
}
/*if (buf.elasticID[i] == 100)
printf("delta error is %f\n", error);*/
/*if (buf.elasticID[i] == 1600)
printf("particle %d's rotation is (%f,%f,%f)(%f,%f,%f)(%f,%f,%f)\n", i,
buf.Rotation[i * 9],
buf.Rotation[i * 9 + 1], buf.Rotation[i * 9 + 2], buf.Rotation[i * 9 + 3],
buf.Rotation[i * 9 + 4], buf.Rotation[i * 9 + 5], buf.Rotation[i * 9 + 6],
buf.Rotation[i * 9 + 7], buf.Rotation[i * 9 + 8]);*/
}
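// ComputeFinalDeformGrad: rebuild the deformation gradient using the rotated kernel
// gradients (kernelRotate), i.e. the corotated deformation gradient.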
__global__ void ComputeFinalDeformGrad(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (_example != 2)
{
if (buf.MFtype[i] != 2) return;
}
else
if (buf.MFtype[i] < 2)
return;
int gc = buf.mgcell[i];
//if(i%100 == 0)
//printf("particle %d's elasticID is %d\n", i, buf.elasticID[i]);
if (gc == GRID_UNDEF) {
buf.mpos[i] = make_float3(-1000, -1000, -1000);
buf.maccel[i] = make_float3(0, 0, 0);
return;
}
float3 rotatedKernelGrad;
//compute corotated deformation gradient
int elasticIndex = buf.elasticID[i];
if (elasticIndex < 0 || elasticIndex >= simData.numElasticPoints)
printf("elasticIndex wrong!\n");
float3 grad,dist;
float deformGrad[9];
for(int k=0;k<9;++k)
buf.gradDeform[i * 9 + k] = 0;
int j, jndex;
for (int l = 0; l<buf.neighborNum[elasticIndex]; ++l)
{
jndex = buf.neighborID[elasticIndex * simData.maxNeighborNum + l];
j = buf.particleID[jndex];
grad = buf.kernelRotate[elasticIndex * simData.maxNeighborNum + l];
dist = buf.mpos[j] - buf.mpos[i];
dist *= simData.psimscale;
//dist -= multiply_mv3(&buf.Rotation[i * 9], -elasticInfo.neighborDistance[elasticIndex * 600 + l]);
dist *= buf.initialVolume[jndex];
tensorProduct(dist, grad, deformGrad);
for (int k = 0; k < 9; ++k)
buf.gradDeform[i * 9 + k] += deformGrad[k];
}
//if (elasticIndex == 1600)
// printf("final deform gradient is (%f,%f,%f)(%f,%f,%f)(%f,%f,%f)\n",
// buf.gradDeform[i * 9], buf.gradDeform[i * 9 + 1], buf.gradDeform[i * 9 + 2],
// buf.gradDeform[i * 9 + 3], buf.gradDeform[i * 9 + 4], buf.gradDeform[i * 9 + 5],
// buf.gradDeform[i * 9 + 6], buf.gradDeform[i * 9 + 7], buf.gradDeform[i * 9 + 8]);
/*buf.gradDeform[i * 9] += 1;
buf.gradDeform[i * 9 + 4] += 1;
buf.gradDeform[i * 9 + 8] += 1;*/
////
//float test[9] = { 0,0,0,0,0,0,0,0,0 };
//for (int l = 0; l<buf.neighborNum[elasticIndex]; ++l)
//{
// jndex = buf.neighborID[elasticIndex * simData.maxNeighborNum + l];
// j = buf.particleID[jndex];
// grad = buf.kernelRotate[elasticIndex * simData.maxNeighborNum + l];
// dist = multiply_mv3(&buf.Rotation[i * 9], -buf.neighborDistance[elasticIndex * simData.maxNeighborNum + l]);
// dist *= buf.initialVolume[buf.neighborID[elasticIndex * simData.maxNeighborNum + l]];
// tensorProduct(dist, grad, deformGrad);
// for (int k = 0; k < 9; ++k)
// test[k] += deformGrad[k];
//}
//if (elasticIndex == 100)
// printf("test matrix is (%f,%f,%f)(%f,%f,%f)(%f,%f,%f)\n",
// test[0], test[1], test[2],
// test[3], test[4], test[5],
// test[6], test[7], test[8]);
}
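// ComputeStrainAndStress: form the corotated deformation gradient from the rotated
// kernel gradients, take its symmetric part minus the identity as the linear strain,
// store its trace as the volumetric strain, and build the stress
//   sigma = 2*miu*eps + lambda*tr(eps)*I - alpha*I,
// where alpha is the pore-pressure (Biot-like) correction. The stress overwrites
// gradDeform.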
__global__ void ComputeStrainAndStress(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (_example != 2)
{
if (buf.MFtype[i] != 2) return;
}
else
if (buf.MFtype[i] < 2)
return;
int gc = buf.mgcell[i];
if (gc == GRID_UNDEF) {
//buf.mpos[i] = make_float3(-1000, -1000, -1000);
//buf.mvel[i] = make_float3(0, 0, 0);
return;
}
float3 rotatedKernelGrad;
//compute corotated deformation gradient
int index = buf.elasticID[i];
if (index < 0 || index >= simData.numElasticPoints)
printf("elasticIndex wrong!\n");
float3 grad, dist;
float deformGrad[9];
for (int k = 0; k<9; ++k)
buf.gradDeform[i * 9 + k] = 0;
int j, jndex;
for (int l = 0; l<buf.neighborNum[index]; ++l)
{
jndex = buf.neighborID[index * simData.maxNeighborNum + l];
j = buf.particleID[jndex];
grad = buf.kernelRotate[index * simData.maxNeighborNum + l];
dist = buf.mpos[j] - buf.mpos[i];
dist *= simData.psimscale;
dist *= buf.initialVolume[jndex];
tensorProduct(dist, grad, deformGrad);
for (int k = 0; k < 9; ++k)
buf.gradDeform[i * 9 + k] += deformGrad[k];
}
//strain and stress
float strain[9], stress[9];
float alpha;
transmit3(&buf.gradDeform[i * 9], stress);
for (int l = 0; l < 9; ++l)
strain[l] = 0.5*(buf.gradDeform[i * 9 + l] + stress[l]);
strain[0] -= 1; strain[4] -= 1; strain[8] -= 1;
buf.volumetricStrain[index] = strain[0] + strain[4] + strain[8];
float lambda = simData.lambda;
float tr_strain = strain[0] + strain[4] + strain[8];
for (int l = 0; l < 9; ++l)
stress[l] = 2 * simData.miu * strain[l];
stress[0] += lambda * tr_strain; stress[4] += lambda * tr_strain; stress[8] += lambda * tr_strain;
alpha = simData.poroDeformStrength*(1 - simData.bulkModulus_porous / simData.bulkModulus_grains) * buf.pressure_water[i*MAX_FLUIDNUM];
stress[0] -= alpha;
stress[4] -= alpha;
stress[8] -= alpha;
for (int l = 0; l < 9; ++l)
buf.gradDeform[i * 9 + l] = stress[l];
}
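// ComputeElasticForce: sum the pairwise elastic forces
//   V_i * V_j * (sigma_i * w~_ij - sigma_j * w~_ji)
// from the stress stored in gradDeform and the rotated kernel gradients, then write
// the right-hand side b = v + dt*f/m and the initial guess v for the implicit
// velocity solve.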
__global__ void ComputeElasticForce(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (_example != 2)
{
if (buf.MFtype[i] != 2) return;
}
else
if (buf.MFtype[i] < 2)
return;
int gc = buf.mgcell[i];
if (gc == GRID_UNDEF)
{
//buf.mpos[i] = make_float3(-1000, -1000, -1000);
//buf.mvel[i] = make_float3(0, 0, 0);
return;
}
int index = buf.elasticID[i];
if (index < 0 || index >= simData.numElasticPoints)
printf("elasticIndex wrong!\n");
int j, jndex, k;
float3 force = make_float3(0, 0, 0);
float3 t1, t2;
for (int l = 0; l<buf.neighborNum[index]; ++l)
{
jndex = buf.neighborID[index * simData.maxNeighborNum + l];
j = buf.particleID[jndex];
k = buf.neighborIndex[index * simData.maxNeighborNum + l];
t1 = multiply_mv3(&buf.gradDeform[i * 9], buf.kernelRotate[index * simData.maxNeighborNum + l]);
t1 -= multiply_mv3(&buf.gradDeform[j * 9], buf.kernelRotate[jndex * simData.maxNeighborNum + k]);
t1 *= buf.initialVolume[index];
t1 *= buf.initialVolume[jndex];
force += t1;
}
//if (index % 30000 == 0)
// printf("solid particle %d's elastic force is (%f,%f,%f)\n", index, force.x, force.y, force.z);
//buf.mforce[i] += force;
//buf.maccel[i] += force;
buf.bx[index] = buf.mveleval[i].x + simData.mf_dt*force.x / buf.mf_restmass[i];
buf.by[index] = buf.mveleval[i].y + simData.mf_dt*force.y / buf.mf_restmass[i];
buf.bz[index] = buf.mveleval[i].z + simData.mf_dt*force.z / buf.mf_restmass[i];
//if (index % 10000 == 0)
// printf("b is (%f,%f,%f)\n", buf.bx[index], buf.by[index], buf.bz[index]);
buf.vx[index] = buf.mveleval[i].x; buf.vy[index] = buf.mveleval[i].y; buf.vz[index] = buf.mveleval[i].z;
}
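// The next five kernels form the matrix-free conjugate-gradient iteration for the
// implicit elastic velocity update: ComputeIterationStrainAndStress and
// ComputeIterationElasticForce evaluate A*p (stress from a candidate velocity field
// scaled by dt, then Ap = p - dt*f/m), initElasticIteration sets r = p = b - A*v,
// and updateV / updateP are the standard CG vector updates, here with separate
// alpha/beta values per coordinate axis.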
__global__ void ComputeIterationStrainAndStress(bufList buf, int pnum, float* px, float*py, float*pz)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (_example != 2)
{
if (buf.MFtype[i] != 2) return;
}
else
if (buf.MFtype[i] < 2)
return;
int gc = buf.mgcell[i];
//if(i%100 == 0)
//printf("particle %d's elasticID is %d\n", i, buf.elasticID[i]);
if (gc == GRID_UNDEF) {
buf.mpos[i] = make_float3(-1000, -1000, -1000);
buf.maccel[i] = make_float3(0, 0, 0);
return;
}
float3 rotatedKernelGrad;
//compute corotated deformation gradient
int elasticIndex = buf.elasticID[i];
if (elasticIndex < 0 || elasticIndex >= simData.numElasticPoints)
printf("elasticIndex wrong!\n");
float3 grad, dist;
float deformGrad[9];
for (int k = 0; k<9; ++k)
buf.gradDeform[i * 9 + k] = 0;
int j, jndex;
int index = buf.elasticID[i];
for (int l = 0; l<buf.neighborNum[elasticIndex]; ++l)
{
jndex = buf.neighborID[elasticIndex * simData.maxNeighborNum + l];
j = buf.particleID[jndex];
grad = buf.kernelRotate[elasticIndex * simData.maxNeighborNum + l];
//dist = buf.mpos[j] - buf.mpos[i];
//dist *= simData.psimscale;
dist = make_float3(px[jndex] - px[elasticIndex], py[jndex] - py[elasticIndex], pz[jndex] - pz[elasticIndex]) * simData.mf_dt;
//dist -= multiply_mv3(&buf.Rotation[i * 9], -elasticInfo.neighborDistance[elasticIndex * 600 + l]);
dist *= buf.initialVolume[jndex];
tensorProduct(dist, grad, deformGrad);
for (int k = 0; k < 9; ++k)
buf.gradDeform[i * 9 + k] += deformGrad[k];
}
//strain and stress
float strain[9], stress[9];
float alpha;
transmit3(&buf.gradDeform[i * 9], stress);
for (int l = 0; l < 9; ++l)
strain[l] = 0.5*(buf.gradDeform[i * 9 + l] + stress[l]);
//strain[0] -= 1; strain[4] -= 1; strain[8] -= 1;
buf.volumetricStrain[index] = strain[0] + strain[4] + strain[8];
float lambda = simData.lambda;
float tr_strain = strain[0] + strain[4] + strain[8];
for (int l = 0; l < 9; ++l)
stress[l] = 2 * simData.miu * strain[l];
stress[0] += lambda * tr_strain; stress[4] += lambda * tr_strain; stress[8] += lambda * tr_strain;
alpha = simData.poroDeformStrength*(1 - simData.bulkModulus_porous / simData.bulkModulus_grains) * buf.pressure_water[i*MAX_FLUIDNUM];
stress[0] -= alpha;
stress[4] -= alpha;
stress[8] -= alpha;
for (int l = 0; l < 9; ++l)
buf.gradDeform[i * 9 + l] = stress[l];
}
__global__ void ComputeIterationElasticForce(bufList buf, int pnum, float* px, float*py, float*pz)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (_example != 2)
{
if (buf.MFtype[i] != 2) return;
}
else
if (buf.MFtype[i] < 2)
return;
int gc = buf.mgcell[i];
if (gc == GRID_UNDEF)
{
return;
}
int index = buf.elasticID[i];
if (index < 0 || index >= simData.numElasticPoints)
printf("elasticIndex wrong!\n");
int j, jndex, k;
float3 force = make_float3(0, 0, 0);
float3 t1, t2;
for (int l = 0; l<buf.neighborNum[index]; ++l)
{
jndex = buf.neighborID[index * simData.maxNeighborNum + l];
j = buf.particleID[jndex];
k = buf.neighborIndex[index * simData.maxNeighborNum + l];
t1 = multiply_mv3(&buf.gradDeform[i * 9], buf.kernelRotate[index * simData.maxNeighborNum + l]);
t1 -= multiply_mv3(&buf.gradDeform[j * 9], buf.kernelRotate[jndex * simData.maxNeighborNum + k]);
t1 *= buf.initialVolume[index];
t1 *= buf.initialVolume[jndex];
force += t1;
}
buf.Apx[index] = px[index] - simData.mf_dt*force.x / buf.mf_restmass[i];
buf.Apy[index] = py[index] - simData.mf_dt*force.y / buf.mf_restmass[i];
buf.Apz[index] = pz[index] - simData.mf_dt*force.z / buf.mf_restmass[i];
}
__global__ void initElasticIteration(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (_example != 2)
{
if (buf.MFtype[i] != 2) return;
}
else
if (buf.MFtype[i] < 2)
return;
int gc = buf.mgcell[i];
if (gc == GRID_UNDEF)
{
return;
}
int index = buf.elasticID[i];
buf.px[index] = buf.rx[index] = buf.bx[index] - buf.Apx[index];
buf.py[index] = buf.ry[index] = buf.by[index] - buf.Apy[index];
buf.pz[index] = buf.rz[index] = buf.bz[index] - buf.Apz[index];
}
__global__ void updateV(bufList buf, int pnum, float3 alpha)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (_example != 2)
{
if (buf.MFtype[i] != 2) return;
}
else
if (buf.MFtype[i] < 2)
return;
int gc = buf.mgcell[i];
if (gc == GRID_UNDEF)
{
return;
}
int index = buf.elasticID[i];
buf.vx[index] += alpha.x * buf.px[index]; buf.vy[index] += alpha.y * buf.py[index]; buf.vz[index] += alpha.z * buf.pz[index];
buf.r2x[index] = buf.rx[index] - alpha.x*buf.Apx[index];
buf.r2y[index] = buf.ry[index] - alpha.y*buf.Apy[index];
buf.r2z[index] = buf.rz[index] - alpha.z*buf.Apz[index];
}
__global__ void updateP(bufList buf, int pnum, float3 beta)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (_example != 2)
{
if (buf.MFtype[i] != 2) return;
}
else
if (buf.MFtype[i] < 2)
return;
int gc = buf.mgcell[i];
if (gc == GRID_UNDEF)
{
return;
}
int index = buf.elasticID[i];
buf.px[index] = buf.r2x[index] + beta.x*buf.px[index];
buf.py[index] = buf.r2y[index] + beta.y*buf.py[index];
buf.pz[index] = buf.r2z[index] + beta.z*buf.pz[index];
buf.rx[index] = buf.r2x[index]; buf.ry[index] = buf.r2y[index]; buf.rz[index] = buf.r2z[index];
}
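// ApplyElasticForce: convert the solved velocity (vx,vy,vz) back into an
// acceleration, (v_new - v_old) / dt, and add it to the particle's force and
// acceleration accumulators.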
__global__ void ApplyElasticForce(bufList buf, int pnum, float* vx, float*vy, float*vz)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (_example != 2)
{
if (buf.MFtype[i] != 2) return;
}
else
if (buf.MFtype[i] < 2)
return;
int gc = buf.mgcell[i];
if (gc == GRID_UNDEF)
{
return;
}
int index = buf.elasticID[i];
if (index < 0 || index >= simData.numElasticPoints)
printf("elasticIndex wrong!\n");
float3 force;
force.x = (vx[index] - buf.mveleval[i].x) / simData.mf_dt;
force.y = (vy[index] - buf.mveleval[i].y) / simData.mf_dt;
force.z = (vz[index] - buf.mveleval[i].z) / simData.mf_dt;
buf.pressForce[i] = force;
buf.mforce[i] += force;
buf.maccel[i] += force;
}
__device__ float contributeColorField(int i, int cell, bufList buf, int& count)
{
float dsq, c, sum=0;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2 / d2;
float3 dist, vmr;
float cmterm;
int j, jndex;
if (buf.mgridcnt[cell] == 0) return sum;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
int index = buf.elasticID[i];
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.MFtype[j] == 2 || (_example == 2 && buf.MFtype[j] >= 2))
{
dist = (buf.mpos[i] - buf.mpos[j]); // dist in cm
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if (!(dsq < r2 && dsq > 0))
continue;
dsq = sqrt(dsq*d2);
jndex = buf.elasticID[j];
c = pow(simData.r2 - dsq*dsq, 3);
cmterm = buf.mf_restmass[j] * buf.density_solid[j]*c*simData.poly6kern;
sum += cmterm;
count++;
}
}
return sum;
}
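// ComputeElasticColorField: evaluate a poly6 color field over neighboring elastic
// particles and count them; particles with few solid neighbors (count <= 25) are
// flagged as surface particles.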
__global__ void ComputeElasticColorField(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (_example != 2)
{
if (buf.MFtype[i] != 2) return;
}
else
if (buf.MFtype[i] < 2)
return;
int gc = buf.mgcell[i];
if (gc == GRID_UNDEF)
{
return;
}
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
gc -= nadj;
int index = buf.elasticID[i];
if (index < 0 || index >= simData.numElasticPoints)
printf("elasticIndex wrong!\n");
buf.colorValue[i] = 0;
int count = 0;
for (int c = 0; c < simData.gridAdjCnt; c++)
{
buf.colorValue[i] += contributeColorField(i, gc + simData.gridAdj[c], buf, count);
}
if (count <= 25)
//if(count<=20)
buf.isSurface[index] = 1;
else
buf.isSurface[index] = 0;
}
__device__ float3 contributeElasticNormal(int i, int cell, bufList buf)
{
float dsq, c;
float3 sum = make_float3(0,0,0);
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2 / d2;
float3 dist, vmr;
float cmterm;
int j, jndex;
if (buf.mgridcnt[cell] == 0) return sum;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
int index = buf.elasticID[i];
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.MFtype[j] == 2 || (_example == 2 && buf.MFtype[j] >= 2))
{
dist = (buf.mpos[i] - buf.mpos[j]); // dist in cm
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if (!(dsq < r2 && dsq > 0))
continue;
			jndex = buf.elasticID[j];
			dsq = sqrt(dsq*d2);
			dist *= simData.psimscale;
c = simData.psmoothradius - dsq;
cmterm = buf.mf_restmass[j] * buf.density_solid[j] * c*c / dsq*simData.spikykern;
sum += cmterm * (buf.colorValue[j] - buf.colorValue[i])*dist;
}
}
return sum;
}
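// ComputeElasticNormal: estimate a surface normal per elastic particle as the
// negated SPH gradient of the color field, normalized when non-zero.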
__global__ void ComputeElasticNormal(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (_example != 2)
{
if (buf.MFtype[i] != 2) return;
}
else
if (buf.MFtype[i] < 2)
return;
int gc = buf.mgcell[i];
if (gc == GRID_UNDEF)
{
return;
}
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
gc -= nadj;
int index = buf.elasticID[i];
if (index < 0 || index >= simData.numElasticPoints)
printf("elasticIndex wrong!\n");
buf.normal[index] = make_float3(0,0,0);
for (int c = 0; c < simData.gridAdjCnt; c++)
{
buf.normal[index] -= contributeElasticNormal(i, gc + simData.gridAdj[c], buf);
}
float d = dot(buf.normal[index], buf.normal[index]);
if (d != 0)
buf.normal[index] /= sqrt(d);
}
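// Host-side driver for the elastic step: compute stresses and the right-hand side,
// run up to 20 conjugate-gradient iterations on the velocity system (dot products
// and residual norms via hipBLAS), apply the resulting elastic force, and finally
// recompute the color field and surface normals.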
void ComputeElasticForceCUDA()
{
ComputeDeformGrad << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: computeCorrectL: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
ComputeStrainAndStress << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: compute strain and stress: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
ComputeElasticForce << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: compute elastic force: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
int countNum = 0;
float errorIter, precision = 0.01;
float3 alpha, beta;
hipblasHandle_t handle;
hipblasCreate(&handle);
ComputeIterationStrainAndStress << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum, fbuf.vx, fbuf.vy, fbuf.vz);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: compute iteration strain and stress: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
ComputeIterationElasticForce << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum, fbuf.vx, fbuf.vy, fbuf.vz);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: compute iteration elastic force: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
initElasticIteration << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: init elastic iteration: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
float al = -1, t1, t2, t3;
do {
countNum++;
ComputeIterationStrainAndStress << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum, fbuf.px, fbuf.py, fbuf.pz);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: compute iteration strain and stress: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
ComputeIterationElasticForce << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum, fbuf.px, fbuf.py, fbuf.pz);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: compute iteration elastic force: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
hipblasSdot(handle, fcuda.numElasticPoints, fbuf.rx, 1, fbuf.rx, 1, &(alpha.x));
hipblasSdot(handle, fcuda.numElasticPoints, fbuf.ry, 1, fbuf.ry, 1, &(alpha.y));
hipblasSdot(handle, fcuda.numElasticPoints, fbuf.rz, 1, fbuf.rz, 1, &(alpha.z));
hipblasSdot(handle, fcuda.numElasticPoints, fbuf.px, 1, fbuf.Apx, 1, &(beta.x));
hipblasSdot(handle, fcuda.numElasticPoints, fbuf.py, 1, fbuf.Apy, 1, &(beta.y));
hipblasSdot(handle, fcuda.numElasticPoints, fbuf.pz, 1, fbuf.Apz, 1, &(beta.z));
hipDeviceSynchronize();
alpha /= beta;
updateV << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum, alpha);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: compute update V: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
//t1 = 0; t2 = 0; t3 = 0;
hipblasSasum(handle, fcuda.numElasticPoints, fbuf.r2x, 1, &t1);
hipblasSasum(handle, fcuda.numElasticPoints, fbuf.r2y, 1, &t2);
hipblasSasum(handle, fcuda.numElasticPoints, fbuf.r2z, 1, &t3);
hipDeviceSynchronize();
errorIter = t1 + t2 + t3;
if (errorIter < precision)
break;
//printf("iter num is %d, error is %f\n", countNum, errorIter);
hipblasSdot(handle, fcuda.numElasticPoints, fbuf.r2x, 1, fbuf.r2x, 1, &(beta.x));
hipblasSdot(handle, fcuda.numElasticPoints, fbuf.r2y, 1, fbuf.r2y, 1, &(beta.y));
hipblasSdot(handle, fcuda.numElasticPoints, fbuf.r2z, 1, fbuf.r2z, 1, &(beta.z));
hipblasSdot(handle, fcuda.numElasticPoints, fbuf.rx, 1, fbuf.rx, 1, &(alpha.x));
hipblasSdot(handle, fcuda.numElasticPoints, fbuf.ry, 1, fbuf.ry, 1, &(alpha.y));
hipblasSdot(handle, fcuda.numElasticPoints, fbuf.rz, 1, fbuf.rz, 1, &(alpha.z));
hipDeviceSynchronize();
beta /= alpha;
updateP << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum, beta);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: compute update V: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
} while (countNum < 20);
//ex1 for 5, ex2 for 5
//printf("\n");
ApplyElasticForce << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum, fbuf.vx, fbuf.vy, fbuf.vz);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: apply elastic force: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
ComputeElasticColorField << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: compute elastic color field: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
ComputeElasticNormal << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: compute elastic normal: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
hipblasDestroy(handle);
}
__device__ float contributeDivDarcyFlux(int i, int cell, bufList buf, float&normalize)
{
float dsq, c;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
//int jndex;
float3 dist, vmr;
float cmterm, cmterm1;
// float massj;
float pmterm, vmterm;
// float q;
int j, mulj;
float aveDenij, cx, xterm;
float sum = 0;
//if (i % 100 == 0)
// printf("particle %d's gridcnt is %d\n", i,buf.mgridcnt[cell]);
if (buf.mgridcnt[cell] == 0) return sum;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
//int index = buf.elasticID[i];
//int jndex,index = buf.elasticID[i];
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.misbound[j])
continue;
if (buf.MFtype[i] == buf.MFtype[j] && buf.MFtype[i] == 0)
continue;
//jndex = buf.elasticID[j];
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale; // dist in cm
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if (!(dsq < r2 && dsq > 0))
continue;
dsq = sqrt(dsq);
c = simData.psmoothradius - dsq;
//cmterm = c*c*simData.spikykern * simData.pmass / buf.mf_restdensity[j] / dsq;
//cmterm = -1 / dsq;
cmterm = c*c*simData.spikykern * simData.pmass / simData.mf_dens[1];
//cmterm = c*c*simData.spikykern * simData.pmass * buf.density_solid[buf.elasticID[j]] / dsq;
//if (buf.MFtype[i] == buf.MFtype[j])
sum += dot((buf.gradPressure[j]+ buf.gradPressure[i])*0.5, dist/dsq)*cmterm;
normalize += cmterm;
//else
// sum += dot(buf.gradPressure[i], dist)*cmterm;
}
return sum;
}
__device__ void contributePorePressure(int i, int cell, bufList buf,float* beta, float &sum, float&b)
{
float dsq, c;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist, vmr;
float cmterm, cmterm1;
// float massj;
float3 pmterm, vmterm;
// float q;
int j, mulj;
float aveDenij, cx, xterm;
if (buf.mgridcnt[cell] == 0) return;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
//int index = buf.elasticID[i];
float q;
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.MFtype[j] != 0)
continue;
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale; // dist in cm
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
q = sqrt(dsq / r2);
if (q <= 0 || q >= 1)
continue;
if (q < 0.5)
cmterm = 6 * (q*q*q - q*q) + 1;
else
cmterm = 2 * pow(1 - q, 3);
//if (q >= 1)
// continue;
//if (q >= 0 && q <= 0.5)
// cmterm = buf.density_solid[i] * (6 * (q*q*q - q*q) + 1);
//else
// cmterm = buf.density_solid[i] * 2*pow(1-q,3);
if (buf.totalDis[j*MAX_SOLIDNUM + buf.MFtype[i] - 2] <= 0.000001)
b = 1;
cmterm *= buf.mf_restmass[j] / buf.mf_restdensity[j];
//cmterm = pow((r2 - dsq), 3)*simData.poly6kern*buf.mf_restmass[j] * buf.mdensity[j] / buf.totalDis[j];
/*if (isnan(cmterm))
continue;*/
//cmterm *= buf.mf_restmass[j] / buf.mf_restdensity[j];
//if (buf.totalDis[j*MAX_SOLIDNUM + buf.MFtype[i] - 2] == 0)
// continue;
cmterm /= buf.totalDis[j*MAX_SOLIDNUM + buf.MFtype[i] - 2];
for (int k = 1; k < MAX_FLUIDNUM; ++k)
{
//for (int l = 0; l < MAX_SOLIDNUM; ++l)
{
sum += (buf.mf_beta[j*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM+buf.MFtype[i]-2] * simData.mf_dens[k] / simData.mf_mass[k]) * cmterm;
beta[k] += (buf.mf_beta[j*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[i] - 2] * simData.mf_dens[k] / simData.mf_mass[k]) * cmterm;
if (isnan(sum))
{
b = buf.mf_restdensity[j];
return;
}
}
/* sum += buf.mf_beta[j*MAX_FLUIDNUM + k] * cmterm;
beta[k] += buf.mf_beta[j*MAX_FLUIDNUM + k] * cmterm;*/
}
}
}
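// ComputeSolidPorePressure: for solid particles, accumulate the absorbed-fluid
// saturation from nearby fluid particles (cubic-spline weighted, normalized by each
// fluid particle's totalDis) and convert it to a pore pressure via the common
// compressibility; elastic solids (MFtype == 2) also get a volumetric-strain term.
// The solid's rest mass is updated to include the absorbed fluid. Fluid particles
// instead get the prescribed rest pore pressures from pressRatio.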
__global__ void ComputeSolidPorePressure(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (buf.MFtype[i]==1)return;
int gc = buf.mgcell[i];
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
if (gc == GRID_UNDEF) {
return;
}
gc -= nadj;
float fluidSum = 0;
float beta[MAX_FLUIDNUM];
float normalize = 0;
//if (i % 10000 == 0)
// printf("pressure ratio is (%f,%f,%f,%f),(%f,%f,%f,%f),(%f,%f,%f,%f) \n",
// simData.pressRatio[4], simData.pressRatio[5], simData.pressRatio[6], simData.pressRatio[7]
// , simData.pressRatio[8], simData.pressRatio[9], simData.pressRatio[10], simData.pressRatio[11]
// , simData.pressRatio[12], simData.pressRatio[13], simData.pressRatio[14], simData.pressRatio[15]);
//if(buf.MFtype[i] == 0)
//printf("%d's type is %d, beta is (%f,%f,%f)\n", i, buf.MFtype[i], beta[0], beta[1],
// beta[2]);
float b = 10;
if (buf.MFtype[i] > 1)
{
for (int k = 0; k < MAX_FLUIDNUM; ++k)
beta[k] = 0;
for (int c = 0; c < simData.gridAdjCnt; c++)
{
contributePorePressure(i, gc + simData.gridAdj[c], buf, beta, fluidSum, b);
}
/*if (fluidSum > 0.1)
printf("fluid sum is %f, beta is (%f,%f,%f,%f)\n",
fluidSum, beta[0], beta[1], beta[2], beta[3]);*/
for (int k = 0; k < MAX_FLUIDNUM; ++k)
{
//if (buf.MFtype[i] == 2||(_example==2))
if (buf.MFtype[i] == 2)
buf.pressure_water[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM+buf.MFtype[i]-2] =
simData.CoCompressibility*(fluidSum - (1 - simData.bulkModulus_porous / simData.bulkModulus_grains)*buf.volumetricStrain[buf.elasticID[i]]);
else
buf.pressure_water[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[i] - 2] = simData.CoCompressibility*fluidSum;
if (isnan(buf.pressure_water[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[i] - 2]))
printf("solid %d's pore pressure is nan.beta is (%f,%f,%f) density solid is %f, b is %.10f\n",
i, beta[1], beta[2], beta[3], buf.density_solid[i], b);
//if(buf.mpos[i].y>60&&i%10==0)
// printf("press water is %f\n", buf.pressure_water[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[i] - 2]);
buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[i] - 2] = beta[k];
}
float mass = simData.mf_mass[0];
for (int k = 1; k < MAX_FLUIDNUM; ++k)
mass += simData.stRatio*buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[i] - 2] * simData.mf_mass[buf.MFtype[i] - 2];
buf.mf_restmass[i] = mass;
/*if(buf.elasticID[i]%1000==0&& abs(buf.volumetricStrain[buf.elasticID[i]])>0.001)
printf("elastic %d's volume strain is %f\n", buf.elasticID[i],
buf.volumetricStrain[buf.elasticID[i]]);*/
}
else
{
for (int k = 1; k < MAX_FLUIDNUM; ++k)
for (int l = 0; l < MAX_SOLIDNUM; ++l)
{
buf.pressure_water[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] = simData.pressRatio[k*MAX_SOLIDNUM + l] * simData.rest_porosity*simData.CoCompressibility;// *buf.mf_alpha[i*MAX_FLUIDNUM + k];
//if (i % 10000 == 0)
// printf("%d's press water is %f\n", i, buf.pressure_water[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l]);
			}
		//buf.pressure_water[i*MAX_FLUIDNUM*MAX_SOLIDNUM + 0] = (simData.pressRatio[1]*buf.mf_beta[i*MAX_FLUIDNUM+1]+simData.pressRatio[2]*buf.mf_beta[i*MAX_FLUIDNUM + 2]) * simData.rest_porosity*simData.CoCompressibility;
}
}
__device__ void findNearbySolid(int i, int cell, bufList buf)
{
float dsq, c, dsq2;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist;
float cmterm;
float pmterm;
int j, jndex;
//int t = -1;
if (buf.mgridcnt[cell] == 0) return;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
float q;
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.MFtype[j] == 1|| buf.MFtype[j] == 0)
continue;
//if (buf.isSurface[buf.elasticID[j]] == 0)
// continue;
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale; // dist in cm
dsq2 = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if (!(dsq2 < r2 && dsq2 > 0))
continue;
q = sqrt(dsq2 / r2);
if (q >= 0 && q <= 0.5)
buf.totalDis[i*MAX_SOLIDNUM + buf.MFtype[j] - 2] += (6 * (pow(q, 3) - pow(q, 2)) + 1);
else
buf.totalDis[i*MAX_SOLIDNUM + buf.MFtype[j] - 2] += 2 * pow(1 - q, 3);
buf.solidCount[i*MAX_SOLIDNUM + buf.MFtype[j] - 2] += 1;
//if (q > 2)
// continue;
//if (q >= 0 && q <= 1)
// buf.totalDis[i*MAX_SOLIDNUM+buf.MFtype[j]-2] += simData.CubicSplineKern2*(1 - 1.5*q*q*(1 - q / 2));
//else
// buf.totalDis[i*MAX_SOLIDNUM + buf.MFtype[j] - 2] += simData.CubicSplineKern1*pow(2 - q, 3);
//total_dist += pow((r2 - dsq2), 3)*simData.poly6kern*buf.mf_restmass[i] * buf.mdensity[i];
//total_dist += sqrt(dsq2);
}
}
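// FindNearbySolid: for each fluid particle, accumulate per-solid-type kernel weights
// (totalDis) and neighbor counts. Phases with no nearby solid of a given type hand
// their absorbed fraction (beta) back to the free fraction (alpha); fluid surrounded
// by solid type 3 (>= 22 neighbors) is fully absorbed and marked isInside. The free
// fractions are then summed and used to compute the outside rest density.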
__global__ void FindNearbySolid(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum)
return;
if (buf.MFtype[i] != 0)
return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
for (int k = 0; k < MAX_SOLIDNUM; ++k) {
buf.totalDis[i*MAX_SOLIDNUM + k] = 0;
buf.solidCount[i*MAX_SOLIDNUM + k] = 0;
}
for (int c = 0; c < simData.gridAdjCnt; c++)
{
findNearbySolid(i, gc + simData.gridAdj[c], buf);
}
//for (int k = 0; k < MAX_SOLIDNUM; ++k)
// buf.totalDis[i*MAX_SOLIDNUM + k] *= buf.mf_restmass[i] * buf.mdensity[i];
//if (buf.solidCount[i] >= 25)
// buf.isInside[i] = true;
//else
// buf.isInside[i] = false;
float step;
float betasum = 0;
for (int l = 0; l < MAX_SOLIDNUM; ++l)
{
if (buf.solidCount[i*MAX_SOLIDNUM + l] == 0)
{
for (int k = 1; k < simData.mf_catnum; ++k)
{
step = (-buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l]);
buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] = 0;
buf.mf_beta_next[i*simData.mf_catnum*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] = 0;
buf.mf_alpha[i*simData.mf_catnum + k] -= step;
buf.mf_alpha_next[i*simData.mf_catnum + k] -= step;
}
}
if(l == 3)
if (buf.solidCount[i*MAX_SOLIDNUM + l] >= 22)
{
for (int k = 1; k < simData.mf_catnum; ++k)
{
step = buf.mf_alpha[i*simData.mf_catnum + k];
buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] += step;
buf.mf_beta_next[i*simData.mf_catnum*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] += step;
buf.mf_alpha[i*simData.mf_catnum + k] = 0;
buf.mf_alpha_next[i*simData.mf_catnum + k] = 0;
}
buf.isInside[i] = true;
}
for (int k = 1; k < simData.mf_catnum; ++k)
betasum += buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l];
}
buf.mf_alpha_sum[i] = 0;
buf.mf_restdensity_out[i] = 0;
//buf.rest_colorValue[i] = 0;
for (int k = 1; k < simData.mf_catnum; ++k)
{
buf.mf_alpha_sum[i] += buf.mf_alpha[i*simData.mf_catnum + k];
buf.mf_restdensity_out[i] += buf.mf_alpha[i*simData.mf_catnum + k] * simData.mf_dens[k];
//buf.rest_colorValue[i] += buf.mf_alpha[i*simData.mf_catnum + k] * simData.colorValue[k];
}
if (abs(betasum + buf.mf_alpha_sum[i] - 1) > 0.01 || isnan(betasum))
printf("alphasum is %f, betasum is %f\n", buf.mf_alpha_sum[i], betasum);
if (buf.mf_alpha_sum[i] > 0.0001)
buf.mf_restdensity_out[i] /= buf.mf_alpha_sum[i];
else
{
buf.mf_restdensity_out[i] = 1;
buf.mf_alpha_sum[i] = 0;
}
////if (i % 10000 == 0)
//if(buf.mf_alpha_sum[i] < 0.99)
// printf("mf_dens is (%f,%f,%f,%f), alpha sum is %f, densityout is %f, alpha is (%f,%f,%f), solid count is (%d,%d,%d,%d),beta is (%f,%f,%f,%f)(%f,%f,%f,%f)(%f,%f,%f,%f)(%f,%f,%f,%f)\n",
// simData.mf_dens[0], simData.mf_dens[1], simData.mf_dens[2], simData.mf_dens[3], buf.mf_alpha_sum[i], buf.mf_restdensity_out[i],
// buf.mf_alpha[i*simData.mf_catnum + 1], buf.mf_alpha[i*simData.mf_catnum + 2], buf.mf_alpha[i*simData.mf_catnum + 3],
// buf.solidCount[i*MAX_SOLIDNUM + 0], buf.solidCount[i*MAX_SOLIDNUM + 1], buf.solidCount[i*MAX_SOLIDNUM + 2], buf.solidCount[i*MAX_SOLIDNUM + 3],
// buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + 0 * MAX_SOLIDNUM + 0], buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + 1 * MAX_SOLIDNUM + 0],
// buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + 2 * MAX_SOLIDNUM + 0], buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + 3 * MAX_SOLIDNUM + 0],
// buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + 0 * MAX_SOLIDNUM + 1],buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + 1*MAX_SOLIDNUM + 1],
// buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + 2*MAX_SOLIDNUM + 1],buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + 3*MAX_SOLIDNUM + 1],
// buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + 0 * MAX_SOLIDNUM + 2], buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + 1 * MAX_SOLIDNUM + 2],
// buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + 2 * MAX_SOLIDNUM + 2], buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + 3 * MAX_SOLIDNUM + 2],
// buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + 0 * MAX_SOLIDNUM + 3], buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + 1 * MAX_SOLIDNUM + 3],
// buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + 2 * MAX_SOLIDNUM + 3], buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + 3 * MAX_SOLIDNUM + 3]);
}
__device__ int findNearestSolid(int i, int cell, bufList buf, float*distance) {
float dsq, c, dsq2;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist;
float cmterm;
float pmterm;
int j, jndex;
int t = -1;
if (buf.mgridcnt[cell] == 0) return -1;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.MFtype[j] <= 1)
continue;
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale; // dist in cm
dsq2 = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if (!(dsq2 < r2 && dsq2 > 0))
continue;
if (dsq2 < distance[buf.MFtype[j] - 2])
{
distance[buf.MFtype[j] - 2] = dsq2;
t = j;
}
}
return t;
}
__global__ void ComputeFPCorrection(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum)
return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
buf.rest_colorValue[i] = simData.colorValue[0];
if (buf.MFtype[i] != 0)
return;
gc -= nadj;
float distance[MAX_SOLIDNUM];
for (int k = 0; k < MAX_SOLIDNUM; ++k) {
distance[k] = simData.r2;
}
int j = -1, t;
//buf.fluidPercent[i] = buf.nextFluidPercent[i];
float step;
for (int c = 0; c < simData.gridAdjCnt; c++)
{
t = findNearestSolid(i, gc + simData.gridAdj[c], buf, distance);
/*if (t != -1)
j = t;*/
}
float oldFP;
}
__device__ void contributePoroVelocity(int i, int cell, bufList buf, float3* poroVel, float* normalize, float3* advectVel, int &count)
{
float dsq, c, dsq2;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist;
float cmterm;
float pmterm;
int j, jndex;
if (buf.mgridcnt[cell] == 0) return;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
float q = 0;
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.MFtype[j] <= 1)
continue;
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale; // dist in cm
dsq2 = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
q = sqrt(dsq2 / r2);
if (q >= 1 || q <= 0)
continue;
if (q <= 0.5)
pmterm = 6 * (q*q*q - q*q) + 1;
else
pmterm = 2 * pow(1 - q, 3);
//if (q >= 2 || q <= 0)
// continue;
//if (q > 1)
// pmterm = 0.25*pow(2 - q, 3);
//else
// pmterm = 1 - 1.5*q*q*(1 - 0.5*q);
//pmterm *= buf.density_solid[j];
//pmterm *= simData.CubicSplineKern2;
for (int k = 1; k < simData.mf_catnum; k++)
{
if (isnan(dot(buf.gradPressure[j*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[j] - 2], buf.gradPressure[j*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[j] - 2])))
{
count++;
continue;
}
poroVel[k*MAX_SOLIDNUM + buf.MFtype[j] - 2] += pmterm * buf.gradPressure[j*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM+ buf.MFtype[j] - 2];
advectVel[k*MAX_SOLIDNUM + buf.MFtype[j] - 2] += pmterm * buf.mveleval[j];
}
normalize[buf.MFtype[j] - 2] += pmterm;
}
return;
}
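// ComputePoroVelocity: interpolate, per fluid phase and solid type, the Darcy flux
// (gradPressure) and advective velocity of nearby solid particles to this fluid
// particle, form the pore velocity, and add the resulting momentum-exchange
// acceleration (weighted by beta and the phase densities) to mforce.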
__global__ void ComputePoroVelocity(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum)
return;
if (buf.MFtype[i] != 0)
return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
float normalize[MAX_SOLIDNUM];// = 0;
float3 poroVel[MAX_FLUIDNUM * MAX_SOLIDNUM];
float3 advectVel[MAX_FLUIDNUM * MAX_SOLIDNUM];
float3 force, forcesum = make_float3(0,0,0);
float betadensity = 0;
float betasum = 0;
//buf.poroForce[i] = make_float3(0, 0, 0);
int count = 0;
	// loop over fluid phases (k) and solid types (l); the local arrays are sized MAX_FLUIDNUM*MAX_SOLIDNUM
	for (int k = 1; k < simData.mf_catnum; ++k)
{
for (int l = 0; l < MAX_SOLIDNUM; ++l)
{
poroVel[k*MAX_SOLIDNUM+l] = make_float3(0, 0, 0);
advectVel[k*MAX_SOLIDNUM + l] = make_float3(0, 0, 0);
betadensity += buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] * simData.mf_dens[k];
betasum += buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l];
normalize[l] = 0;
}
}
//if (buf.mf_restdensity[i] <= 10)
// printf("rest den222 is %f, alpha is (%f,%f,%f), betasum is %f\n",
// buf.mf_restdensity[i], buf.mf_alpha[i*MAX_FLUIDNUM + 1], buf.mf_alpha[i*MAX_FLUIDNUM + 2], buf.mf_alpha[i*MAX_FLUIDNUM + 3],
// betasum);
if (betadensity > 1)
betadensity /= betasum;
//int count = 0;
for (int c = 0; c < simData.gridAdjCnt; c++)
{
contributePoroVelocity(i, gc + simData.gridAdj[c], buf, poroVel, normalize, advectVel, count);
}
buf.poroForce[i] = make_float3(0, 0, 0);
float3 porevel, advectV;
for (int l = 0; l < MAX_SOLIDNUM; ++l)
{
if (normalize[l] != 0)
{
for (int k = 1; k < simData.mf_catnum; ++k)
{
porevel = poroVel[k*MAX_SOLIDNUM + l];
advectV = advectVel[k*MAX_SOLIDNUM + l];
poroVel[k*MAX_SOLIDNUM + l] /= buf.totalDis[i*MAX_SOLIDNUM + l];
advectVel[k*MAX_SOLIDNUM + l] /= buf.totalDis[i*MAX_SOLIDNUM + l];
//poroVel[k] /= abs(normalize);
//advectVel[k] /= abs(normalize);
buf.poroVel[i*simData.mf_catnum*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] = poroVel[k*MAX_SOLIDNUM + l] + advectVel[k*MAX_SOLIDNUM + l];
//force = buf.mf_beta[i*MAX_FLUIDNUM + k]*(poroVel[k] - buf.mveleval[i])/simData.mf_dt;
force = simData.mf_dens[k] * buf.poroVel[i*simData.mf_catnum*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] - betadensity * buf.mveleval[i];
force *= buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM+l] / (simData.mf_dt*buf.mf_restdensity[i]);
//buf.mforce[i] += force;
forcesum += force;
//buf.poroForce[i] += force;
				if (isnan(dot(force, force)))
					printf("phase %d's pore force is nan, poro vel is (%f,%f,%f), advect vel is (%f,%f,%f), total dis is %f, count is %d\n",
						k, porevel.x, porevel.y, porevel.z,
						advectV.x, advectV.y, advectV.z,
						buf.totalDis[i*MAX_SOLIDNUM + l], count);
}
//if (buf.mf_alpha[i*MAX_FLUIDNUM + 1] > 0.99 && dot(buf.poroForce[i], buf.poroForce[i]) > 1)
// printf("%d's alpha is (%f,%f), beta is (%f,%f), vel is (%f,%f,%f), poro force is (%f,%f,%f)\n",
// i, buf.mf_alpha[i*MAX_FLUIDNUM + 1], buf.mf_alpha[i*MAX_FLUIDNUM + 2],
// buf.mf_beta[i*MAX_FLUIDNUM + 1], buf.mf_beta[i*MAX_FLUIDNUM + 2], buf.mveleval[i].x, buf.mveleval[i].y,
// buf.mveleval[i].z, buf.poroForce[i].x, buf.poroForce[i].y, buf.poroForce[i].z);
}
else
{
for (int k = 1; k < simData.mf_catnum; ++k)
buf.poroVel[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] = make_float3(0, 0, 0);
}
}
//if (isnan(dot(forcesum, forcesum)))
// printf("particle %d's type is %d, poro accel is nan, total distance is %f\n",
// i, buf.MFtype[i], buf.totalDis[i*MAX_SOLIDNUM + 3]);
//if (buf.MFtype[i] == 0 && i % 10000 == 0)
// printf("particle %d's mixture vel is (%f,%f,%f), fluid vel is (%f,%f,%f)\n",
// i, buf.mveleval[i].x, buf.mveleval[i].y, buf.mveleval[i].z,
// buf.fluidVel[i*MAX_FLUIDNUM + 1].x, buf.fluidVel[i*MAX_FLUIDNUM + 1].y,
// buf.fluidVel[i*MAX_FLUIDNUM + 1].z);
//betasum = forcesum.x*forcesum.x + forcesum.y*forcesum.y + forcesum.z*forcesum.z;
//if (betasum > simData.AL2) {
// forcesum *= simData.AL / sqrt(betasum);
//}
//if (buf.isInside[i] && i % 10 == 0)
// printf("p %d's poro force sum is (%f,%f,%f)\n", i, forcesum.x, forcesum.y, forcesum.z);
buf.mforce[i] += forcesum;
}
__device__ void contributeFluidFlux(int i, int cell, bufList buf, float&normalize)
{
float dsq, c;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2 / d2;
float3 dist, vmr;
float cmterm, cmterm1;
// float massj;
float3 pmterm, vmterm;
// float q;
int j, mulj;
float aveDenij, cx, xterm;
if (buf.mgridcnt[cell] == 0) return;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
int index = buf.elasticID[i];
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
//if (buf.MFtype[j] == buf.MFtype[i])
if(buf.MFtype[j] <= 1)
continue;
dist = (buf.mpos[i] - buf.mpos[j]); // dist in cm
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if (!(dsq < r2 && dsq > 0))
continue;
dist *= simData.psimscale;
dsq = sqrt(dsq*d2);
c = (simData.psmoothradius - dsq);
cmterm = c*c*simData.spikykern*buf.mf_restmass[j] * buf.density_solid[j];
pmterm = dist / dsq*cmterm;
for (int k = 1; k < simData.mf_catnum; ++k)
{
//cmterm1 = simData.CoCompressibility * simData.rest_porosity - buf.pressure_water[j*MAX_FLUIDNUM + k];
cmterm1 = buf.pressure_water[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[j] - 2]
- buf.pressure_water[j*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[j] - 2];
buf.gradPressure[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[j] - 2] +=
(buf.mf_alpha[i*MAX_FLUIDNUM+k] +
buf.mf_beta[j*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[j] - 2])
*cmterm1*pmterm;
}
normalize += cmterm;
}
}
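// ComputeFluidFlux: for each fluid particle and phase, compute the capillary Darcy
// flux toward nearby solid particles from the pore-pressure difference, scaled by
// capillary * permeability / viscosity.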
__global__ void ComputeFluidFlux(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum)
return;
//if (buf.misbound[i])
// return;
if (buf.MFtype[i] != 0)
return;
//if (buf.MFtype[i] == 1 && buf.isSurface[buf.elasticID[i]]!=1)
// return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
float normalize = 0;
for(int k=1;k<simData.mf_catnum;++k)
for (int l = 0; l<MAX_SOLIDNUM; ++l)
buf.gradPressure[i*MAX_FLUIDNUM*MAX_SOLIDNUM+k*MAX_SOLIDNUM+l] = make_float3(0, 0, 0);
for (int c = 0; c < simData.gridAdjCnt; c++)
{
contributeFluidFlux(i, gc + simData.gridAdj[c], buf, normalize);
}
//if(normalize !=0)
for (int k = 1; k<simData.mf_catnum; ++k)
for (int l = 0; l < MAX_SOLIDNUM; ++l)
//buf.gradPressure[i*MAX_FLUIDNUM + k] *= simData.mf_permeability[k] / (simData.mf_visc[k]*abs(normalize));
{
buf.gradPressure[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l]
*= simData.capillary*simData.mf_permeability[k*MAX_SOLIDNUM + l] / simData.mf_visc[k];
//if (dot(buf.gradPressure[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l], buf.gradPressure[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l]) != 0)
// printf("%d's phase %d %d's grad pressure is (%f,%f,%f)\n", i, k, l,
// buf.gradPressure[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l].x, buf.gradPressure[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l].y,
// buf.gradPressure[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l].z);
}
//if (isnan(dot(buf.gradPressure[i], buf.gradPressure[i])))
//if(dot(buf.gradPressure[i*MAX_FLUIDNUM + 1], buf.gradPressure[i*MAX_FLUIDNUM + 1])!=0&&i%100==0)
// printf("particle %d's type is %d, grad pressure is (%f,%f,%f)\n",
// i, buf.MFtype[i], buf.gradPressure[i*MAX_FLUIDNUM + 1].x, buf.gradPressure[i*MAX_FLUIDNUM + 1].y, buf.gradPressure[i*MAX_FLUIDNUM + 1].z
// );
}
__device__ void contributeSolidDarcyFlux(int i, int cell, bufList buf)
{
float dsq, c;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2 / d2;
float3 dist, vmr;
float cmterm, cmterm1;
// float massj;
float3 pmterm, vmterm;
// float q;
int j, mulj;
float aveDenij, cx, xterm;
if (buf.mgridcnt[cell] == 0) return;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
//buf.MFtype[j]<=1
if (buf.MFtype[j] != buf.MFtype[i])
continue;
dist = (buf.mpos[i] - buf.mpos[j]); // dist in cm
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if (!(dsq < r2 && dsq > 0))
continue;
dist *= simData.psimscale;
dsq = sqrt(dsq*d2);
c = (simData.psmoothradius - dsq);
//if (buf.MFtype[i] == 1)
cmterm = c*c*simData.spikykern*buf.mf_restmass[j] * buf.density_solid[i];
//else
// cmterm = c*c*simData.spikykern*buf.mf_restmass[j] * buf.mdensity[i];
pmterm = dist / dsq*cmterm;
for (int k = 1; k<simData.mf_catnum; ++k)
buf.gradPressure[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[i]-2] -=
(buf.pressure_water[j*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[i] - 2]
- buf.pressure_water[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[i] - 2])*pmterm;
//normalize += cmterm;
}
}
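// ComputeSolidDarcyFlux: compute the pore (Darcy) velocity carried by solid
// particles from the pore-pressure gradient over same-type solid neighbors, scaled
// by permeability / (viscosity * rest porosity).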
__global__ void ComputeSolidDarcyFlux(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum)
return;
if (buf.MFtype[i] <= 1)
return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
for (int k = 1; k<simData.mf_catnum; ++k)
buf.gradPressure[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[i] - 2] = make_float3(0, 0, 0);
for (int c = 0; c < simData.gridAdjCnt; c++)
{
contributeSolidDarcyFlux(i, gc + simData.gridAdj[c], buf);
}
for (int k = 1; k < simData.mf_catnum; ++k)
{
//poro velocity
buf.gradPressure[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM+buf.MFtype[i]-2]
*= simData.mf_permeability[k*MAX_SOLIDNUM+ buf.MFtype[i] - 2] / (simData.mf_visc[k] * simData.rest_porosity);
//buf.gradPressure[i*MAX_FLUIDNUM + k] += buf.mveleval[i];
}
}
__device__ void contributeFluidChange(int i, int cell, bufList buf)
{
float dsq, c;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
//int jndex;
float3 dist, vmr;
float cmterm, cmterm1;
// float massj;
float pmterm, vmterm;
// float q;
int j, mulj;
float aveDenij, cx, xterm;
float sum = 0;
//if (i % 100 == 0)
// printf("particle %d's gridcnt is %d\n", i,buf.mgridcnt[cell]);
if (buf.mgridcnt[cell] == 0) return;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
//int index = buf.elasticID[i];
//int jndex,index = buf.elasticID[i];
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.MFtype[j] <= 1)
continue;
//jndex = buf.elasticID[j];
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale; // dist in cm
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if (!(dsq < r2 && dsq > 0))
continue;
dsq = sqrt(dsq);
c = simData.psmoothradius - dsq;
cmterm = c*c*simData.spikykern * buf.mf_restmass[j] * buf.density_solid[j];
for(int k=1;k<simData.mf_catnum;++k)
buf.divDarcyFlux[i*MAX_FLUIDNUM*MAX_SOLIDNUM+k*MAX_SOLIDNUM+buf.MFtype[j]-2] +=
dot(buf.gradPressure[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[j] - 2], dist / dsq)*cmterm;
//normalize += cmterm;
}
return;
}
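// ComputeFluidChange: take the divergence of the Darcy flux at each fluid particle
// (against nearby solid particles) to obtain the fraction absorbed into or released
// from the solid this step, clamp it so alpha and beta stay non-negative, and apply
// it to mf_alpha_next / mf_beta_next.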
__global__ void ComputeFluidChange(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (buf.MFtype[i] != 0)
return;
int gc = buf.mgcell[i];
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
if (gc == GRID_UNDEF) return;
gc -= nadj;
for (int k = 0; k<simData.mf_catnum; ++k)
for(int l=0;l<MAX_SOLIDNUM;++l)
buf.divDarcyFlux[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] = 0;
for (int c = 0; c < simData.gridAdjCnt; c++)
{
contributeFluidChange(i, gc + simData.gridAdj[c], buf);
}
for (int k = 1; k < simData.mf_catnum; ++k)
{
for (int l = 0; l < MAX_SOLIDNUM; ++l)
{
buf.divDarcyFlux[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] *= simData.mf_dt;
if (buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] == 0)
buf.poroVel[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] = make_float3(0, 0, 0);
if (buf.divDarcyFlux[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] > 0)
{
if (buf.mf_alpha[i*MAX_FLUIDNUM + k] - buf.divDarcyFlux[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] < 0.001)
{
buf.divDarcyFlux[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] = buf.mf_alpha[i*MAX_FLUIDNUM + k];
}
}
if (buf.divDarcyFlux[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] < 0)
{
if (buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] + buf.divDarcyFlux[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] < 0.001)
buf.divDarcyFlux[i*MAX_FLUIDNUM *MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] = -buf.mf_beta[i*MAX_FLUIDNUM *MAX_SOLIDNUM + k*MAX_SOLIDNUM + l];
}
buf.mf_beta_next[i*MAX_FLUIDNUM *MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] += buf.divDarcyFlux[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l];
buf.mf_alpha_next[i*MAX_FLUIDNUM + k] -= buf.divDarcyFlux[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l];
//if (isnan(buf.divDarcyFlux[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l]))
//if(buf.divDarcyFlux[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l]!=0)
// printf("particle %d's phase %d's div Darcy flux is %f, darcy flux is (%f,%f,%f)\n",
// i, k, buf.divDarcyFlux[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l],
// buf.gradPressure[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l].x,
// buf.gradPressure[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l].y,
// buf.gradPressure[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l].z);
}
}
//if (buf.mf_alpha[i*MAX_FLUIDNUM + 1] < buf.mf_alpha[i*MAX_FLUIDNUM + 2]-0.1&&!buf.isInside[i])
// printf("particle %d's alpha is (%f,%f), beta is (%f,%f), divDarcyFlux is (%f,%f)\n",
// i, buf.mf_alpha[i*MAX_FLUIDNUM + 1], buf.mf_alpha[i*MAX_FLUIDNUM + 2],
// buf.mf_beta[i*MAX_FLUIDNUM + 1], buf.mf_beta[i*MAX_FLUIDNUM + 2],
// buf.divDarcyFlux[i*MAX_FLUIDNUM + 1], buf.divDarcyFlux[i*MAX_FLUIDNUM + 2]);
}
__device__ void contributeFluidAdvance(int i, int cell, bufList buf, float3*gradBeta, float*DivVelocity)
{
float dsq, c, dsq2;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist;
float cmterm;
float pmterm;
int j;
if (buf.mgridcnt[cell] == 0) return;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
for (int cndx = cfirst; cndx < clast; cndx++) {
j = buf.mgrid[cndx];
if (buf.MFtype[j] != 0)
continue;
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale; // dist in cm
dsq = dot(dist, dist);
if (!(dsq < r2 && dsq > 0))
continue;
dsq = sqrt(dsq);
c = simData.psmoothradius - dsq;
for (int k = 1; k < simData.mf_catnum; ++k)
{
cmterm = c*c*simData.spikykern * buf.mf_restmass[j] * buf.mdensity[j];
for (int l = 0; l < MAX_SOLIDNUM; ++l)
{
DivVelocity[k*MAX_SOLIDNUM + l] += cmterm *
dot((buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM+l] * buf.poroVel[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM+l] +
buf.mf_beta[j*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM+l] * buf.poroVel[j*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM+l]), dist);
gradBeta[k*MAX_SOLIDNUM+l] += cmterm * (buf.mf_beta[j*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM+l] - buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM+l])*dist;
}
}
}
return;
}
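// ComputeFluidAdvance: advect the absorbed fraction beta between fluid particles
// using the divergence of the beta-weighted pore velocities and the gradient of
// beta, clamping the per-step change at -0.99.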
__global__ void ComputeFluidAdvance(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum)
return;
if (buf.MFtype[i] != 0)
return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
float3 gradBeta[MAX_FLUIDNUM*MAX_SOLIDNUM];
float DivVelocity[MAX_FLUIDNUM*MAX_SOLIDNUM],betachange[MAX_FLUIDNUM*MAX_SOLIDNUM];
float sigma = 1;
for (int k = 1; k < simData.mf_catnum; ++k)
{
for (int l = 0; l < MAX_SOLIDNUM; ++l)
{
gradBeta[k*MAX_SOLIDNUM + l] = make_float3(0, 0, 0);
DivVelocity[k*MAX_SOLIDNUM + l] = 0;
}
}
for (int c = 0; c < simData.gridAdjCnt; c++)
{
contributeFluidAdvance(i, gc + simData.gridAdj[c], buf, gradBeta, DivVelocity);
}
//float betasum = 0;
for (int k = 1; k < simData.mf_catnum; ++k)
{
for (int l = 0; l < MAX_SOLIDNUM; ++l)
{
betachange[k*MAX_SOLIDNUM+l] = sigma*simData.mf_dt*(-DivVelocity[k*MAX_SOLIDNUM + l] + dot(buf.mveleval[i], gradBeta[k*MAX_SOLIDNUM + l]));
/*if (abs(betachange[k]) >= 0.0001)
printf("error! particle %d's beta change is (%f,%f)\n",
i, betachange[1], betachange[2]);*/
//betachange limit
if (betachange[k*MAX_SOLIDNUM + l] < -0.99)
{
betachange[k*MAX_SOLIDNUM + l] = -0.99;// * ((int)(buf.mf_alpha[muloffseti+fcount]>0)-(int)(buf.mf_alpha[muloffseti+fcount]<0));
}
//betasum += buf.mf_beta_next[i*MAX_FLUIDNUM + k];
buf.mf_beta_next[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] += betachange[k*MAX_SOLIDNUM + l];
}
}
//if (i % 10000 == 0 && buf.solidCount[i]!=0)
// printf("particle %d's beta change is (%f,%f)\n",
// i, betachange[1], betachange[2]);
}
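// Per-cell helper for ComputeCapillaryForce: for every neighbouring solid
// particle (MFtype > 1) it adds a term along the pair direction, weighted by
// particle i's total absorbed fraction beta for that solid type and a
// piecewise spline weight in q = r/h (3q^2 - 2q for q <= 0.5, -(1-q)^2
// otherwise); the commented-out lines keep earlier kernel choices for reference.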
__device__ float3 contributeCapillaryForce(int i, int cell, bufList buf)
{
float dsq, c, dsq2;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist;
float cmterm;
float3 pmterm;
int j, jndex;
float3 sum = make_float3(0,0,0);
if (buf.mgridcnt[cell] == 0) return sum;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
//float kernel, betasum, kparm = 0.007 / pow(simData.psmoothradius, (float)(3.25));
float kernel, betasum, kparm = 8 / (3.1415926*pow(simData.psmoothradius, 3));
float q;
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.MFtype[j] > 1)
{
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale; // dist in cm
dsq2 = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if (!(dsq2 < r2 && dsq2 > 0))
continue;
q = sqrt(dsq2 / r2);
//if (q > 1||q==0)
// continue;
if (q <= 0.5)
kernel = 3*q*q-2*q;
else
kernel = -pow(1-q,2);
//kernel *= kparm;
dsq = sqrt(dsq2);
//c = simData.psmoothradius - dsq;
betasum = 0;
for (int k = 1; k < simData.mf_catnum; ++k)
betasum += buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[j] - 2];
sum += betasum*buf.mf_restmass[j] * buf.density_solid[j] * kernel *simData.gradCubicSplineKern * dist / dsq;
//betasum = 1;
//sum += -betasum * buf.mf_restdensity[i] * buf.density_solid[j] * dist / dsq * kernel;
//sum += betasum*buf.mf_restmass[j] * buf.density_solid[j] * c*c *simData.spikykern * dist / dsq;
//dsq = sqrt(dsq2);
//if (2 * dsq > simData.psmoothradius)
// continue;
//c = simData.psmoothradius - dsq;
//betasum = 0;
//for (int k = 1; k < simData.mf_catnum; ++k)
// betasum += buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[j] - 2];
////sum += betasum * buf.mf_restmass[j] * c*c *simData.spikykern * dist / dsq;
// //betasum += buf.mf_alpha[i*MAX_FLUIDNUM + k] * simData.mf_dens[k];
//betasum = 1;
//kernel = pow((float)(2 * dsq - 4 * dsq*dsq / simData.psmoothradius), (float)0.25);
////kernel = sqrt(sqrt(6 * dsq - 2 * simData.psmoothradius - 4 * dsq*dsq / simData.psmoothradius));
//kernel *= kparm;
//sum += -betasum * buf.mf_restdensity[i] * buf.density_solid[j] * dist/dsq * kernel;
}
}
return sum;
}
__global__ void ComputeCapillaryForce(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum)
return;
if (buf.MFtype[i] != 0)
return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
float colorField = 0;
float3 normal = make_float3(0, 0, 0);
for (int c = 0; c < simData.gridAdjCnt; c++)
{
normal += simData.capillaryForceRatio * contributeCapillaryForce(i, gc + simData.gridAdj[c], buf);
}
if ( isnan(dot(normal, normal)))
printf("capillary force is (%f,%f,%f)\n", normal.x, normal.y, normal.z);
//colorField = dot(normal, normal);
//if (colorField > simData.AL2) {
// normal *= simData.AL / sqrt(colorField);
//}
buf.mforce[i] += normal;
buf.poroForce[i] += normal;
buf.maccel[i] = buf.mforce[i];
}
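// Per-cell helper for ComputeInnerBoundaryForce: an artificial-viscosity style
// penalty against rigid boundary particles (MFtype == 1) closer than half the
// smoothing radius, scaled by the caller-supplied absorbed fraction betasum
// and the kernel constant kparm.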
__device__ float3 contributeInnerBoundaryForce(int i, int cell, bufList buf, float betasum, float kparm)
{
float dsq, c, dsq2;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist;
float cmterm;
float3 pmterm;
int j, jndex;
float3 sum = make_float3(0, 0, 0);
if (buf.mgridcnt[cell] == 0) return sum;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.MFtype[j] == 1)
{
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale; // dist in cm
dsq2 = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
//if (!(dsq2 < r2 && dsq2 > 0))
// continue;
dsq = sqrt(dsq2);
if (2 * dsq >= simData.psmoothradius)
continue;
cmterm = 0.5*buf.mf_visc[i]*simData.psmoothradius*buf.mdensity[i];
cmterm *= (max((float)0, dot(dist, -buf.mveleval[i]))) / (0.01*r2 + dsq2)*buf.density_solid[j];
//c = (simData.psmoothradius - dsq);
//if (buf.MFtype[i] == 1)
//cmterm *= c*c*simData.spikykern;
//if (2 * dsq - 4 * dsq2 / simData.psmoothradius < 0)
// continue;
cmterm *= kparm*pow(2 * dsq - 4 * dsq2 / simData.psmoothradius, (float)0.25);
//if (isnan(cmterm))
// continue;
sum += betasum*cmterm * dist / dsq;
}
}
return sum;
}
__global__ void ComputeInnerBoundaryForce(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum)
return;
if (buf.MFtype[i] != 0)
return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
float colorField = 0;
float betasum = 0;
for (int k = 1; k < simData.mf_catnum; ++k)
for(int l=1;l<=3;++l)
betasum += buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l];
//betasum = 1;
if (betasum < 0.001)
return;
float kparm = 0.007 / pow(simData.psmoothradius, (float)(3.25));
//printf("beta sum%f\n", betasum);
float3 normal = make_float3(0, 0, 0);
for (int c = 0; c < simData.gridAdjCnt; c++)
{
normal += contributeInnerBoundaryForce(i, gc + simData.gridAdj[c], buf, betasum, kparm);
}
if (isnan(dot(normal,normal)))
printf("inner boundary force is (%f,%f,%f)\n", normal.x, normal.y, normal.z);
//colorField = dot(normal, normal);
//if (colorField > simData.AL2) {
// normal *= simData.AL / sqrt(colorField);
//}
buf.mforce[i] += normal;
buf.poroForce[i] += normal;
buf.maccel[i] = buf.mforce[i];
}
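// Per-cell helper for ComputeSurfaceTension2 (an alternative surface-tension
// model, currently disabled in ComputePorousForceCUDA): accumulates the
// particle's capillary water pressure weighted by a spiky-kernel term over
// neighbouring solid particles (MFtype > 1).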
__device__ float3 contributeSurfaceTension2(int i, int cell, bufList buf)
{
float dsq, c, dsq2;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist;
float cmterm;
int j, jndex;
float3 sum = make_float3(0, 0, 0);
if (buf.mgridcnt[cell] == 0) return sum;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.MFtype[j] > 1)
{
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale; // dist in cm
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if (!(dsq < r2 && dsq > 0))
continue;
dsq = sqrt(dsq);
c = simData.psmoothradius - dsq;
cmterm = buf.mf_restmass[j] * buf.density_solid[j] * c*c / dsq*simData.spikykern;
//sum += (buf.pressure_water[i*MAX_FLUIDNUM] - buf.pressure_water[j*MAX_FLUIDNUM])*cmterm;
sum += (buf.pressure_water[i*MAX_FLUIDNUM])*cmterm;
}
}
return sum;
}
__global__ void ComputeSurfaceTension2(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum)
return;
if (buf.MFtype[i] != 0)
return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
float colorField = 0;
float3 normal = make_float3(0, 0, 0);
float mor = 2/simData.CoCompressibility;
//float mor = 0.002;
for (int c = 0; c < simData.gridAdjCnt; c++)
{
normal += mor * contributeSurfaceTension2(i, gc + simData.gridAdj[c], buf);
}
buf.mforce[i] += normal / buf.mf_restdensity[i];
//buf.poroForce[i] += (buf.mf_beta[i*MAX_FLUIDNUM + 1] + buf.mf_beta[i*MAX_FLUIDNUM + 2])*normal;
buf.maccel[i] = buf.mforce[i];
}
//capillary force exerted on fluid particles
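// Host-side driver for the porous coupling stage. Kernel launch order (each
// launch is followed by a hipGetLastError check and hipDeviceSynchronize):
//   ComputeSolidDarcyFlux -> ComputePoroVelocity -> ComputeCapillaryForce
//   -> ComputeFluidFlux -> ComputeFluidChange
// ComputeSurfaceTension2 and ComputeInnerBoundaryForce are kept below but are
// currently commented out.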
void ComputePorousForceCUDA()
{
hipError_t error;
ComputeSolidDarcyFlux << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: compute poro velocity CUDA: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
ComputePoroVelocity << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: compute poro velocity CUDA: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
//if(fcuda.example == 11)
// ComputeSurfaceTension2 << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
//else
ComputeCapillaryForce << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: compute surface tension CUDA: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
//if (fcuda.example != 6)
//{
//ComputeInnerBoundaryForce << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
//error = hipGetLastError();
//if (error != hipSuccess) {
// fprintf(stderr, "CUDA ERROR: compute surface tension CUDA: %s\n", hipGetErrorString(error));
//}
//hipDeviceSynchronize();
//}
	//fluid exchange between the free fluid and the solid surface
ComputeFluidFlux << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: compute fluid flux CUDA: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
ComputeFluidChange << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = hipGetLastError();
if (error != hipSuccess) {
fprintf(stderr, "CUDA ERROR: compute fluid flux CUDA: %s\n", hipGetErrorString(error));
}
hipDeviceSynchronize();
}
//**************************************************************************************************
//implicit incompressible SPH
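// Pairwise pressure force shared by ComputePressureForce and ApplyPressureForce
// below: for every neighbour pair (i, j) with non-zero volumes the helper
// accumulates
//   F_ij = V_i * V_j / (V_i + V_j) * (p_i + p_j) * gradW_spiky(r_ij),
// and the calling kernels scale the cell sums by -V_i / m_i to turn the force
// into an acceleration.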
__device__ float3 contributePressureForce(int i,float3 pos,int cell, bufList buf, int& count)
{
float3 force = make_float3(0, 0, 0);
if (buf.mgridcnt[cell] == 0)return force;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist;
float c, dsq2, dsq;
int j;
float3 vmr;
float cmterm;
float3 vmterm;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
float q;
for(int cndx = cfirst;cndx < clast;cndx++)
{
j = buf.mgrid[cndx];
//if (buf.MFtype[i] != buf.MFtype[j] && (!buf.misbound[i] && !buf.misbound[j]))
// continue;
/*if (buf.MFtype[i] == 1 && buf.MFtype[i] == buf.MFtype[j])
continue;*/
//if (buf.MFtype[i] == buf.MFtype[j] && buf.MFtype[i] == 1)
// continue;
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale;
dsq2 = dot(dist, dist);
dsq = sqrt(dsq2);
if (dsq2 > r2 || dsq2 <= 0)
continue;
//if (buf.MFtype[i] == 1 && buf.MFtype[i] == buf.MFtype[j])
// continue;
count++;
c = simData.psmoothradius - dsq;
//cmterm = buf.mf_restmass[j] * (buf.mpress[i] * pow(buf.mdensity[i], 2) + buf.mpress[j] * pow(buf.mdensity[j], 2));
//force -= cmterm *c*c*dist*simData.spikykern/dsq;
//force += buf.volume[j]*c*c*simData.spikykern*dist / dsq*(buf.mpress[i] + buf.mpress[j]);
//pairwise pressure force
if(buf.volume[j] * buf.volume[i]!=0)
force += c*c*simData.spikykern*dist / dsq*buf.volume[j]* buf.volume[i]*(buf.mpress[j] + buf.mpress[i])/(buf.volume[i]+ buf.volume[j]);
}
return force;
}
//fluid pressure force
__global__ void ComputePressureForce(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
//if (i % 30000 == 0)
// printf("particle %d's type is %d, press is %.10f\n",
// i, buf.MFtype[i], buf.mpress[i]);
if (buf.misbound[i])
{
buf.mforce[i] = make_float3(0, 0, 0);
buf.maccel[i] = buf.mforce[i];
buf.pressForce[i] = make_float3(0, 0, 0);
return;
}
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
// Sum Pressures
float3 pos = buf.mpos[i];
//float dens = buf.mf_restdensity[i];
float3 force = make_float3(0, 0, 0);
int count = 0;
for (int c = 0; c < simData.gridAdjCnt; c++)
{
force += contributePressureForce(i, pos, gc + simData.gridAdj[c], buf, count);
}
//if (isnan(dot(force, force)))
//if(isnan(buf.volume[i])||isnan(buf.mpress[i]))
// printf("particle %d's type is %d, force is nan. press is %f, volume is %.10f,fluid percent is %f\n",
// i, buf.MFtype[i], buf.mpress[i], buf.volume[i], buf.fluidPercent[i]);
if(buf.MFtype[i] == 0)
buf.pressForce[i] = -buf.volume[i]/buf.mf_restmass[i]*force;
else
{
float mass = buf.mf_restmass[i];
//for (int k = 1; k < MAX_FLUIDNUM; ++k)
// mass += simData.stRatio * buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[i] - 2] * simData.mf_mass[k];
buf.pressForce[i] = -buf.volume[i] / mass * force;
}
//if (dot(buf.mforce[i], buf.mforce[i]) > 10000)
// printf("particle %d's type is %d, pressure force is (%f,%f,%f), pressure is %f\n",
// i, buf.MFtype[i], buf.mforce[i].x, buf.mforce[i].y, buf.mforce[i].z,
// buf.mpress[i]);
//if(isnan(dot(buf.mforce[i],buf.mforce[i])))
//if (dot(buf.mforce[i],buf.mforce[i])>10 && !buf.misbound[i])
// printf("particle %d's type is %d, pressure force is (%.10f,%.10f,%.10f),count is %d, press is %.10f, aii is %.10f, deltadensity is %.10f, rest mass is %.10f, volume is %.10f\n",
// i, buf.MFtype[i], buf.mforce[i].x, buf.mforce[i].y, buf.mforce[i].z, count, buf.mpress[i],buf.aii[i], buf.delta_density[i],buf.mf_restmass[i],buf.volume[i]);
//if (i % 30000 == 0)
// printf("volume is %.10f, m/rho is %.10f\n", buf.volume[i], buf.mf_restmass[i] * buf.mdensity[i]);
}
//fluid pressure force
__global__ void ApplyPressureForce(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (buf.misbound[i])
{
buf.mforce[i] = make_float3(0, 0, 0);
buf.maccel[i] = buf.mforce[i];
return;
}
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
/*for (uint fcount = 0; fcount<simData.mf_catnum; fcount++)
{
buf.mf_alphagrad[i*MAX_FLUIDNUM + fcount] = make_float3(0, 0, 0);
buf.mf_alpha[i*MAX_FLUIDNUM + fcount] = buf.mf_alpha_next[i*MAX_FLUIDNUM + fcount];
buf.mf_beta[i*MAX_FLUIDNUM + fcount] = buf.mf_beta_next[i*MAX_FLUIDNUM + fcount];
}*/
// Sum Pressures
float3 pos = buf.mpos[i];
//float dens = buf.mf_restdensity[i];
float3 force = make_float3(0, 0, 0);
int count = 0;
for (int c = 0; c < simData.gridAdjCnt; c++)
{
force += contributePressureForce(i, pos, gc + simData.gridAdj[c], buf, count);
}
/*if(i%10000==0)
printf("particle %d's type is %d,source is %f, aii is %.10f,press is %f, vel is (%f,%f,%f),volume is %.10f,rest volume is %.10f,press force is (%f,%f,%f),alpha is (%f,%f,%f),beta is (%f,%f,%f)\n",
i, buf.MFtype[i], buf.source[i], buf.aii[i], buf.mpress[i],
buf.vel_mid[i].x, buf.vel_mid[i].y, buf.vel_mid[i].z,
buf.volume[i], buf.rest_volume[i], buf.pressForce[i].x, buf.pressForce[i].y, buf.pressForce[i].z,
buf.mf_alpha[i*MAX_FLUIDNUM + 0], buf.mf_alpha[i*MAX_FLUIDNUM + 1], buf.mf_alpha[i*MAX_FLUIDNUM + 2],
buf.mf_beta[i*MAX_FLUIDNUM + 0], buf.mf_beta[i*MAX_FLUIDNUM + 1], buf.mf_beta[i*MAX_FLUIDNUM + 2]);
*/
buf.pressForce[i] = -buf.volume[i] / buf.mf_restmass[i] * force;
if(buf.MFtype[i] == 0)
buf.mforce[i] += -buf.volume[i] / buf.mf_restmass[i] * force;
else
{
float mass = buf.mf_restmass[i];
//for (int k = 1; k < MAX_FLUIDNUM; ++k)
// mass += simData.stRatio * buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[i] - 2] * simData.mf_mass[k];
//if (buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[i] - 2] != 0)
// printf("type %d's fluid beta %d is %f\n", buf.MFtype[i] - 2, k, buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[i] - 2]);
buf.mforce[i] += -buf.volume[i] / mass * force;
}
buf.fluidForce[i] = -buf.volume[i] / buf.mf_restmass[i] * force;
//if (i % 10 == 0 && buf.isInside[i])
// printf("p %d's press force is (%f,%f,%f)\n",
// i, buf.fluidForce[i].x, buf.fluidForce[i].y, buf.fluidForce[i].z);
//if (dot(buf.mforce[i], buf.mforce[i]) > 10000)
// printf("particle %d's type is %d, pressure force is (%f,%f,%f), pressure is %f\n",
// i, buf.MFtype[i], buf.mforce[i].x, buf.mforce[i].y, buf.mforce[i].z,
// buf.mpress[i]);
buf.maccel[i] = buf.mforce[i];
if(isnan(dot(buf.mforce[i],buf.mforce[i])))
//if (dot(buf.mforce[i],buf.mforce[i])>10 && !buf.misbound[i])
printf("particle %d's type is %d, pressure force is (%.10f,%.10f,%.10f),count is %d, press is %.10f, aii is %.10f, deltadensity is %.10f, rest mass is %.10f, volume is %.10f\n",
i, buf.MFtype[i], buf.mforce[i].x, buf.mforce[i].y, buf.mforce[i].z, count, buf.mpress[i],buf.aii[i], buf.delta_density[i],buf.mf_restmass[i],buf.volume[i]);
//if (i % 30000 == 0)
// printf("volume is %.10f, m/rho is %.10f\n", buf.volume[i], buf.mf_restmass[i] * buf.mdensity[i]);
}
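// Per-cell viscosity helper for ComputeOtherForce: adds an artificial-viscosity
// style term proportional to the relative velocity (v_i - v_j), built from the
// spiky kernel gradient, restricted to non-boundary, same-type fluid pairs
// (MFtype == 0) and weighted by harmonic-mean-like combinations of the free
// fluid fractions mf_alpha_sum of both particles.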
__device__ float3 contributeViscosity(int i, int muli, float idens, float3 pos, int cell, bufList buf, float* ialpha_pre, float3* ivmk)
{
float3 force = make_float3(0, 0, 0);
if (buf.mgridcnt[cell] == 0)return force;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist, sf;
float c, dsq2, dsq;
int j, mulj;
float3 vmr;
float cmterm, vmterm;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
float xvprod, phiij, densityij, PIij, q;
float3 fP;
float cmterm1, vmterm1;
float viscoRatio = 1;
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.misbound[j])
continue;
mulj = j * MAX_FLUIDNUM;
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale;
dsq2 = dot(dist, dist);
if (dsq2 <= 0 || dsq2 >= r2)
continue;
dsq = sqrt(dsq2);
vmr = buf.mveleval[i] - buf.mveleval[j];
//viscosity
c = (simData.psmoothradius - dsq);
cmterm1 = simData.spikykern * c * c / dsq * buf.mf_restmass[j] * buf.density_solid[j];
vmterm1 = cmterm1 * (buf.mf_visc[i] + buf.mf_visc[j]) * idens;
//if ((buf.MFtype[i] == 4 && buf.MFtype[j] == 4))
// force += vmterm1 * vmr;
if (buf.MFtype[i] == buf.MFtype[j])
{
//if (buf.MFtype[i] != 0)
// force += viscoRatio * vmterm1 * vmr;
if (buf.MFtype[i] == 0)
{
float fluidsum = buf.mf_alpha_sum[i] * buf.mf_alpha_sum[j];
if (fluidsum <= 0.01)
fluidsum = 0;
else
fluidsum /= (buf.mf_alpha_sum[i] + buf.mf_alpha_sum[j]);
float fluidsum2 = (1 - buf.mf_alpha_sum[i])*(1 - buf.mf_alpha_sum[j]);
if (fluidsum2 <= 0.01)
fluidsum2 = 0;
else
fluidsum2 /= (2 - buf.mf_alpha_sum[i] - buf.mf_alpha_sum[j]);
//if (_example == 2)
// fluidsum2 = 0;
//force += (fluidsum + fluidsum2) * vmterm * dist / dsq;
force += (fluidsum + fluidsum2) * vmterm1 * vmr;
}
}
//else
//{
// float fluidsum = 0;
// if (buf.MFtype[i] == 0)
// fluidsum = buf.mf_alpha_sum[i];
// if (buf.MFtype[j] == 0)
// fluidsum = buf.mf_alpha_sum[j];
// force += fluidsum * vmterm1 * vmr;
//}
//if(buf.MFtype[i] + buf.MFtype[j] == 9)
// force += viscoRatio * vmterm1 * vmr;
}
return force;
}
__global__ void ComputeOtherForce(bufList buf, int pnum, float time)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (buf.misbound[i])return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
bool error = false;
// Sum Pressures
float3 pos = buf.mpos[i];
float dens = buf.mf_restdensity[i];
float3 force = make_float3(0, 0, 0);
float normalize = 0;
register uint muloffseti = i * MAX_FLUIDNUM;
register float alpha[MAX_FLUIDNUM];
register float3 ivmk[MAX_FLUIDNUM];
for (uint fcount = 0; fcount < simData.mf_catnum; fcount++)
{
//buf.mf_alphagrad[i*MAX_FLUIDNUM + fcount] = make_float3(0, 0, 0);
alpha[fcount] = buf.mf_alpha_next[muloffseti + fcount];
//buf.mf_alpha_pre[i*MAX_FLUIDNUM + fcount] = buf.mf_alpha[i*MAX_FLUIDNUM + fcount];
ivmk[fcount] = buf.mf_vel_phrel[muloffseti + fcount];
}
for (int c = 0; c < simData.gridAdjCnt; c++)
{
force += contributeViscosity(i, muloffseti, buf.mdensity[i], pos, gc + simData.gridAdj[c], buf, alpha, ivmk);
}
//if (dot(force, force) > 10)
// printf("particle %d's viscosity force is (%f,%f,%f)\n",
// i, force.x, force.y, force.z);
//bound force and gravity
//buf.mforce[i] += getBoundForce(i, buf, force, time);
buf.mforce[i] = force;
buf.fluidForce[i] = force;
buf.maccel[i] = buf.mforce[i];
/*if (buf.MFtype[i] == 0)
{
buf.mforce[i] *= 1-buf.absorbedPercent[i];
buf.maccel[i] *= 1-buf.absorbedPercent[i];
}*/
if (isnan(dot(force,force)))
printf("particle %d's type is %d,visco force is (%f,%f,%f),pos is (%f,%f,%f), alpha sum is %f\n",
i, buf.MFtype[i], buf.mforce[i].x, buf.mforce[i].y, buf.mforce[i].z,
buf.mpos[i].x, buf.mpos[i].y, buf.mpos[i].z, buf.mf_alpha_sum[i]);
}
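// Per-cell helper for ComputeColorValue: SPH interpolation of the neighbours'
// rest colour values with the cubic spline kernel in q = r/h; solid particles
// (MFtype == 2, or every MFtype >= 2 in example 2) are skipped. The smoothed
// colour field feeds the colour tensor below.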
__device__ float contributeColorValue(int i, float3 pos, int cell, bufList buf)
{
if (buf.mgridcnt[cell] == 0)return 0;
float sum = 0;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist;
float c, dsq2, dsq;
int j, mulj;
float pmterm;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
float q;
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.MFtype[j] == 2 || (_example == 2 && buf.MFtype[j] >= 2))
continue;
mulj = j * MAX_FLUIDNUM;
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale;
dsq2 = dot(dist, dist);
q = sqrt(dsq2 / r2);
if (q>2)
continue;
if (q >= 0 && q <= 1)
pmterm = simData.CubicSplineKern2*(1 - 1.5*q*q*(1 - q / 2));
else
pmterm = simData.CubicSplineKern1*pow(2 - q, 3);
sum += pmterm * (buf.rest_colorValue[j]) * buf.mf_restmass[j] * buf.mdensity[j];
}
return sum;
}
__global__ void ComputeColorValue(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (buf.MFtype[i] == 2)return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
bool error = false;
// Sum Pressures
float3 pos = buf.mpos[i];
buf.colorValue[i] = 0;
for (int c = 0; c < simData.gridAdjCnt; c++)
{
buf.colorValue[i] += contributeColorValue(i, pos, gc + simData.gridAdj[c], buf);
}
}
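// Per-cell helper for ComputeColorTensor: accumulates the colour-field gradient
// gradCV (spiky kernel) and a poly6 normalisation weight sigma. ComputeColorTensor
// then builds the traceless tensor (|gradCV|^2 / 3) * I - gradCV * gradCV^T and
// scales it by stRatio / (|gradCV| * sigma^2) (presumably the surface-tension
// stress, judging by the stRatio name).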
__device__ float3 contributeColorTensor(int i, int cell, bufList buf, float &sigma)
{
float3 sum = make_float3(0, 0, 0);
if (buf.mgridcnt[cell] == 0)return sum;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist;
float c, dsq2, dsq;
int j, mulj;
float pmterm, cmterm;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.MFtype[j] == 2 || (_example == 2 && buf.MFtype[j] >= 2))
continue;
mulj = j * MAX_FLUIDNUM;
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale;
dsq2 = dot(dist, dist);
if (dsq2 > r2 ||dsq2 <=0)
continue;
dsq = sqrt(dsq2);
c = simData.psmoothradius - dsq;
cmterm = c*c*simData.spikykern / dsq;
pmterm = pow(r2 - dsq2, 3)*simData.poly6kern;
sum += cmterm * buf.colorValue[j] * buf.mf_restmass[j] * buf.mdensity[j] * dist;
sigma += pmterm;
}
return sum;
}
__global__ void ComputeColorTensor(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (buf.MFtype[i]!=0)return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
bool error = false;
// Sum Pressures
float3 pos = buf.mpos[i];
for (int k = 0; k < 9; ++k)
buf.colorTensor[i * 9 + k] = 0;
float3 gradCV = make_float3(0, 0, 0);
float sigma = 0, divCV;
for (int c = 0; c < simData.gridAdjCnt; c++)
{
gradCV += contributeColorTensor(i, gc + simData.gridAdj[c], buf, sigma);
}
divCV = dot(gradCV, gradCV);
if ((sqrt(divCV)) < 0.000000001)
{
for (int k = 0; k < 9; ++k)
buf.colorTensor[i * 9 + k] = 0;
return;
}
tensorProduct(gradCV, gradCV, buf.colorTensor + i * 9);
for (int m = 0; m < 3; ++m)
{
for (int n = 0; n < 3; ++n)
if (m == n)
buf.colorTensor[i * 9 + m * 3 + n] = divCV / 3 - buf.colorTensor[i * 9 + m * 3 + n];
else
buf.colorTensor[i * 9 + m * 3 + n] = - buf.colorTensor[i * 9 + m * 3 + n];
}
//if(abs(divCV) > 1)
////if (i % 1000 == 0 || isnan(buf.colorValue[i]))
// //printf("%d's color value is %f, gradCV is (%f,%f,%f)\n", i, buf.colorValue[i], gradCV.x, gradCV.y, gradCV.z);
// printf("%d's color tensor is (%f,%f,%f)(%f,%f,%f)(%f,%f,%f), gradCV is (%f,%f,%f), sigma is %f\n", i,
// buf.colorTensor[i * 9 + 0], buf.colorTensor[i * 9 + 1], buf.colorTensor[i * 9 + 2],
// buf.colorTensor[i * 9 + 3], buf.colorTensor[i * 9 + 4], buf.colorTensor[i * 9 + 5],
// buf.colorTensor[i * 9 + 6], buf.colorTensor[i * 9 + 7], buf.colorTensor[i * 9 + 8], gradCV.x, gradCV.y, gradCV.z,
// sigma);
for (int k = 0; k<9; ++k)
{
buf.colorTensor[i * 9 + k] *= simData.stRatio / (sqrt(divCV)*sigma*sigma);
}
}
//__device__ float3 contributeDijPj(int i, float3 pos, int cell, bufList buf)
//{
// float3 DijPj = make_float3(0,0,0);
// if (buf.mgridcnt[cell] == 0)return DijPj;
//
// register float d2 = simData.psimscale * simData.psimscale;
// register float r2 = simData.r2;
// float3 dist;
// float c, dsq2, dsq;
// int j;
// float3 dji;
// float cmterm;
// float3 vmterm;
// int cfirst = buf.mgridoff[cell];
// int clast = cfirst + buf.mgridcnt[cell];
// float q;
// for (int cndx = cfirst; cndx < clast; cndx++)
// {
// j = buf.mgrid[cndx];
// dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale;
// dsq2 = dot(dist, dist);
// dsq = sqrt(dsq2);
// //q = dsq / simData.psmoothradius;
// //if (q >= 2 || q <= 0)
// // continue;
// //cmterm = buf.mf_restmass[j] * pow(buf.mdensity[j], 2)*buf.mpress[j];
// //if(q>1)
// //{
// // vmterm = simData.gradCubicSplineKern1*(2 - q)*(2 - q)*dist;
// // DijPj += cmterm*vmterm;
// //}
// //else
// //{
// // vmterm = simData.gradCubicSplineKern2*(2.25*q*q - 3 * q)*dist;
// // DijPj += cmterm*vmterm;
// //}
// if (dsq2 > r2 || dsq2 <= 0)
// continue;
// c = (simData.psmoothradius - dsq);
// cmterm = buf.mf_restmass[j] * pow(buf.mdensity[j], 2)*buf.mpress[j];
// DijPj += c*c*dist *cmterm*simData.spikykern/dsq;
// //DijPj += buf.mpress[j]*c*c*simData.spikykern*buf.mf_restmass[j] * pow(buf.mdensity[j], 2)*dist;
// //DijPj += -buf.mf_restmass[j] * pow()
// }
// return DijPj;
//}
//__global__ void ComputeDijPj(bufList buf, int pnum)
//{
// uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
// if (i >= pnum) return;
//
// // Get search cell
// int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
// uint gc = buf.mgcell[i];
// if (gc == GRID_UNDEF) return; // particle out-of-range
// gc -= nadj;
// bool error = false;
// // Sum Pressures
// float3 pos = buf.mpos[i];
// float dens = buf.mf_restdensity[i];
// buf.DijPj[i] = make_float3(0,0,0);
// for (int c = 0; c < simData.gridAdjCnt; c++)
// {
// buf.DijPj[i] += contributeDijPj(i, pos, gc + simData.gridAdj[c], buf);
// }
// buf.DijPj[i] *= -simData.mf_dt*simData.mf_dt;
// //if (i % 20000 == 0)
// // printf("particle %d's dijpj is (%f,%f,%f),press is %f\n",
// // i, buf.DijPj[i].x, buf.DijPj[i].y, buf.DijPj[i].z, buf.mpress[i]);
//}
//__global__ void updatePress(bufList buf, int pnum)
//{
// uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
// if (i >= pnum) return;
//
// // Get search cell
// int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
// uint gc = buf.mgcell[i];
// if (gc == GRID_UNDEF) return; // particle out-of-range
// gc -= nadj;
// bool error = false;
// // Sum Pressures
// float3 pos = buf.mpos[i];
// float dens = buf.mf_restdensity[i];
// float omega = 0.5;
// buf.mpress_pre[i] = (1 - omega) * buf.mpress[i];
// float sum = 0;
// for (int c = 0; c < simData.gridAdjCnt; c++)
// {
// sum += contributePressureIteration(i, pos, gc + simData.gridAdj[c], buf);
// }
// float delta = buf.mf_restdensity[i] - buf.inter_density[i] - sum;
// if (buf.aii[i] == 0)
// buf.mpress_pre[i] = buf.mpress[i];
// else
// buf.mpress_pre[i] += omega / buf.aii[i] * (delta);
//
// //if (buf.mpress_pre[i] < 0)
// // buf.mpress_pre[i] = 0;
// //if (i % 40000 == 0)
// // printf("aii is %.10f\n", buf.aii[i]);
// // printf("particle %d's press is %.10f,new press is %.10f, sum is %.10f, inter_density is %.10f,initial density is %f, aii is %.10f,delta is %.10f\n",
// // i, buf.mpress[i], buf.mpress_pre[i], sum, buf.inter_density[i],1/buf.mdensity[i], buf.aii[i],delta);
//}
//__global__ void applyPress(bufList buf, int pnum)
//{
// uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
// if (i >= pnum) return;
//
// // Get search cell
// int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
// uint gc = buf.mgcell[i];
// if (gc == GRID_UNDEF) return; // particle out-of-range
// gc -= nadj;
// if (buf.mpress_pre[i] < 0)
// buf.mpress_pre[i] = 0;
// buf.mpress[i] = buf.mpress_pre[i];
// //if (i % 2000==0)
// // printf("particle %d's press is %f\n", i, buf.mpress[i]);
//}
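// (The commented-out block above is an older DijPj-style IISPH iteration kept
// for reference.)
// Current relaxed-Jacobi pressure update: contributeCriterion evaluates
//   (Ap)_i = dt^2 * sum_j V_i * V_j / (V_i + V_j) * (pressForce_i - pressForce_j) . gradW_spiky,
// and ComputeCriterion then sets
//   p_i <- p_i + omega * (source_i - (Ap)_i) / a_ii,
// with omega = 0.5 * rest_volume_i / (h/2)^3 and p_i clamped to [0, 1e6].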
__device__ float contributeCriterion(int i, int cell, bufList buf)
{
float sum = 0;
if (buf.mgridcnt[cell] == 0)return sum;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist;
float c, dsq2, dsq;
int j;
float3 delta_force;
float3 cmterm, vmterm;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
float q;
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
//if (buf.MFtype[i] != buf.MFtype[j] && (!buf.misbound[i] && !buf.misbound[j]))
// continue;
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale;
dsq2 = dot(dist, dist);
dsq = sqrt(dsq2);
if (dsq2 > r2 || dsq2 <= 0)
continue;
//if (buf.MFtype[i] == 1 && buf.MFtype[i] == buf.MFtype[j])
// continue;
c = simData.psmoothradius - dsq;
//delta_force = buf.mf_restmass[j] * (buf.mforce[i] - buf.mforce[j]);
//sum += dot(delta_force, dist)*c*c*simData.spikykern/dsq;
//compute Ap
//cmterm = buf.volume[j] * (buf.mforce[i] - buf.mforce[j]);
//pairwise Ap
if (buf.volume[i] * buf.volume[j] != 0)
cmterm = buf.volume[i] * buf.volume[j] /(buf.volume[j]+buf.volume[i])* (buf.pressForce[i] - buf.pressForce[j]);
else
continue;
sum += dot(cmterm, dist / dsq)*c*c*simData.spikykern;
}
return sum;
}
__global__ void ComputeCriterion(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
//if (buf.MFtype[i] == 3)return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
float sum = 0;
float omega;
omega = 0.5*buf.rest_volume[i] / pow(simData.psmoothradius / 2, 3);
for (int c = 0; c < simData.gridAdjCnt; c++)
{
sum += contributeCriterion(i, gc + simData.gridAdj[c], buf);
}
sum *= pow(simData.mf_dt, 2);
buf.delta_density[i] = buf.source[i] - sum;
float p = buf.mpress[i];
if (abs(buf.aii[i]) != 0)
buf.mpress[i] = buf.mpress[i] + omega*buf.delta_density[i] / buf.aii[i];
//float fluidsum = 0;
//for (int k = 0; k < simData.mf_catnum; ++k)
// fluidsum += buf.mf_alpha[i*MAX_FLUIDNUM + k];
//if(isnan(buf.delta_density[i]))
//if (buf.mpress[i]!=0)
//if(buf.mpress[i]>1000000||isnan(buf.mpress[i]))
//if(abs(buf.delta_density[i])>1)
//if(buf.mpos[i].y<-5)
//printf("particle %d's type is %d, Ap is %f,source is %f, aii is %.10f,press is %f,press pre is %.10f, vel is (%f,%f,%f),volume is %.10f,rest volume is %.10f,press force is (%f,%f,%f),alpha is (%f,%f,%f),beta is (%f,%f,%f)\n",
// i, buf.MFtype[i], sum, buf.source[i], buf.aii[i], buf.mpress[i], p,
// buf.vel_mid[i].x, buf.vel_mid[i].y,buf.vel_mid[i].z,
// buf.volume[i],buf.rest_volume[i], buf.pressForce[i].x, buf.pressForce[i].y, buf.pressForce[i].z,
// buf.mf_alpha[i*MAX_FLUIDNUM + 0], buf.mf_alpha[i*MAX_FLUIDNUM + 1], buf.mf_alpha[i*MAX_FLUIDNUM + 2],
// buf.mf_beta[i*MAX_FLUIDNUM + 0], buf.mf_beta[i*MAX_FLUIDNUM + 1], buf.mf_beta[i*MAX_FLUIDNUM + 2]);
if (buf.mpress[i] < 0)
buf.mpress[i] = 0;
//if (buf.misbound[i] == 0)
//{
// if (buf.mpress[i] > 10000)
// buf.mpress[i] = 10000;
//}
//else
//{
if (buf.mpress[i] > 1000000)
buf.mpress[i] = 1000000;
//}
}
//************************************************************************
//pressure boundary for IISPH
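// Rest volumes for the pressure solve: fluid particles (MFtype == 0) use
//   rest_volume_i = (sum_k alpha_k) * (h/2)^3,
// while boundary/solid particles estimate theirs from the poly6 number density
// of same-type neighbours (plus a self term):
//   rest_volume_i = solid_pfactor / sum_j W_poly6(r_ij).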
__device__ float contributeBRestVolume(int i, int cell, bufList buf)
{
float sum = 0;
if (buf.mgridcnt[cell] == 0)return sum;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist;
float c, dsq2, dsq;
int j;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.MFtype[i]!=buf.MFtype[j])
continue;
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale;
dsq2 = dot(dist, dist);
//dsq = sqrt(dsq2);
if (dsq2 > r2 || dsq2 <= 0)
continue;
c = r2 - dsq2;
sum += pow(c, 3)*simData.poly6kern;
}
return sum;
}
__global__ void ComputeBRestVolume(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (buf.MFtype[i]==0)
{
float sum = 0;
for (int k = 1; k < simData.mf_catnum; ++k)
sum += buf.mf_alpha[i*MAX_FLUIDNUM+k];
buf.rest_volume[i] = sum*pow(simData.psmoothradius / 2, 3);
if (isnan(sum))
printf("error:sum is nan! fluid percent is (%f,%f,%f)\n",
buf.mf_alpha[i*MAX_FLUIDNUM + 0],
buf.mf_alpha[i*MAX_FLUIDNUM + 1],
buf.mf_alpha[i*MAX_FLUIDNUM + 2]);
return;
}
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
float sum = 0;
for (int c = 0; c < simData.gridAdjCnt; c++)
{
sum += contributeBRestVolume(i, gc + simData.gridAdj[c], buf);
}
sum += pow(simData.r2, 3)*simData.poly6kern;
buf.rest_volume[i] = simData.solid_pfactor / sum;
}
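// Actual volumes: contributeVolume sums rest_volume_j * W_poly6(r_ij) over all
// neighbours; ComputeVolume adds the self term (and a small constant for
// boundary particles) and sets V_i = rest_volume_i / sum, or 0 if the sum
// vanishes.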
__device__ float contributeVolume(int i, int cell, bufList buf)
{
float sum = 0;
if (buf.mgridcnt[cell] == 0)return sum;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist;
float c, dsq2, dsq;
int j;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
//if (buf.MFtype[i] != buf.MFtype[j] && (!buf.misbound[i] && !buf.misbound[j]))
// continue;
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale;
dsq2 = dot(dist, dist);
//dsq = sqrt(dsq2);
if (dsq2 > r2 || dsq2 <= 0)
continue;
c = r2 - dsq2;
sum += buf.rest_volume[j] * pow(c, 3)*simData.poly6kern;
}
return sum;
}
__global__ void ComputeVolume(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
float sum = 0;
for (int c = 0; c < simData.gridAdjCnt; c++)
{
sum += contributeVolume(i, gc + simData.gridAdj[c], buf);
}
sum += buf.rest_volume[i] * pow(simData.r2, 3)*simData.poly6kern;
//if (i % 30000 == 0)
// printf("volume sum is %.10f, 0.15*pow(simData.psmoothradius / 2, 3) is %.10f,rest_volume is %.10f\n",
// sum, 0.15 * pow(simData.psmoothradius / 2, 3), buf.rest_volume[i]);
//if (buf.MFtype[i] != 0)
if(buf.misbound[i])
sum += 0.15*pow(simData.psmoothradius / 2, 3);
if (sum == 0)
buf.volume[i] = 0;
else
buf.volume[i] = buf.rest_volume[i] / sum;
//if (buf.MFtype[i] == 0)
// buf.volume[i] *= buf.fluidPercent[i];
//if (i % 30000 == 0)
//if(buf.misbound[i]&&i%10000==0)
//if (isnan(buf.volume[i]))
//{
// float fluidsum = 0;
// for (int k = 0; k < simData.mf_catnum; ++k)
// fluidsum += buf.mf_fluidPercent[i*MAX_FLUIDNUM + k];
// printf("particle %d's type is %d, rest_volume is %.10f, volume is %.10f, h3 is %.10f, sum is %.10f, fluidpercent is %f\n",
// i, buf.MFtype[i], buf.rest_volume[i], buf.volume[i], 2500000 * pow(simData.psmoothradius / 2, 3), sum, fluidsum);
//}
}
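// Source term of the pressure system: contributeSource accumulates a pairwise
// divergence of the mid-step velocities,
//   div_i ~ -sum_j V_i * V_j / (V_i + V_j) * (v_mid_i - v_mid_j) . gradW_spiky,
// and ComputeSource combines it with the volume deviation into
//   s_i = 1 - rest_volume_i / V_i + dt * div_i
// (scaled by mf_alpha_sum for fluid particles).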
__device__ float contributeSource(int i, int cell, bufList buf)
{
float sum = 0;
if (buf.mgridcnt[cell] == 0)return sum;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist;
float c, dsq2, dsq;
int j;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
float3 velocity,cmterm;
float q;
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
//if (buf.MFtype[i] != buf.MFtype[j] && (!buf.misbound[i] && !buf.misbound[j]))
// continue;
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale;
dsq2 = dot(dist, dist);
dsq = sqrt(dsq2);
if (dsq2 > r2 || dsq2 <= 0)
continue;
if (buf.MFtype[i] == 1 && buf.MFtype[i] == buf.MFtype[j])
continue;
//if(_example == 2 && (buf.MFtype[i] == buf.MFtype[j]) && buf.MFtype[i])
c = simData.psmoothradius - dsq;
//velocity = buf.vel_mid[i] - buf.vel_mid[j];
//velocity = buf.mveleval[i] - buf.mveleval[j];
//if(buf.MFtype[j]==0)
// velocity *= buf.fluidPercent[j]*buf.volume[j];
//else
// velocity *= buf.volume[j];
//pairwise divergence velocity
if (buf.volume[i] * buf.volume[j] != 0)
velocity = buf.volume[i] * buf.volume[j] / (buf.volume[i] + buf.volume[j]) * (buf.vel_mid[i] - buf.vel_mid[j]);
else
continue;
cmterm = c*c*dist / dsq*simData.spikykern;
sum += -dot(velocity, cmterm);
}
return sum;
}
__global__ void ComputeSource(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
float sum = 0;
for (int c = 0; c < simData.gridAdjCnt; c++)
{
sum += contributeSource(i, gc + simData.gridAdj[c], buf);
}
if (buf.MFtype[i] == 0)
{
if(buf.volume[i] == 0)
buf.source[i] = buf.mf_alpha_sum[i]*simData.mf_dt*sum;
else
buf.source[i] = (1
- buf.rest_volume[i] / buf.volume[i]
+ simData.mf_dt*sum)*buf.mf_alpha_sum[i];
}
else
buf.source[i] = 1 - buf.rest_volume[i] / buf.volume[i] + simData.mf_dt*sum;
//if(isnan(buf.source[i]))
/*if (i % 30000 == 0&&buf.MFtype[i]==0)
printf("particle %d's source is %f, fluidsum is %f,cat num is %d, rest_volume is %.10f, buf.volume is %.10f, velocity divergence is %.10f, mid vel is (%f,%f,%f)\n",
i, buf.source[i], fluidsum, simData.mf_catnum, buf.rest_volume[i], buf.volume[i], simData.mf_dt*sum,
buf.vel_mid[i].x, buf.vel_mid[i].y, buf.vel_mid[i].z);*/
}
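// Diagonal coefficient a_ii of the pressure system. For non-boundary particles
// contributeAIIfluid returns sum_j V_j^2 / m_j * |gradW_spiky|^2 and accumulates
// sum_j V_j * gradW_spiky into sum1; boundary particles use contributeAIIsolid,
// which keeps only the first sum. ComputeAII then adds |sum1|^2 / m_i, scales
// everything by -dt^2 * V_i, and resets mpress to zero as the initial guess for
// the Jacobi iteration.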
__device__ float contributeAIIfluid(int i, float3 pos, int cell, bufList buf, float3&sum1, int&count)
{
if (buf.mgridcnt[cell] == 0)return 0;
float sum2 = 0;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist;
float c, dsq2, dsq;
int j;
float3 dji;
float cmterm;
float3 vmterm;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
float q;
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
//if (buf.MFtype[i] != buf.MFtype[j] && (!buf.misbound[i] && !buf.misbound[j]))
// continue;
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale;
dsq2 = dot(dist, dist);
dsq = sqrt(dsq2);
//spiky kern
if (dsq2 > r2 || dsq2 <= 0)
continue;
c = (simData.psmoothradius - dsq);
//pressure boundary
count++;
//if(buf.MFtype[i]==0||buf.MFtype[i]!=buf.MFtype[j])
sum1 += buf.volume[j] * c*c*simData.spikykern*dist / dsq;
if (!buf.misbound[j]) {
if (buf.volume[j] == 0)
sum2 += 0;
else
sum2 += buf.volume[j] * buf.volume[j] / buf.mf_restmass[j]
* pow(c*c*simData.spikykern, 2);
}
//sum2 += buf.volume[j] * buf.volume[j] / (buf.mf_restmass[j]*(1-buf.absorbedPercent[i]))
// * pow(c*c*simData.spikykern, 2);
}
return sum2;
}
__device__ float contributeAIIsolid(int i, float3 pos, int cell, bufList buf)
{
if (buf.mgridcnt[cell] == 0)return 0;
float sum = 0;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist;
float c, dsq2, dsq;
int j;
float3 dji;
float cmterm;
float3 vmterm;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
float q;
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale;
dsq2 = dot(dist, dist);
dsq = sqrt(dsq2);
//spiky kern
if (dsq2 > r2 || dsq2 <= 0)
continue;
c = (simData.psmoothradius - dsq);
//iisph
/*c = (simData.psmoothradius - dsq);
cmterm = dot(buf.dii[i], dist)*buf.mf_restmass[j] * c*c*simData.spikykern / dsq;
buf.aii[i] += cmterm;
vmterm = pow(simData.mf_dt, 2)*buf.mf_restmass[i]
* pow(buf.mdensity[i], 2) *c*c*simData.spikykern *dist /dsq;
vmterm *= c*c*simData.spikykern/dsq*buf.mf_restmass[j];
buf.aii[i] -= dot(vmterm, dist);*/
//pressure boundary
if (!buf.misbound[j]) {
sum += buf.volume[j] * buf.volume[j] / buf.mf_restmass[j] * pow(c*c*simData.spikykern, 2);
}
}
return sum;
}
__global__ void ComputeAII(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
bool error = false;
// Sum Pressures
float3 pos = buf.mpos[i];
float dens = buf.mf_restdensity[i];
buf.aii[i] = 0;
int count = 0;
float3 sum1 = make_float3(0, 0, 0);
for (int c = 0; c < simData.gridAdjCnt; c++)
{
if (!buf.misbound[i])
//if(buf.MFtype[i]==0)
buf.aii[i] += contributeAIIfluid(i, pos, gc + simData.gridAdj[c], buf, sum1, count);
else
buf.aii[i] += contributeAIIsolid(i, pos, gc + simData.gridAdj[c], buf);
}
float mass = buf.mf_restmass[i];
buf.aii[i] += dot(sum1, sum1) / mass;
//pressure boundary
buf.aii[i] *= -simData.mf_dt*simData.mf_dt*buf.volume[i];
buf.mpress[i] = 0;
}
// f68a6d8c24e5d929cedb8bd997c33d2df710637f.cu
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <assert.h>
#include <iomanip>
#include <conio.h>
//#include <cutil.h> // cutil32.lib
//#include <cutil_math.h> // cutil32.lib
#include <GL/glut.h>
#include <cuda_gl_interop.h>
#include <helper_cuda.h>
#include <helper_math.h>
#include <driver_types.h>
#include "fluid_system_host.cuh"
#include "fluid_system_kern.cuh"
#include "radixsort.cu" // Build in RadixSort
#include "thrust\device_vector.h" //thrust libs
#include "thrust\sort.h"
#include "thrust\host_vector.h"
#include "cublas_v2.h"
FluidParams fcuda;
bufList fbuf;
//initialInfo elasticInfo;
__device__ FluidParams simData;
__device__ uint gridActive;
__device__ int flagNumFT; //for transfer
__device__ int pNumFT; //for transfer
#define BLOCK_SIZE 256
#define LOCAL_PMAX 896
#define NUM_CELL 27
#define LAST_CELL 26
#define CENTER_CELL 13
float** g_scanBlockSums;
int** g_scanBlockSumsInt;
unsigned int g_numEltsAllocated = 0;
unsigned int g_numLevelsAllocated = 0;
void cudaExit (int argc, char **argv)
{
exit(EXIT_SUCCESS);
//CUT_EXIT(argc, argv);
}
void cudaInit(int argc, char **argv)
{
//CUT_DEVICE_INIT(argc, argv);
findCudaDevice(argc, (const char **)argv);
cudaDeviceProp p;
cudaGetDeviceProperties ( &p, 0);
printf ( "-- CUDA --\n" );
printf ( "Name: %s\n", p.name );
printf ( "Revision: %d.%d\n", p.major, p.minor );
printf ( "Global Mem: %d\n", p.totalGlobalMem );
printf ( "Shared/Blk: %d\n", p.sharedMemPerBlock );
printf ( "Regs/Blk: %d\n", p.regsPerBlock );
printf ( "Warp Size: %d\n", p.warpSize );
printf ( "Mem Pitch: %d\n", p.memPitch );
printf ( "Thrds/Blk: %d\n", p.maxThreadsPerBlock );
printf ( "Const Mem: %d\n", p.totalConstMem );
printf ( "Clock Rate: %d\n", p.clockRate );
fbuf.mgridactive = 0x0;
checkCudaErrors ( cudaMalloc ( (void**) &fbuf.mpos, sizeof(float)*3 ) );
checkCudaErrors ( cudaMalloc ( (void**) &fbuf.maccel, sizeof(float)*3) );
checkCudaErrors ( cudaMalloc((void**)&fbuf.vel_mid, sizeof(float) * 3));
checkCudaErrors ( cudaMalloc ( (void**) &fbuf.mveleval, sizeof(float)*3) );
checkCudaErrors ( cudaMalloc ( (void**) &fbuf.mforce, sizeof(float)*3) );
checkCudaErrors ( cudaMalloc ( (void**) &fbuf.poroForce, sizeof(float) * 3));
checkCudaErrors(cudaMalloc((void**)&fbuf.fluidForce, sizeof(float) * 3));
checkCudaErrors ( cudaMalloc ( (void**) &fbuf.mpress, sizeof(float) ) );
checkCudaErrors ( cudaMalloc ( (void**) &fbuf.mdensity, sizeof(float) ) );
checkCudaErrors ( cudaMalloc ( (void**) &fbuf.mgcell, sizeof(uint)) );
checkCudaErrors ( cudaMalloc ( (void**) &fbuf.mgndx, sizeof(uint)) );
checkCudaErrors ( cudaMalloc ( (void**) &fbuf.mclr, sizeof(uint)) );
checkCudaErrors ( cudaMalloc((void**)&fbuf.delta_density, sizeof(float)));
checkCudaErrors ( cudaMalloc((void**)&fbuf.aii, sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.pressForce, sizeof(float) * 3));
checkCudaErrors ( cudaMalloc((void**)&fbuf.rest_volume, sizeof(float)));
checkCudaErrors ( cudaMalloc((void**)&fbuf.volume, sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.rest_colorValue, sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.colorValue, sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.colorTensor, sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.source, sizeof(float)));
checkCudaErrors ( cudaMalloc ( (void**) &fbuf.msortbuf, sizeof(uint) ) );
checkCudaErrors ( cudaMalloc ( (void**) &fbuf.mgrid, 1 ) );
checkCudaErrors ( cudaMalloc ( (void**) &fbuf.mgridcnt, 1 ) );
//new sort
checkCudaErrors ( cudaMalloc ( (void**) &fbuf.midsort, 1 ) );
checkCudaErrors ( cudaMalloc ( (void**) &fbuf.mgridoff, 1 ) );
checkCudaErrors ( cudaMalloc ( (void**) &fbuf.mgridactive, 1 ) );
//checkCudaErrors ( cudaMalloc ( (void**) &fbuf.mcluster, sizeof(uint) ) );
//implicit SPH formulation for elastic body
checkCudaErrors ( cudaMalloc ( (void**) &fbuf.gradDeform, 1 ));
checkCudaErrors ( cudaMalloc ( (void**) &fbuf.Rotation, 1));
//checkCudaErrors ( cudaMalloc ( (void**) &fbuf.mf_fluidPercent, sizeof(float)));
//checkCudaErrors ( cudaMalloc ( (void**) &fbuf.poroDriftVel, sizeof(float3)));
//checkCudaErrors ( cudaMalloc ( (void**) &fbuf.percentChange, sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.divDarcyFlux, sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.isInside, sizeof(bool)));
//checkCudaErrors ( cudaMalloc ( (void**) &fbuf.CorrectL, 1 ) );
checkCudaErrors(cudaMalloc((void**)&fbuf.SurfaceForce, sizeof(float3)));
//elastic information
checkCudaErrors(cudaMalloc((void**)&fbuf.elasticID, sizeof(uint)));
checkCudaErrors(cudaMalloc((void**)&fbuf.particleID, sizeof(uint)));
checkCudaErrors(cudaMalloc((void**)&fbuf.initialVolume, sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.neighborID, sizeof(uint)));
checkCudaErrors(cudaMalloc((void**)&fbuf.neighborDistance, sizeof(float3)));
checkCudaErrors(cudaMalloc((void**)&fbuf.kernelGrad, sizeof(float3)));
checkCudaErrors(cudaMalloc((void**)&fbuf.kernelRotate, sizeof(float3)));
checkCudaErrors(cudaMalloc((void**)&fbuf.neighborNum, sizeof(uint)));
checkCudaErrors(cudaMalloc((void**)&fbuf.neighborIndex, sizeof(uint)));
//checkCudaErrors(cudaMalloc((void**)&fbuf.colorField, sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.volumetricStrain, sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.normal, sizeof(float3)));
checkCudaErrors(cudaMalloc((void**)&fbuf.isHead, sizeof(int)));
checkCudaErrors(cudaMalloc((void**)&fbuf.frame, sizeof(int)));
checkCudaErrors(cudaMalloc((void**)&fbuf.bx, sizeof(float))); checkCudaErrors(cudaMalloc((void**)&fbuf.by, sizeof(float))); checkCudaErrors(cudaMalloc((void**)&fbuf.bz, sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.vx, sizeof(float))); checkCudaErrors(cudaMalloc((void**)&fbuf.vy, sizeof(float))); checkCudaErrors(cudaMalloc((void**)&fbuf.vz, sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.rx, sizeof(float))); checkCudaErrors(cudaMalloc((void**)&fbuf.ry, sizeof(float))); checkCudaErrors(cudaMalloc((void**)&fbuf.rz, sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.r2x, sizeof(float))); checkCudaErrors(cudaMalloc((void**)&fbuf.r2y, sizeof(float))); checkCudaErrors(cudaMalloc((void**)&fbuf.r2z, sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.px, sizeof(float))); checkCudaErrors(cudaMalloc((void**)&fbuf.py, sizeof(float))); checkCudaErrors(cudaMalloc((void**)&fbuf.pz, sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.Apx, sizeof(float))); checkCudaErrors(cudaMalloc((void**)&fbuf.Apy, sizeof(float))); checkCudaErrors(cudaMalloc((void**)&fbuf.Apz, sizeof(float)));
//porous
//checkCudaErrors(cudaMalloc((void**)&fbuf.porosity, sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.density_solid, sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.pressure_water, sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.totalDis, sizeof(float)*MAX_SOLIDNUM));
checkCudaErrors(cudaMalloc((void**)&fbuf.solidCount, sizeof(int)));
//checkCudaErrors(cudaMalloc((void**)&fbuf.Saturation, sizeof(float)));
//checkCudaErrors(cudaMalloc((void**)&fbuf.AbsorbedFluidVolume, sizeof(float)));
//checkCudaErrors(cudaMalloc((void**)&fbuf.Saturation, sizeof(float)));
//checkCudaErrors(cudaMalloc((void**)&fbuf.DeltaSaturation, sizeof(float)));
//checkCudaErrors(cudaMalloc((void**)&fbuf.elasticVolume, sizeof(float)));
//checkCudaErrors(cudaMalloc((void**)&fbuf.gradPressure, sizeof(float3)));
checkCudaErrors(cudaMalloc((void**)&fbuf.poroVel, sizeof(float3)));
//checkCudaErrors(cudaMalloc((void**)&fbuf.fluidVel, sizeof(float3)));
preallocBlockSumsInt ( 1 );
};
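// Note: cudaInit above only creates tiny placeholder allocations for most device
// buffers; the full per-particle arrays are allocated again in FluidSetupCUDA
// below, sized by EMIT_BUF_RATIO * fcuda.szPnts (the placeholders are not freed
// first).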
int iDivUp (int a, int b) {
return (a % b != 0) ? (a / b + 1) : (a / b);
}
inline bool isPowerOfTwo(int n) { return ((n&(n-1))==0) ; }
inline int floorPow2(int n) {
#ifdef WIN32
return 1 << (int)logb((float)n);
#else
int exp;
frexp((float)n, &exp);
return 1 << (exp - 1);
#endif
}
// Compute number of blocks to create
void computeNumBlocks (int numPnts, int maxThreads, int &numBlocks, int &numThreads)
{
numThreads = min( maxThreads, numPnts );
numBlocks = iDivUp ( numPnts, numThreads );
}
void FluidClearCUDA ()
{
checkCudaErrors ( cudaFree ( fbuf.mpos ) );
checkCudaErrors ( cudaFree ( fbuf.maccel ) );
checkCudaErrors ( cudaFree ( fbuf.vel_mid));
checkCudaErrors ( cudaFree ( fbuf.mveleval ) );
checkCudaErrors ( cudaFree ( fbuf.mforce ) );
checkCudaErrors ( cudaFree ( fbuf.poroForce));
checkCudaErrors(cudaFree(fbuf.fluidForce));
checkCudaErrors ( cudaFree ( fbuf.mpress ) );
checkCudaErrors ( cudaFree ( fbuf.mdensity ) );
checkCudaErrors ( cudaFree ( fbuf.mgcell ) );
checkCudaErrors ( cudaFree ( fbuf.mgndx ) );
checkCudaErrors ( cudaFree ( fbuf.mclr ) );
#ifdef NEW_BOUND
checkCudaErrors ( cudaFree ( fbuf.misbound ) );
#endif
//checkCudaErrors ( cudaFree ( fbuf.mcluster ) );
//multi fluid
checkCudaErrors ( cudaFree ( fbuf.mf_alpha ) );
checkCudaErrors ( cudaFree ( fbuf.mf_alpha_next ) );
//checkCudaErrors ( cudaFree ( fbuf.mf_pressure_modify ) );
checkCudaErrors ( cudaFree ( fbuf.mf_vel_phrel) );
checkCudaErrors ( cudaFree ( fbuf.mf_restdensity ) );
checkCudaErrors ( cudaFree ( fbuf.mf_restdensity_out));
checkCudaErrors ( cudaFree ( fbuf.mf_restmass ) );
checkCudaErrors ( cudaFree ( fbuf.mf_alpha_sum));
checkCudaErrors ( cudaFree ( fbuf.mf_visc ) );
//checkCudaErrors ( cudaFree ( fbuf.mf_velxcor ) );
//checkCudaErrors ( cudaFree ( fbuf.mf_alphagrad ) );
checkCudaErrors(cudaFree(fbuf.mf_alphachange));
//checkCudaErrors ( cudaFree ( fbuf.density_fluid ) );
checkCudaErrors ( cudaFree ( fbuf.msortbuf ) );
checkCudaErrors ( cudaFree ( fbuf.mgrid ) );
checkCudaErrors ( cudaFree ( fbuf.mgridcnt ) );
//new sort
checkCudaErrors ( cudaFree ( fbuf.midsort ) );
checkCudaErrors ( cudaFree ( fbuf.mgridoff ) );
checkCudaErrors ( cudaFree ( fbuf.mgridactive ) );
//an implicit SPH formulation for elastic body
checkCudaErrors ( cudaFree(fbuf.gradDeform));
checkCudaErrors ( cudaFree(fbuf.elasticID));
checkCudaErrors ( cudaFree(fbuf.Rotation));
//checkCudaErrors ( cudaFree(fbuf.mf_fluidPercent));
//checkCudaErrors ( cudaFree(fbuf.poroDriftVel));
//checkCudaErrors ( cudaFree(fbuf.percentChange));
checkCudaErrors(cudaFree(fbuf.divDarcyFlux));
checkCudaErrors(cudaFree(fbuf.isInside));
//checkCudaErrors(cudaFree(fbuf.CorrectL));
checkCudaErrors(cudaFree(fbuf.SurfaceForce));
//elastic information
checkCudaErrors(cudaFree(fbuf.particleID));
checkCudaErrors(cudaFree(fbuf.initialVolume));
checkCudaErrors(cudaFree(fbuf.neighborNum));
checkCudaErrors(cudaFree(fbuf.neighborID));
checkCudaErrors(cudaFree(fbuf.neighborDistance));
checkCudaErrors(cudaFree(fbuf.kernelGrad));
checkCudaErrors(cudaFree(fbuf.kernelRotate));
checkCudaErrors(cudaFree(fbuf.neighborIndex));
//checkCudaErrors(cudaFree(fbuf.colorField));
checkCudaErrors(cudaFree(fbuf.volumetricStrain));
checkCudaErrors(cudaFree(fbuf.bx)); checkCudaErrors(cudaFree(fbuf.by)); checkCudaErrors(cudaFree(fbuf.bz));
checkCudaErrors(cudaFree(fbuf.vx)); checkCudaErrors(cudaFree(fbuf.vy)); checkCudaErrors(cudaFree(fbuf.vz));
checkCudaErrors(cudaFree(fbuf.rx)); checkCudaErrors(cudaFree(fbuf.ry)); checkCudaErrors(cudaFree(fbuf.rz));
checkCudaErrors(cudaFree(fbuf.r2x)); checkCudaErrors(cudaFree(fbuf.r2y)); checkCudaErrors(cudaFree(fbuf.r2z));
checkCudaErrors(cudaFree(fbuf.px)); checkCudaErrors(cudaFree(fbuf.py)); checkCudaErrors(cudaFree(fbuf.pz));
checkCudaErrors(cudaFree(fbuf.Apx)); checkCudaErrors(cudaFree(fbuf.Apy)); checkCudaErrors(cudaFree(fbuf.Apz));
checkCudaErrors(cudaFree(fbuf.normal));
checkCudaErrors(cudaFree(fbuf.isHead));
checkCudaErrors(cudaFree(fbuf.frame));
checkCudaErrors(cudaFree(fbuf.isSurface));
//porous
//checkCudaErrors(cudaFree(fbuf.porosity));
checkCudaErrors(cudaFree(fbuf.density_solid));
checkCudaErrors(cudaFree(fbuf.pressure_water));
checkCudaErrors(cudaFree(fbuf.solidCount));
checkCudaErrors(cudaFree(fbuf.totalDis));
//checkCudaErrors(cudaFree(fbuf.AbsorbedFluidVolume));
//checkCudaErrors(cudaFree(fbuf.Saturation));
//checkCudaErrors(cudaFree(fbuf.DeltaSaturation));
//checkCudaErrors(cudaFree(fbuf.elasticVolume));
//checkCudaErrors(cudaFree(fbuf.gradPressure));
checkCudaErrors(cudaFree(fbuf.poroVel));
//checkCudaErrors(cudaFree(fbuf.fluidVel));
//IISPH
checkCudaErrors(cudaFree(fbuf.aii));
checkCudaErrors(cudaFree(fbuf.pressForce));
checkCudaErrors(cudaFree(fbuf.delta_density));
//pressure boundary for IISPH
checkCudaErrors(cudaFree(fbuf.volume));
checkCudaErrors(cudaFree(fbuf.rest_volume));
checkCudaErrors(cudaFree(fbuf.source));
checkCudaErrors(cudaFree(fbuf.colorValue));
checkCudaErrors(cudaFree(fbuf.rest_colorValue));
}
void FluidSetupRotationCUDA ( float pan_r,float omega,int loadwhich, float capillaryForceRatio)
{
fcuda.pan_r = pan_r;
fcuda.omega = omega;
fcuda.loadwhich = loadwhich;
fcuda.capillaryForceRatio = capillaryForceRatio;
}
float FluidSetupCUDA ( int num, int gsrch, int3 res, float3 size, float3 delta, float3 gmin, float3 gmax, int total, int chk)
{
float cudaMem = 0;
fcuda.pnum = num;
fcuda.gridRes = res;
fcuda.gridSize = size;
fcuda.gridDelta = delta;
fcuda.gridMin = gmin;
fcuda.gridMax = gmax;
fcuda.gridTotal = total;
fcuda.gridSrch = gsrch;
fcuda.gridAdjCnt = gsrch*gsrch*gsrch;
fcuda.gridScanMax = res;
fcuda.gridScanMax -= make_int3( fcuda.gridSrch, fcuda.gridSrch, fcuda.gridSrch );
fcuda.chk = chk;
fcuda.mf_up=0;
// Build Adjacency Lookup
int cell = 0;
for (int y=0; y < gsrch; y++ )
for (int z=0; z < gsrch; z++ )
for (int x=0; x < gsrch; x++ )
fcuda.gridAdj [ cell++] = ( y * fcuda.gridRes.z+ z )*fcuda.gridRes.x + x ;
printf ( "CUDA Adjacency Table\n");
for (int n=0; n < fcuda.gridAdjCnt; n++ ) {
printf ( " ADJ: %d, %d\n", n, fcuda.gridAdj[n] );
}
// Compute number of blocks and threads
computeNumBlocks ( fcuda.pnum, 384, fcuda.numBlocks, fcuda.numThreads); // particles
computeNumBlocks ( fcuda.gridTotal, 384, fcuda.gridBlocks, fcuda.gridThreads); // grid cell
// Allocate particle buffers
fcuda.szPnts = (fcuda.numBlocks * fcuda.numThreads);
printf ( "CUDA Allocate: \n" );
printf ( " Pnts: %d, t:%dx%d=%d, Size:%d\n", fcuda.pnum, fcuda.numBlocks, fcuda.numThreads, fcuda.numBlocks*fcuda.numThreads, fcuda.szPnts);
printf ( " Grid: %d, t:%dx%d=%d, bufGrid:%d, Res: %dx%dx%d\n", fcuda.gridTotal, fcuda.gridBlocks, fcuda.gridThreads, fcuda.gridBlocks*fcuda.gridThreads, fcuda.szGrid, (int) fcuda.gridRes.x, (int) fcuda.gridRes.y, (int) fcuda.gridRes.z );
checkCudaErrors ( cudaMalloc ( (void**) &fbuf.mpos, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float) * 3));
checkCudaErrors(cudaMalloc((void**)&fbuf.mveleval, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float) * 3));
checkCudaErrors(cudaMalloc((void**)&fbuf.mpress, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.mgcell, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(uint)));
checkCudaErrors(cudaMalloc((void**)&fbuf.mgndx, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(uint)));
int temp_size = EMIT_BUF_RATIO*(2 * (sizeof(float) * 3) + sizeof(float)+ 2 *sizeof(uint));
#ifdef NEW_BOUND
checkCudaErrors ( cudaMalloc ( (void**) &fbuf.misbound, EMIT_BUF_RATIO*fcuda.szPnts*sizeof(int)) );
temp_size += EMIT_BUF_RATIO*sizeof(int);
#endif
checkCudaErrors(cudaMalloc((void**)&fbuf.isInside, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(bool)));
temp_size += EMIT_BUF_RATIO * sizeof(bool);
//multi fluid
checkCudaErrors ( cudaMalloc ( (void**) &fbuf.mf_alpha, EMIT_BUF_RATIO*fcuda.szPnts*sizeof(float)*MAX_FLUIDNUM )); //float* num
checkCudaErrors ( cudaMalloc ( (void**) &fbuf.mf_alpha_next, EMIT_BUF_RATIO*fcuda.szPnts*sizeof(float)*MAX_FLUIDNUM ) ); //float* num
checkCudaErrors ( cudaMalloc ( (void**) &fbuf.mf_restmass, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)));
temp_size += EMIT_BUF_RATIO*(2*MAX_FLUIDNUM*sizeof(float) + sizeof(float));
checkCudaErrors ( cudaMalloc ( (void**) &fbuf.MFtype, EMIT_BUF_RATIO*fcuda.szPnts*sizeof(int) ) ); //indicator function
temp_size += EMIT_BUF_RATIO*(sizeof(int));
//an implicit SPH formulation for elastic body
checkCudaErrors ( cudaMalloc ( (void**)&fbuf.elasticID, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(uint)));
checkCudaErrors ( cudaMalloc ( (void**)&fbuf.mf_beta, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)*MAX_FLUIDNUM * MAX_SOLIDNUM));
checkCudaErrors ( cudaMalloc ( (void**)&fbuf.mf_beta_next, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float) * MAX_FLUIDNUM * MAX_SOLIDNUM));
temp_size += EMIT_BUF_RATIO*(2*sizeof(float)*MAX_FLUIDNUM* MAX_SOLIDNUM +sizeof(uint));
checkCudaErrors ( cudaMalloc ( (void**) &fbuf.msortbuf, EMIT_BUF_RATIO*fcuda.szPnts*temp_size ) );
cudaMem += EMIT_BUF_RATIO*fcuda.szPnts*temp_size * 2;
//no sort values
checkCudaErrors(cudaMalloc((void**)&fbuf.density_solid, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.gradDeform, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float) * 9));
checkCudaErrors(cudaMalloc((void**)&fbuf.Rotation, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float) * 9));
checkCudaErrors(cudaMalloc((void**)&fbuf.mf_vel_phrel, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float) * 3 * MAX_FLUIDNUM)); //float*3*num
checkCudaErrors(cudaMalloc((void**)&fbuf.mf_restdensity, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.mf_restdensity_out, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)));
cudaMem += EMIT_BUF_RATIO*fcuda.szPnts *(21 * sizeof(float) + sizeof(float) * 3 * MAX_FLUIDNUM);
checkCudaErrors(cudaMalloc((void**)&fbuf.mf_alpha_sum, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.mf_visc, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.maccel, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float) * 3));
checkCudaErrors(cudaMalloc((void**)&fbuf.mforce, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float) * 3));
checkCudaErrors(cudaMalloc((void**)&fbuf.mdensity, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.mgcell, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(uint)));
checkCudaErrors(cudaMalloc((void**)&fbuf.mgndx, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(uint)));
checkCudaErrors(cudaMalloc((void**)&fbuf.mclr, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(uint)));
cudaMem += EMIT_BUF_RATIO*fcuda.szPnts * (12 * sizeof(float));
checkCudaErrors(cudaMalloc((void**)&fbuf.mf_alphachange, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)*MAX_FLUIDNUM)); //float* num
checkCudaErrors(cudaMalloc((void**)&fbuf.vel_mid, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float) * 3));
checkCudaErrors(cudaMalloc((void**)&fbuf.poroForce, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float) * 3));
checkCudaErrors(cudaMalloc((void**)&fbuf.fluidForce, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float) * 3));
cudaMem += EMIT_BUF_RATIO*fcuda.szPnts * (9 * sizeof(float) + sizeof(float)*MAX_FLUIDNUM);
checkCudaErrors(cudaMalloc((void**)&fbuf.pressure_water, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)*MAX_FLUIDNUM*MAX_SOLIDNUM));
checkCudaErrors(cudaMalloc((void**)&fbuf.gradPressure, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float3)*MAX_FLUIDNUM*MAX_SOLIDNUM));
checkCudaErrors(cudaMalloc((void**)&fbuf.totalDis, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)*MAX_SOLIDNUM));
checkCudaErrors(cudaMalloc((void**)&fbuf.solidCount, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(int)*MAX_SOLIDNUM));
checkCudaErrors(cudaMalloc((void**)&fbuf.divDarcyFlux, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)*MAX_FLUIDNUM*MAX_SOLIDNUM));
//checkCudaErrors(cudaMalloc((void**)&fbuf.isInside, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)));
cudaMem += EMIT_BUF_RATIO*fcuda.szPnts * ( sizeof(float) + 2* sizeof(float)*MAX_SOLIDNUM + 5 * sizeof(float)*MAX_FLUIDNUM*MAX_SOLIDNUM);
checkCudaErrors(cudaMalloc((void**)&fbuf.SurfaceForce, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float3)));
checkCudaErrors(cudaMalloc((void**)&fbuf.aii, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.delta_density, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.pressForce, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float) * 3));
cudaMem += EMIT_BUF_RATIO*fcuda.szPnts * (8 * sizeof(float));
//pressure boundary for IISPH
checkCudaErrors(cudaMalloc((void**)&fbuf.rest_volume, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.volume, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.source, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.colorValue, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.rest_colorValue, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.colorTensor, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float) * 9));
cudaMem += EMIT_BUF_RATIO*fcuda.szPnts * (14 * sizeof(float));
checkCudaErrors(cudaMalloc((void**)&fbuf.poroVel, EMIT_BUF_RATIO*fcuda.szPnts * sizeof(float3)*MAX_FLUIDNUM*MAX_SOLIDNUM));
// Allocate grid
fcuda.szGrid = (fcuda.gridBlocks * fcuda.gridThreads);
checkCudaErrors ( cudaMalloc ( (void**) &fbuf.mgrid, EMIT_BUF_RATIO*fcuda.szPnts*sizeof(int) ) );
checkCudaErrors ( cudaMalloc ( (void**) &fbuf.mgridcnt, fcuda.szGrid*sizeof(int) ) );
//new sort
checkCudaErrors ( cudaMalloc ( (void**) &fbuf.midsort, EMIT_BUF_RATIO*fcuda.szPnts*sizeof(uint) ) );
checkCudaErrors ( cudaMalloc ( (void**) &fbuf.mgridoff, fcuda.szGrid*sizeof(int) ) );
checkCudaErrors ( cudaMalloc ( (void**) &fbuf.mgridactive, fcuda.szGrid*sizeof(int) ) );
checkCudaErrors ( cudaMemcpyToSymbol ( simData, &fcuda, sizeof(FluidParams) ) );
cudaThreadSynchronize ();
// Prefix Sum - Preallocate Block sums for Sorting
deallocBlockSumsInt ();
preallocBlockSumsInt ( fcuda.gridTotal );
return cudaMem;
}
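// A minimal host-side usage sketch (hypothetical call order and variable names; the
// actual driver lives in the host application, not in this file):
//
//   float devMem = FluidSetupCUDA(pnum, gsrch, gridRes, gridSize, gridDelta,
//                                 gridMin, gridMax, gridTotal, chk);
//   FluidParamCUDA(ss, sr, pr, mass, rest, bmin, bmax, estiff, istiff, pbstiff,
//                  visc, damp, fmin, fmax, ffreq, gslope, gx, gy, gz, al, vl);
//   CopyToCUDA(pos, vel, veleval, force, pressure, density, cluster, gnext, clr);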
float ElasticSetupCUDA(int num,float miu,float lambda,float porosity,float* permeabilityRatio,int maxNeighborNum, float *pressRatio, float stRatio)
{
float CudaMem = 0;
fcuda.numElasticPoints = num;
fcuda.maxNeighborNum = maxNeighborNum;
printf("max neighbor num is %d\n",maxNeighborNum);
fcuda.miu = miu;
fcuda.lambda = lambda;
fcuda.rest_porosity = porosity;
fcuda.stRatio = stRatio;
for (int i = 0; i < MAX_FLUIDNUM*MAX_SOLIDNUM; ++i)
{
fcuda.mf_permeability[i] = permeabilityRatio[i];
//printf("permeability %d:%15f\n", i, permeabilityRatio[i]);
std::cout << "permeability " << i << ":" << 10000000000*permeabilityRatio[i];
fcuda.pressRatio[i] = pressRatio[i];
printf("pressure ratio:%f\n", fcuda.pressRatio[i]);
}
//fcuda.rest_permeability = permeability;
//elastic information
checkCudaErrors(cudaMalloc((void**)&fbuf.particleID, fcuda.numElasticPoints *sizeof(int)));
checkCudaErrors(cudaMalloc((void**)&fbuf.neighborNum, fcuda.numElasticPoints * sizeof(uint)));
checkCudaErrors(cudaMalloc((void**)&fbuf.initialVolume, fcuda.numElasticPoints *sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.normal, fcuda.numElasticPoints * sizeof(float3)));
checkCudaErrors(cudaMalloc((void**)&fbuf.isSurface, fcuda.numElasticPoints * sizeof(int)));
checkCudaErrors(cudaMalloc((void**)&fbuf.isHead, fcuda.numElasticPoints * sizeof(int)));
checkCudaErrors(cudaMalloc((void**)&fbuf.frame, fcuda.numElasticPoints * sizeof(int)));
checkCudaErrors(cudaMalloc((void**)&fbuf.volumetricStrain, fcuda.numElasticPoints * sizeof(float)));
CudaMem += fcuda.numElasticPoints * (7 * sizeof(float) + sizeof(float3));
checkCudaErrors(cudaMalloc((void**)&fbuf.bx, fcuda.numElasticPoints * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&fbuf.by, fcuda.numElasticPoints * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&fbuf.bz, fcuda.numElasticPoints * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.vx, fcuda.numElasticPoints * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&fbuf.vy, fcuda.numElasticPoints * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&fbuf.vz, fcuda.numElasticPoints * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.rx, fcuda.numElasticPoints * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&fbuf.ry, fcuda.numElasticPoints * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&fbuf.rz, fcuda.numElasticPoints * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.r2x, fcuda.numElasticPoints * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&fbuf.r2y, fcuda.numElasticPoints * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&fbuf.r2z, fcuda.numElasticPoints * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.px, fcuda.numElasticPoints * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&fbuf.py, fcuda.numElasticPoints * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&fbuf.pz, fcuda.numElasticPoints * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.Apx, fcuda.numElasticPoints * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&fbuf.Apy, fcuda.numElasticPoints * sizeof(float))); checkCudaErrors(cudaMalloc((void**)&fbuf.Apz, fcuda.numElasticPoints * sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&fbuf.neighborID, fcuda.numElasticPoints *sizeof(uint)* maxNeighborNum));
checkCudaErrors(cudaMalloc((void**)&fbuf.kernelRotate, fcuda.numElasticPoints * sizeof(float3) * maxNeighborNum));
checkCudaErrors(cudaMalloc((void**)&fbuf.neighborDistance, fcuda.numElasticPoints *sizeof(float3)* maxNeighborNum));
checkCudaErrors(cudaMalloc((void**)&fbuf.kernelGrad, fcuda.numElasticPoints * sizeof(float3) * maxNeighborNum));
checkCudaErrors(cudaMalloc((void**)&fbuf.neighborIndex, fcuda.numElasticPoints * sizeof(uint) * maxNeighborNum));
CudaMem += fcuda.numElasticPoints *maxNeighborNum*(2 * sizeof(uint) + 3 * sizeof(float3));
cudaThreadSynchronize();
return CudaMem;
}
void PorousParamCUDA(float bulkModulus_porous, float bulkModulus_grains, float bulkModulus_solid, float bulkModulus_fluid, float poroDeformStrength, float capillary, float relax2)
{
fcuda.bulkModulus_porous = bulkModulus_porous;
fcuda.bulkModulus_grains = bulkModulus_grains;
fcuda.bulkModulus_solid = bulkModulus_solid;
fcuda.bulkModulus_fluid = bulkModulus_fluid;
fcuda.poroDeformStrength = poroDeformStrength;
fcuda.relax2 = relax2;
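	// Interpretation of the mixture stiffness computed below (an assumption based on the
	// formula, not stated in the original source): alpha = 1 - K_porous/K_grains acts as
	// a Biot-like coefficient, and
	//   CoCompressibility = K_solid*K_fluid / ((alpha - phi0)*K_fluid + phi0*K_solid)
	// with phi0 = rest_porosity couples the solid and fluid bulk moduli.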
float alpha = 1 - bulkModulus_porous / bulkModulus_grains;
fcuda.CoCompressibility = bulkModulus_solid*bulkModulus_fluid / ((alpha - fcuda.rest_porosity)*bulkModulus_fluid + fcuda.rest_porosity*bulkModulus_solid);
fcuda.capillary = capillary;
printf("CoCompressibility is %f\n", fcuda.CoCompressibility);
}
void FluidParamCUDA ( float ss, float sr, float pr, float mass, float rest, float3 bmin, float3 bmax, float estiff, float istiff,float pbstiff, float visc, float damp, float fmin, float fmax, float ffreq, float gslope, float gx, float gy, float gz, float al, float vl )
{
fcuda.psimscale = ss;
fcuda.psmoothradius = sr;
fcuda.pradius = pr;
fcuda.r2 = sr * sr;
fcuda.pmass = mass;
fcuda.prest_dens = rest;
fcuda.pboundmin = bmin;
fcuda.pboundmax = bmax;
fcuda.pextstiff = estiff;
fcuda.pintstiff = istiff;
fcuda.pbstiff = pbstiff;
fcuda.pvisc = visc;
fcuda.pdamp = damp;
fcuda.pforce_min = fmin;
fcuda.pforce_max = fmax;
fcuda.pforce_freq = ffreq;
fcuda.pground_slope = gslope;
fcuda.pgravity = make_float3( gx, gy, gz );
fcuda.AL = al;
fcuda.AL2 = al * al;
fcuda.VL = vl;
fcuda.VL2 = vl * vl;
printf ( "Bound Min: %f %f %f\n", bmin.x, bmin.y, bmin.z );
printf ( "Bound Max: %f %f %f\n", bmax.x, bmax.y, bmax.z );
fcuda.pdist = pow ( fcuda.pmass / fcuda.prest_dens, 1/3.0f );
fcuda.poly6kern = 315.0f / (64.0f * 3.141592 * pow( sr, 9.0f) );
fcuda.spikykern = -45.0f / (3.141592 * pow( sr, 6.0f) );
fcuda.lapkern = 45.0f / (3.141592 * pow( sr, 6.0f) );
//fcuda.CubicSplineKern1 = 1 / (4 * 3.141592*pow(sr, 3));
//fcuda.CubicSplineKern2 = 1 / (3.141592*pow(sr, 3));
fcuda.CubicSplineKern = 8 / (3.141592*pow(sr, 3));
fcuda.gradCubicSplineKern = 48 / (3.141592*pow(sr, 4));
fcuda.CubicSplineKern1 = 1 / (4 * 3.141592*pow(sr, 3));
fcuda.CubicSplineKern2 = 8 / (3.141592*pow(sr, 3));
fcuda.gradCubicSplineKern1 = -3 / (4 * 3.141592*pow(sr, 4));
fcuda.gradCubicSplineKern2 = 1 / (3.141592*pow(sr, 4));
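	// The 8/(pi*h^3) and 48/(pi*h^4) values are consistent with the usual normalization
	// and gradient prefactor of the 3D cubic-spline kernel with support radius h; the
	// Kern1/Kern2 and gradKern1/gradKern2 pairs appear to cover the two polynomial
	// branches of the spline (an assumption -- the kernel evaluation code lives elsewhere).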
//printf("fcuda.gradCubicSplineKern1 is %f,fcuda.gradCubicSplineKern2 is %f,fcuda.spikykern is %f\n",
// fcuda.gradCubicSplineKern1, fcuda.gradCubicSplineKern2, fcuda.spikykern);
checkCudaErrors( cudaMemcpyToSymbol ( simData, &fcuda, sizeof(FluidParams) ) );
cudaThreadSynchronize ();
}
void ParamUpdateCUDA(bool hidebound, bool hidefluid, bool hidesolid, bool hiderigid, float* colorValue)
{
fcuda.HideBound = hidebound;
fcuda.HideFluid = hidefluid;
fcuda.HideSolid = hidesolid;
fcuda.HideRigid = hiderigid;
for(int i=0;i<MAX_FLUIDNUM;++i)
fcuda.colorValue[i] = colorValue[i];
checkCudaErrors(cudaMemcpyToSymbol(simData, &fcuda, sizeof(FluidParams)));
cudaThreadSynchronize();
}
void FluidParamCUDA_projectu(float visc_factor, float fluid_pfactor,float solid_pfactor,float bdamp)
{
fcuda.visc_factor = visc_factor;
fcuda.fluid_pfactor = fluid_pfactor;
fcuda.solid_pfactor = solid_pfactor;
fcuda.bdamp = bdamp;
fcuda.gravityfree = 0;
}
void FluidMfParamCUDA ( float *dens, float *visc, float *mass, float diffusion, float catnum, float dt, float3 cont, float3 mb1,float3 mb2, float relax,int example)
{
fcuda.mf_catnum = catnum;
fcuda.mf_diffusion = diffusion;
fcuda.mf_dt = dt;
for(int i=0;i<MAX_FLUIDNUM;i++)
{
fcuda.mf_dens[i] = dens[i];
fcuda.mf_visc[i] = visc[i];
fcuda.mf_mass[i] = mass[i];
}
fcuda.mf_multiFlagPNum = 0;
//fcuda.mf_splitVolume = splitV;
//fcuda.mf_mergeVolume = mergeV;
fcuda.mf_maxPnum = fcuda.pnum * EMIT_BUF_RATIO;
fcuda.cont = cont.x; fcuda.cont1 = cont.y; fcuda.cont2 = cont.z;
fcuda.mb1.x = mb1.x; fcuda.mb1.y = mb1.y; fcuda.mb1.z = mb1.z;
fcuda.mb2.x = mb2.x; fcuda.mb2.y = mb2.y; fcuda.mb2.z = mb2.z;
fcuda.bxmin = mb1.x; fcuda.by = mb1.y; fcuda.bzmin = mb1.z;
fcuda.bxmax = mb2.x; fcuda.bzmax = mb2.z;
fcuda.relax = relax;
fcuda.example = example;
checkCudaErrors( cudaMemcpyToSymbol ( simData, &fcuda, sizeof(FluidParams) ) );
cudaThreadSynchronize ();
}
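// Prefix-sum workspace: each scan level processes 2*BLOCK_SIZE elements per block, so
// the element count shrinks by that factor per level; g_scanBlockSumsInt[level] holds
// the per-block sums that are scanned recursively at the next level.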
void preallocBlockSumsInt (unsigned int maxNumElements)
{
	assert(g_numEltsAllocated == 0); // shouldn't be called twice without an intervening deallocBlockSumsInt()
g_numEltsAllocated = maxNumElements;
unsigned int blockSize = BLOCK_SIZE; // max size of the thread blocks
unsigned int numElts = maxNumElements;
int level = 0;
do {
unsigned int numBlocks = max(1, (int)ceil((float)numElts / (2.f * blockSize)));
if (numBlocks > 1) level++;
numElts = numBlocks;
} while (numElts > 1);
g_scanBlockSumsInt = (int**) malloc(level * sizeof(int*));
g_numLevelsAllocated = level;
numElts = maxNumElements;
level = 0;
do {
unsigned int numBlocks = max(1, (int)ceil((float)numElts / (2.f * blockSize)));
if (numBlocks > 1) checkCudaErrors ( cudaMalloc((void**) &g_scanBlockSumsInt[level++], numBlocks * sizeof(int)) );
numElts = numBlocks;
} while (numElts > 1);
}
void deallocBlockSumsInt()
{
for (unsigned int i = 0; i < g_numLevelsAllocated; i++) cudaFree(g_scanBlockSumsInt[i]);
free( (void**)g_scanBlockSumsInt );
g_scanBlockSumsInt = 0;
g_numEltsAllocated = 0;
g_numLevelsAllocated = 0;
}
//Copy buffers
void CopyToCUDA ( float* pos, float* vel, float* veleval, float* force, float* pressure, float* density, uint* cluster, uint* gnext, char* clr)
{
// Send particle buffers
int numPoints = fcuda.pnum;
checkCudaErrors( cudaMemcpy ( fbuf.mpos, pos, numPoints*sizeof(float)*3, cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy ( fbuf.maccel, vel, numPoints*sizeof(float)*3, cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy ( fbuf.mveleval, veleval, numPoints*sizeof(float)*3, cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy ( fbuf.mforce, force, numPoints*sizeof(float)*3, cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy ( fbuf.mpress, pressure, numPoints*sizeof(float), cudaMemcpyHostToDevice ) );
//checkCudaErrors( cudaMemcpy ( fbuf.mpress_pre, pressure, numPoints * sizeof(float), cudaMemcpyHostToDevice));
checkCudaErrors( cudaMemcpy ( fbuf.mdensity, density, numPoints*sizeof(float), cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy ( fbuf.mclr, clr, numPoints*sizeof(uint), cudaMemcpyHostToDevice ) );
cudaThreadSynchronize ();
}
void CopyMfToCUDA ( float* alpha, float* alpha_pre, float* pressure_modify, float* vel_phrel, float* restmass, float* restdensity, float* visc, float* velxcor, float* alphagrad)
{
// Send particle buffers
int numPoints = fcuda.pnum;
checkCudaErrors( cudaMemcpy ( fbuf.mf_alpha, alpha, numPoints*MAX_FLUIDNUM*sizeof(float), cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy ( fbuf.mf_alpha_next, alpha, numPoints*MAX_FLUIDNUM*sizeof(float), cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy ( fbuf.mf_vel_phrel, vel_phrel, numPoints*MAX_FLUIDNUM*sizeof(float)*3, cudaMemcpyHostToDevice ) );
//checkCudaErrors( cudaMemcpy ( fbuf.mf_alphagrad, alphagrad, numPoints*MAX_FLUIDNUM*sizeof(float)*3, cudaMemcpyHostToDevice ) );
//checkCudaErrors( cudaMemcpy ( fbuf.mf_pressure_modify, pressure_modify, numPoints*sizeof(float), cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy ( fbuf.mf_restmass, restmass, numPoints*sizeof(float), cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy ( fbuf.mf_restdensity, restdensity, numPoints*sizeof(float), cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy ( fbuf.mf_visc, visc, numPoints*sizeof(float), cudaMemcpyHostToDevice ) );
//checkCudaErrors( cudaMemcpy ( fbuf.mf_velxcor, velxcor, numPoints*sizeof(float)*3, cudaMemcpyHostToDevice ) );
//checkCudaErrors( cudaMemcpy ( fbuf.MFtype, mftype, numPoints*sizeof(int), cudaMemcpyHostToDevice ) );
cudaThreadSynchronize ();
}
void CopyBoundToCUDA (int* isbound )
{
int numPoints = fcuda.pnum;
checkCudaErrors( cudaMemcpy ( fbuf.misbound, isbound, numPoints*sizeof(int), cudaMemcpyHostToDevice ) );
cudaThreadSynchronize ();
}
void CopyToCUDA_Uproject(int* mftype)
{
int numPoints = fcuda.pnum;
checkCudaErrors( cudaMemcpy( fbuf.MFtype, mftype, numPoints*sizeof(int), cudaMemcpyHostToDevice));
cudaThreadSynchronize ();
}
void CopyToCUDA_elastic(uint* elasticID,float* porosity,float*signDistance)
{
int numPoints = fcuda.pnum;
int numElasticPoints = fcuda.numElasticPoints;
checkCudaErrors(cudaMemcpy(fbuf.elasticID, elasticID, numPoints * sizeof(uint), cudaMemcpyHostToDevice));
//checkCudaErrors(cudaMemcpy(fbuf.porosity, porosity, numElasticPoints * sizeof(float), cudaMemcpyHostToDevice));
//checkCudaErrors(cudaMemcpy(fbuf.colorField, signDistance, numElasticPoints * sizeof(float), cudaMemcpyHostToDevice));
cudaThreadSynchronize();
}
void CopyFromCUDA ( float* pos, float* vel, float* veleval, float* force, float* pressure, float* density, uint* cluster, uint* gnext, char* clr, int mode)
{
// Return particle buffers
int numPoints = fcuda.pnum;
//printf("sizeof(float3) is %d and sizeof(float) is %d\n", sizeof(float3), sizeof(float));
//printf("fbuf.mpos address : OX%p\n", fbuf.mpos);
//printf("numPoints is %d\n", numPoints);
if ( pos != 0x0 ) checkCudaErrors( cudaMemcpy ( pos, fbuf.mpos, numPoints*sizeof(float)*3, cudaMemcpyDeviceToHost ) );
if ( clr != 0x0 ) checkCudaErrors( cudaMemcpy ( clr, fbuf.mclr, numPoints*sizeof(uint), cudaMemcpyDeviceToHost ) );
if( mode == 2){
checkCudaErrors( cudaMemcpy ( vel, fbuf.maccel, numPoints*sizeof(float)*3, cudaMemcpyDeviceToHost ) );
checkCudaErrors( cudaMemcpy ( veleval, fbuf.mveleval, numPoints*sizeof(float)*3, cudaMemcpyDeviceToHost ) );
checkCudaErrors( cudaMemcpy ( force, fbuf.mforce, numPoints*sizeof(float)*3, cudaMemcpyDeviceToHost ) );
checkCudaErrors( cudaMemcpy ( pressure, fbuf.mpress, numPoints*sizeof(float), cudaMemcpyDeviceToHost ) );
checkCudaErrors( cudaMemcpy ( density, fbuf.mdensity, numPoints*sizeof(float), cudaMemcpyDeviceToHost ) );
}
cudaThreadSynchronize ();
}
void CopyMfFromCUDA ( float* alpha, float* alpha_pre, float* pressure_modify, float* vel_phrel, float* restmass, float* restdensity, float* visc, float* velxcor, float* alphagrad, int mode)
{
int numPoints = fcuda.pnum;
checkCudaErrors( cudaMemcpy ( alpha, fbuf.mf_alpha, numPoints*MAX_FLUIDNUM*sizeof(float), cudaMemcpyDeviceToHost ) );
checkCudaErrors( cudaMemcpy ( restmass, fbuf.mf_restmass, numPoints*sizeof(float), cudaMemcpyDeviceToHost ) );
checkCudaErrors( cudaMemcpy ( restdensity, fbuf.mf_restdensity, numPoints*sizeof(float), cudaMemcpyDeviceToHost ) );
if( mode == 2){
// Send particle buffers
checkCudaErrors( cudaMemcpy ( alpha_pre, fbuf.mf_alpha_next, numPoints*MAX_FLUIDNUM*sizeof(float), cudaMemcpyDeviceToHost ) );
//checkCudaErrors( cudaMemcpy ( pressure_modify, fbuf.mf_pressure_modify, numPoints*sizeof(float), cudaMemcpyDeviceToHost ) );
checkCudaErrors( cudaMemcpy ( vel_phrel, fbuf.mf_vel_phrel, numPoints*MAX_FLUIDNUM*sizeof(float)*3, cudaMemcpyDeviceToHost ) );
checkCudaErrors( cudaMemcpy ( visc, fbuf.mf_visc, numPoints*sizeof(float), cudaMemcpyDeviceToHost ) );
//checkCudaErrors( cudaMemcpy ( velxcor, fbuf.mf_velxcor, numPoints*sizeof(float)*3, cudaMemcpyDeviceToHost ) );
//checkCudaErrors( cudaMemcpy ( alphagrad, fbuf.mf_alphagrad, numPoints*MAX_FLUIDNUM*sizeof(float)*3, cudaMemcpyDeviceToHost ) );
}
}
void CopyBoundFromCUDA (int* isbound )
{
int numPoints = fcuda.pnum;
if ( isbound != 0x0 ) checkCudaErrors( cudaMemcpy ( isbound, fbuf.misbound, numPoints*sizeof(int), cudaMemcpyDeviceToHost ) );
cudaThreadSynchronize ();
}
void CopyFromCUDA_Uproject(int* mftype, float*beta)
{
int numPoints = fcuda.pnum;
checkCudaErrors( cudaMemcpy( mftype, fbuf.MFtype, numPoints*sizeof(int), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(beta, fbuf.mf_beta, numPoints * sizeof(float)*MAX_FLUIDNUM*MAX_SOLIDNUM, cudaMemcpyDeviceToHost));
cudaThreadSynchronize ();
}
//Called when particles emitted
void CopyEmitToCUDA ( float* pos, float* vel, float* veleval, float* force, float* pressure, float* density, uint* cluster, uint* gnext, char* clr, int startnum, int numcount,int* isbound )
{
// Send particle buffers
checkCudaErrors( cudaMemcpy ( fbuf.mpos+startnum, pos+startnum*3, numcount*sizeof(float)*3, cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy ( fbuf.maccel+startnum, vel+startnum*3, numcount*sizeof(float)*3, cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy ( fbuf.mveleval+startnum, veleval+startnum*3, numcount*sizeof(float)*3, cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy ( fbuf.mforce+startnum, force+startnum*3, numcount*sizeof(float)*3, cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy ( fbuf.mpress+startnum, pressure+startnum, numcount*sizeof(float), cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy ( fbuf.mdensity+startnum, density+startnum, numcount*sizeof(float), cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy ( fbuf.mclr+startnum, clr+startnum, numcount*sizeof(uint), cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy ( fbuf.misbound + startnum, isbound + startnum, numcount*sizeof(int), cudaMemcpyHostToDevice ) );
cudaThreadSynchronize ();
}
void CopyEmitMfToCUDA ( float* alpha, float* alpha_pre, float* pressure_modify, float* vel_phrel, float* restmass, float* restdensity, float* visc, float* velxcor, float* alphagrad,int startnum, int numcount)
{
// Send particle buffers
int mulstartnum = startnum*MAX_FLUIDNUM;
checkCudaErrors( cudaMemcpy ( fbuf.mf_alpha + mulstartnum, alpha + mulstartnum, numcount*MAX_FLUIDNUM*sizeof(float), cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy ( fbuf.mf_alpha_next + mulstartnum, alpha_pre + mulstartnum, numcount*MAX_FLUIDNUM*sizeof(float), cudaMemcpyHostToDevice ) );
//checkCudaErrors( cudaMemcpy ( fbuf.mf_pressure_modify+startnum, pressure_modify+startnum, numcount*sizeof(float), cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy ( fbuf.mf_vel_phrel + mulstartnum, vel_phrel + mulstartnum*3, numcount*MAX_FLUIDNUM*sizeof(float)*3, cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy ( fbuf.mf_restmass+startnum, restmass+startnum, numcount*sizeof(float), cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy ( fbuf.mf_restdensity+startnum, restdensity+startnum, numcount*sizeof(float), cudaMemcpyHostToDevice ) );
checkCudaErrors( cudaMemcpy ( fbuf.mf_visc+startnum, visc+startnum, numcount*sizeof(float), cudaMemcpyHostToDevice ) );
//checkCudaErrors( cudaMemcpy ( fbuf.mf_velxcor+startnum, velxcor+startnum*3, numcount*sizeof(float)*3, cudaMemcpyHostToDevice ) );
//checkCudaErrors( cudaMemcpy ( fbuf.mf_alphagrad + mulstartnum, alphagrad + mulstartnum*3, numcount*MAX_FLUIDNUM*sizeof(float)*3, cudaMemcpyHostToDevice ) );
cudaThreadSynchronize ();
}
void UpdatePNumCUDA( int newPnum)
{
fcuda.pnum = newPnum;
computeNumBlocks ( fcuda.pnum, 384, fcuda.numBlocks, fcuda.numThreads); //threads changed!
fcuda.szPnts = (fcuda.numBlocks * fcuda.numThreads); //szPnts changed!
checkCudaErrors( cudaMemcpyToSymbol ( simData, &fcuda, sizeof(FluidParams) ) );
cudaThreadSynchronize ();
}
int MfGetPnum(){
return fcuda.pnum;
}
//Called in RunSimulateCudaFull
void InitialSortCUDA( uint* gcell, uint* ccell, int* gcnt )
{
cudaMemset ( fbuf.mgridcnt, 0, fcuda.gridTotal * sizeof(int));
cudaMemset ( fbuf.mgridoff, 0, fcuda.gridTotal * sizeof(int));
cudaMemset ( fbuf.mgcell, 0, fcuda.pnum * sizeof(uint));
InitialSort<<< fcuda.numBlocks, fcuda.numThreads>>> ( fbuf, fcuda.pnum );
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf ( stderr, "CUDA ERROR: InsertParticlesCUDA: %s\n", cudaGetErrorString(error) );
}
cudaThreadSynchronize ();
// Transfer data back if requested (for validation)
if (gcell != 0x0) {
checkCudaErrors( cudaMemcpy ( gcell, fbuf.mgcell, fcuda.pnum*sizeof(uint), cudaMemcpyDeviceToHost ) );
checkCudaErrors( cudaMemcpy ( gcnt, fbuf.mgridcnt, fcuda.gridTotal*sizeof(int), cudaMemcpyDeviceToHost ) );
//checkCudaErrors( cudaMemcpy ( ccell, fbuf.mcluster, fcuda.pnum*sizeof(uint), cudaMemcpyDeviceToHost ) );
}
}
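// Sort particles by grid cell: thrust::sort_by_key orders fbuf.mgcell (keys) in place
// and carries fbuf.midsort (original particle indices) along as values; CalcFirstCnt
// and GetCnt then derive each cell's start offset and particle count from the sorted
// key array.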
void SortGridCUDA( int* goff )
{
thrust::device_ptr<uint> dev_keysg(fbuf.mgcell);
thrust::device_ptr<uint> dev_valuesg(fbuf.midsort);
thrust::sort_by_key(dev_keysg,dev_keysg+fcuda.pnum,dev_valuesg);
cudaThreadSynchronize ();
CalcFirstCnt <<< fcuda.numBlocks, fcuda.numThreads>>> ( fbuf, fcuda.pnum );
// cudaThreadSynchronize ();
cudaThreadSynchronize ();
GetCnt <<<fcuda.numBlocks,fcuda.numThreads>>> (fbuf,fcuda.pnum);
cudaThreadSynchronize ();
/*
uint* test,*test1;
test = (uint*)malloc(sizeof(uint)*fcuda.pnum);
test1 = (uint*)malloc(sizeof(uint)*fcuda.gridTotal);
cudaMemcpy(test,fbuf.mgcell,sizeof(uint)*fcuda.pnum,cudaMemcpyDeviceToHost);
cudaMemcpy(test1,fbuf.mgridoff,sizeof(uint)*fcuda.gridTotal,cudaMemcpyDeviceToHost);
for (int i = 0;i<fcuda.pnum;i++)
if (test[i]!=GRID_UNDEF)
printf("%u %u %u\n",test[i],test1[test[i]]);
*/
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR:SortGridCUDA: %s\n", cudaGetErrorString(error));
}
}
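// Counting sort, host side: stage the unsorted per-particle attributes into msortbuf at
// the BUF_* byte offsets, then let the CountingSortFull_ kernel scatter them into their
// cell-sorted positions.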
void CountingSortFullCUDA_( uint* ggrid )
{
// Transfer particle data to temp buffers
int n = fcuda.pnum;
cudaMemcpy ( fbuf.msortbuf + n*BUF_POS, fbuf.mpos, n*sizeof(float)*3, cudaMemcpyDeviceToDevice );
cudaMemcpy ( fbuf.msortbuf + n*BUF_VELEVAL, fbuf.mveleval, n*sizeof(float)*3, cudaMemcpyDeviceToDevice );
cudaMemcpy ( fbuf.msortbuf + n*BUF_PRESS, fbuf.mpress, n*sizeof(float), cudaMemcpyDeviceToDevice );
cudaMemcpy(fbuf.msortbuf + n*BUF_GCELL, fbuf.mgcell, n * sizeof(uint), cudaMemcpyDeviceToDevice);
cudaMemcpy(fbuf.msortbuf + n*BUF_GNDX, fbuf.mgndx, n * sizeof(uint), cudaMemcpyDeviceToDevice);
cudaMemcpy(fbuf.msortbuf + n*BUF_ISINSIDE, fbuf.isInside, n * sizeof(bool), cudaMemcpyDeviceToDevice);
#ifdef NEW_BOUND
cudaMemcpy(fbuf.msortbuf + n*BUF_ISBOUND, fbuf.misbound, n * sizeof(int), cudaMemcpyDeviceToDevice);
#endif
//multi fluid
cudaMemcpy(fbuf.msortbuf + n*BUF_ALPHA, fbuf.mf_alpha, n*MAX_FLUIDNUM * sizeof(float), cudaMemcpyDeviceToDevice);
cudaMemcpy(fbuf.msortbuf + n*BUF_ALPHAPRE, fbuf.mf_alpha_next, n*MAX_FLUIDNUM * sizeof(float), cudaMemcpyDeviceToDevice);
cudaMemcpy(fbuf.msortbuf + n*BUF_RMASS, fbuf.mf_restmass, n * sizeof(float), cudaMemcpyDeviceToDevice);
//porous
cudaMemcpy ( fbuf.msortbuf + n*BUF_INDICATOR, fbuf.MFtype, n*sizeof(int), cudaMemcpyDeviceToDevice );
//an implicit SPH formulation for elastic body
cudaMemcpy ( fbuf.msortbuf + n*BUF_ELASTICID, fbuf.elasticID, n * sizeof(uint), cudaMemcpyDeviceToDevice);
cudaMemcpy ( fbuf.msortbuf + n*BUF_ABSORBEDPERCENT, fbuf.mf_beta, n * MAX_FLUIDNUM * sizeof(float) * MAX_SOLIDNUM, cudaMemcpyDeviceToDevice);
cudaMemcpy(fbuf.msortbuf + n*BUF_BETANEXT, fbuf.mf_beta_next, n * MAX_FLUIDNUM * sizeof(float) * MAX_SOLIDNUM, cudaMemcpyDeviceToDevice);
//cudaMemcpy(fbuf.msortbuf + n*BUF_POROVEL, fbuf.poroVel, n *MAX_FLUIDNUM * sizeof(float3), cudaMemcpyDeviceToDevice);
// Counting Sort - pass one, determine grid counts
cudaMemset ( fbuf.mgrid, GRID_UCHAR, fcuda.pnum * sizeof(int) );
CountingSortFull_ <<< fcuda.numBlocks, fcuda.numThreads>>> ( fbuf, fcuda.pnum);
cudaThreadSynchronize ();
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf ( stderr, "CUDA ERROR:Sorting Failed: %s\n", cudaGetErrorString(error) );
}
////checkCudaErrors(cudaMemcpyFromSymbol(&(fcuda.pnum), pNumFT, sizeof(int))); //total pnum changed!
////computeNumBlocks ( fcuda.pnum, 384, fcuda.numBlocks, fcuda.numThreads); //threads changed!
////fcuda.szPnts = (fcuda.numBlocks * fcuda.numThreads); //szPnts changed!
//// printf("pnum:%d,Blocknum:%d,Threadnum:%d\n",fcuda.pnum,fcuda.numBlocks,fcuda.numThreads);
////cudaThreadSynchronize ();
}
void initSPH(float* restdensity,int* mftype)
{
initDensity<<<fcuda.numBlocks, fcuda.numThreads>>>(fbuf, fcuda.pnum);
cudaThreadSynchronize();
}
void TestFunc()
{
testFunc << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: MfFindNearestVelCUDA: %s\n", cudaGetErrorString(error));
}
cudaThreadSynchronize();
}
void MfComputePressureCUDA ()
{
//mfFindNearest<<< fcuda.numBlocks, fcuda.numThreads>>> (fbuf, fcuda.pnum);
//cudaError_t error = cudaGetLastError();
//if (error != cudaSuccess) {
// fprintf ( stderr, "CUDA ERROR: MfFindNearestVelCUDA: %s\n", cudaGetErrorString(error) );
//}
//cudaThreadSynchronize ();
mfPreComputeDensity<<< fcuda.numBlocks, fcuda.numThreads>>> ( fbuf, fcuda.pnum );
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf ( stderr, "CUDA ERROR: MfPreComputeDensityVelCUDA: %s\n", cudaGetErrorString(error) );
}
cudaThreadSynchronize ();
mfComputePressure<<< fcuda.numBlocks, fcuda.numThreads>>> ( fbuf, fcuda.pnum );
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf ( stderr, "CUDA ERROR: MfComputePressureVelCUDA: %s\n", cudaGetErrorString(error) );
}
cudaThreadSynchronize ();
}
void MfPredictAdvection(float time)
{
applyAlphaAndBeta << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: MfComputeDriftVelCUDA: %s\n", cudaGetErrorString(error));
}
cudaThreadSynchronize();
FindNearbySolid << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: compute fluid percent change CUDA: %s\n", cudaGetErrorString(error));
}
cudaThreadSynchronize();
ComputeSolidPorePressure << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: compute pore pressure CUDA: %s\n", cudaGetErrorString(error));
}
cudaThreadSynchronize();
//step1:compute density
mfPreComputeDensity << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: MfPreComputeDensityVelCUDA: %s\n", cudaGetErrorString(error));
}
cudaThreadSynchronize();
ComputeOtherForce << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum, time);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: MfComputeOtherForceCUDA: %s\n", cudaGetErrorString(error));
}
cudaThreadSynchronize();
//step2:compute intermediate velocity
computeMidVel << <fcuda.numBlocks, fcuda.numThreads >> >(fbuf, fcuda.pnum);
//updateVelocity << <fcuda.numBlocks, fcuda.numThreads >> >(time, fbuf, fcuda.pnum);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: Compute mid vel: %s\n", cudaGetErrorString(error));
}
cudaThreadSynchronize();
ComputeBRestVolume << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: Compute rest volume: %s\n", cudaGetErrorString(error));
}
cudaThreadSynchronize();
ComputeVolume << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: Compute volume: %s\n", cudaGetErrorString(error));
}
cudaThreadSynchronize();
ComputeSource << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: Compute source: %s\n", cudaGetErrorString(error));
}
cudaThreadSynchronize();
ComputeAII << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: ComputeAII: %s\n", cudaGetErrorString(error));
}
cudaThreadSynchronize();
}
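// Pressure solve: a relaxed Jacobi-style iteration in the spirit of IISPH. Each pass
// recomputes pressure forces (ComputePressureForce) and a per-particle density error
// (ComputeCriterion); the loop runs at least 3 times and stops once the average error
// over the fluid range drops below eta = 10%, the error stagnates (relative change
// below 0.1%), or 100 iterations are reached. The accumulated pressure force is then
// applied by ApplyPressureForce.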
void PressureSolve(int fluid_beginIndex,int fluid_endIndex)
{
int l = 0;
float averror;
float sum, length = fluid_endIndex - fluid_beginIndex;
float eta = 0.1;
cudaError_t error;
float last_error = 1;
do {
//iterate compute pressure
l++;
//upgrade force to compute the error
ComputePressureForce << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: ComputePressureForce: %s\n", cudaGetErrorString(error));
}
cudaThreadSynchronize();
ComputeCriterion << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: Compute Criterion: %s\n", cudaGetErrorString(error));
}
cudaThreadSynchronize();
thrust::device_ptr<float> dev_deltadens(fbuf.delta_density);
thrust::device_vector<float> deltadens(dev_deltadens + fluid_beginIndex, dev_deltadens + fluid_endIndex);
//averror = thrust::reduce(deltadens.begin(), deltadens.end()) / thrust::reduce(dens.begin(), dens.end());
averror = thrust::reduce(deltadens.begin(), deltadens.end()) / (fluid_endIndex - fluid_beginIndex);
//printf("the %dth iteration over.\n", l);
//if (l > 10)
// break;
		if (fabs(averror - last_error) / last_error < 0.001 || l > 100)
			break;
		last_error = averror;
	} while (l < 3 || fabs(averror) > eta);
ApplyPressureForce << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: ComputePressureForce: %s\n", cudaGetErrorString(error));
}
cudaThreadSynchronize();
}
void MfComputeDriftVelCUDA ()
{
cudaError_t error;
ComputeSolidPorePressure << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: compute pore pressure CUDA: %s\n", cudaGetErrorString(error));
}
cudaThreadSynchronize();
mfComputeDriftVel<<< fcuda.numBlocks, fcuda.numThreads>>> ( fbuf, fcuda.pnum );
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf ( stderr, "CUDA ERROR: MfComputeDriftVelCUDA: %s\n", cudaGetErrorString(error) );
}
cudaThreadSynchronize ();
}
void MfComputeAlphaAdvanceCUDA ()
{
cudaError_t error;
//mfComputeDriftVel << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
//error = cudaGetLastError();
//if (error != cudaSuccess) {
// fprintf(stderr, "CUDA ERROR: MfComputeDriftVelCUDA: %s\n", cudaGetErrorString(error));
//}
//cudaThreadSynchronize();
//mfComputeTDM << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
//error = cudaGetLastError();
//if (error != cudaSuccess) {
// fprintf(stderr, "CUDA ERROR: MfComputeTDM CUDA: %s\n", cudaGetErrorString(error));
//}
//cudaThreadSynchronize();
//mfComputeAlphaAdvance << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
//error = cudaGetLastError();
//if (error != cudaSuccess) {
// fprintf(stderr, "CUDA ERROR: MfComputeAlphaAdvanceCUDA: %s\n", cudaGetErrorString(error));
//}
//cudaThreadSynchronize();
//ComputeFluidAdvance << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
//error = cudaGetLastError();
//if (error != cudaSuccess) {
// fprintf(stderr, "CUDA ERROR: compute fluid advance CUDA: %s\n", cudaGetErrorString(error));
//}
//cudaThreadSynchronize();
mfComputeCorrection << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: MfComputeCorrectionCUDA: %s\n", cudaGetErrorString(error));
}
cudaThreadSynchronize();
}
void MfComputeCorrectionCUDA ()
{
/*if(fcuda.example == 5)
mfComputeCorrection5<<< fcuda.numBlocks, fcuda.numThreads>>> ( fbuf, fcuda.pnum );
else*/
mfComputeCorrection<<< fcuda.numBlocks, fcuda.numThreads>>> ( fbuf, fcuda.pnum );
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf ( stderr, "CUDA ERROR: MfComputeCorrectionCUDA: %s\n", cudaGetErrorString(error) );
}
cudaThreadSynchronize ();
}
//void ComputeForceCUDA_ProjectU(float time)
//{
//	//// Compute the terms of Eq. (8) except for T_Sm
// //ComputeForce_projectu<<< fcuda.numBlocks, fcuda.numThreads>>> ( fbuf, fcuda.pnum );
// //cudaError_t error = cudaGetLastError();
// //if (error != cudaSuccess)
// // fprintf ( stderr, "CUDA ERROR: MfComputeForceCUDA: %s\n", cudaGetErrorString(error) );
// //cudaThreadSynchronize ();
//
// //cudaThreadSynchronize();
//
// //AddSPHtensorForce<<<fcuda.numBlocks, fcuda.numThreads>>>(fbuf, fcuda.pnum, time);
// //error = cudaGetLastError();
// //if (error != cudaSuccess)
// // fprintf ( stderr, "CUDA ERROR: Adding SPH tensor Force: %s\n", cudaGetErrorString(error) );
// //cudaThreadSynchronize ();
//
//
//}
//Mathematics
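// RxPythag: sqrt(a^2 + b^2) evaluated without intermediate overflow/underflow (the
// classic "pythag" helper used by the SVD routine below).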
__device__ inline double RxPythag(const double a, const double b)
{
double absa = abs(a), absb = abs(b);
return (absa > absb ? absa*(double)sqrt((double)(1.0+(absb/absa)*(absb/absa))) :
(absb == 0.0 ? 0.0 : absb*(double)sqrt((double)(1.0+(absa/absb)*(absa/absb)))));
}
__device__ inline double RXD_MIN(const double &a, const double &b){ return ((a < b) ? a : b); }
__device__ inline double RXD_MAX(const double &a, const double &b){ return ((a > b) ? a : b); }
__device__ inline double RXD_SIGN2(const double &a, const double &b){ return b >= 0 ? (a >= 0 ? a : -a) : (a >= 0 ? -a : a); }
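// svdecomp3: singular value decomposition of a 3x3 matrix stored row-major in u.
// On return u holds U, w the singular values (sorted in decreasing order), and v holds
// V (not V^T); the final loop flips column signs of U and V together to keep the
// decomposition consistent. The structure follows the classic iterative svdcmp routine
// specialized to n = 3, and 0 is returned if the QR iteration fails to converge within
// 30 sweeps.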
__device__ int svdecomp3(float w[3], float u[9], float v[9], float eps)
{
bool flag;
int i, its, j, jj, k, l, nm;
float anorm, c, f, g, h, s, scale, x, y, z;
float rv1[3];
g = scale = anorm = 0.0;
for(i = 0; i < 3; ++i){
l = i+2;
rv1[i] = scale*g;
g = s = scale = 0.0;
for(k = i; k < 3; ++k) scale += abs(u[k*3+i]);
if(scale != 0.0){
for(k = i; k < 3; ++k){
u[k*3+i] /= scale;
s += u[k*3+i]*u[k*3+i];
}
f = u[i*3+i];
g = -RXD_SIGN2(sqrt(s), f);
h = f*g-s;
u[i*3+i] = f-g;
for(j = l-1; j < 3; ++j){
for(s = 0.0, k = i; k < 3; ++k) s += u[k*3+i]*u[k*3+j];
f = s/h;
for(k = i; k < 3; ++k) u[k*3+j] += f*u[k*3+i];
}
for(k = i; k < 3; ++k) u[k*3+i] *= scale;
}
w[i] = scale*g;
g = s = scale = 0.0;
if(i+1 <= 3 && i+1 != 3){
for(k = l-1; k < 3; ++k) scale += abs(u[i*3+k]);
if(scale != 0.0){
for(k = l-1; k < 3; ++k){
u[i*3+k] /= scale;
s += u[i*3+k]*u[i*3+k];
}
f = u[i*3+l-1];
g = -RXD_SIGN2(sqrt(s), f);
h = f*g-s;
u[i*3+l-1] = f-g;
for(k = l-1; k < 3; ++k) rv1[k] = u[i*3+k]/h;
for(j = l-1; j < 3; ++j){
for(s = 0.0,k = l-1; k < 3; ++k) s += u[j*3+k]*u[i*3+k];
for(k = l-1; k < 3; ++k) u[j*3+k] += s*rv1[k];
}
for(k = l-1; k < 3; ++k) u[i*3+k] *= scale;
}
}
anorm = RXD_MAX(anorm, (abs(w[i])+abs(rv1[i])));
}
for(i = 2; i >= 0; --i){
if(i < 2){
if(g != 0.0){
for(j = l; j < 3; ++j){
v[j*3+i] = (u[i*3+j]/u[i*3+l])/g;
}
for(j = l; j < 3; ++j){
for(s = 0.0, k = l; k < 3; ++k) s += u[i*3+k]*v[k*3+j];
for(k = l; k < 3; ++k) v[k*3+j] += s*v[k*3+i];
}
}
for(j = l; j < 3; ++j) v[i*3+j] = v[j*3+i] = 0.0;
}
v[i*3+i] = 1.0;
g = rv1[i];
l = i;
}
for(i = 2; i >= 0; --i){
l = i+1;
g = w[i];
for(j = l; j < 3; ++j) u[i*3+j] = 0.0;
if(g != 0.0){
g = 1.0/g;
for(j = l; j < 3; ++j){
for(s = 0.0, k = l; k < 3; ++k) s += u[k*3+i]*u[k*3+j];
f = (s/u[i*3+i])*g;
for(k = i; k < 3; ++k) u[k*3+j] += f*u[k*3+i];
}
for(j = i; j < 3; ++j) u[j*3+i] *= g;
}
else{
for(j = i; j < 3; ++j) u[j*3+i] = 0.0;
}
++u[i*3+i];
}
for(k = 2; k >= 0; --k){
for(its = 0; its < 30; ++its){
flag = true;
for(l = k; l >= 0; --l){
nm = l-1;
if(l == 0 || abs(rv1[l]) <= eps*anorm){
flag = false;
break;
}
if(abs(w[nm]) <= eps*anorm) break;
}
if(flag){
c = 0.0;
s = 1.0;
for(i = l; i < k+1; ++i){
f = s*rv1[i];
rv1[i] = c*rv1[i];
if(abs(f) <= eps*anorm) break;
g = w[i];
h = RxPythag(f, g);
w[i] = h;
h = 1.0/h;
c = g*h;
s = -f*h;
for(j = 0; j < 3; ++j){
y = u[j*3+nm];
z = u[j*3+i];
u[j*3+nm] = y*c+z*s;
u[j*3+i] = z*c-y*s;
}
}
}
z = w[k];
if(l == k){
if(z < 0.0){
w[k] = -z;
for(j = 0; j < 3; ++j) v[j*3+k] = -v[j*3+k];
}
break;
}
if(its == 29){
//printf("no convergence in 30 svdcmp iterations");
return 0;
}
x = w[l];
nm = k-1;
y = w[nm];
g = rv1[nm];
h = rv1[k];
f = ((y-z)*(y+z)+(g-h)*(g+h))/(2.0*h*y);
g = RxPythag(f, 1.0f);
f = ((x-z)*(x+z)+h*((y/(f+RXD_SIGN2(g, f)))-h))/x;
c = s = 1.0;
for(j = l; j <= nm; ++j){
i = j+1;
g = rv1[i];
y = w[i];
h = s*g;
g = c*g;
z = RxPythag(f, h);
rv1[j] = z;
c = f/z;
s = h/z;
f = x*c+g*s;
g = g*c-x*s;
h = y*s;
y *= c;
for(jj = 0; jj < 3; ++jj){
x = v[jj*3+j];
z = v[jj*3+i];
v[jj*3+j] = x*c+z*s;
v[jj*3+i] = z*c-x*s;
}
z = RxPythag(f, h);
w[j] = z;
if(z){
z = 1.0/z;
c = f*z;
s = h*z;
}
f = c*g+s*y;
x = c*y-s*g;
for(jj = 0; jj < 3; ++jj){
y = u[jj*3+j];
z = u[jj*3+i];
u[jj*3+j] = y*c+z*s;
u[jj*3+i] = z*c-y*s;
}
}
rv1[l] = 0.0;
rv1[k] = f;
w[k] = x;
}
}
// reorder
int inc = 1;
float sw;
float su[3], sv[3];
do{
inc *= 3;
inc++;
}while(inc <= 3);
do{
inc /= 3;
for(i = inc; i < 3; ++i){
sw = w[i];
for(k = 0; k < 3; ++k) su[k] = u[k*3+i];
for(k = 0; k < 3; ++k) sv[k] = v[k*3+i];
j = i;
while (w[j-inc] < sw){
w[j] = w[j-inc];
for(k = 0; k < 3; ++k) u[k*3+j] = u[k*3+j-inc];
for(k = 0; k < 3; ++k) v[k*3+j] = v[k*3+j-inc];
j -= inc;
if (j < inc) break;
}
w[j] = sw;
for(k = 0; k < 3; ++k) u[k*3+j] = su[k];
for(k = 0; k < 3; ++k) v[k*3+j] = sv[k];
}
}while(inc > 1);
for(k = 0; k < 3; ++k){
s = 0;
for(i = 0; i < 3; ++i) if(u[i*3+k] < 0.) s++;
for(j = 0; j < 3; ++j) if(v[j*3+k] < 0.) s++;
if(s > 3){
for(i = 0; i < 3; ++i) u[i*3+k] = -u[i*3+k];
for(j = 0; j < 3; ++j) v[j*3+k] = -v[j*3+k];
}
}
return 1;
}
__device__ void multiply_matrix3(float* a, float* b, float* c){
float d[9];
for(int i=0; i<3; i++)
for(int j=0; j<3; j++)
d[i*3+j] = a[i*3+0]*b[0*3+j]+a[i*3+1]*b[1*3+j]+a[i*3+2]*b[2*3+j];
for(int k=0; k<9; k++)
c[k] = d[k];
}
__device__ float3 multiply_mv3(float*m,float3 v)
{
float3 a;
a.x = m[0] * v.x + m[1] * v.y + m[2] * v.z;
a.y = m[3] * v.x + m[4] * v.y + m[5] * v.z;
a.z = m[6] * v.x + m[7] * v.y + m[8] * v.z;
return a;
}
__device__ void transmit3(float* a,float* b){
float c[9];
c[0]=a[0]; c[1]=a[3]; c[2]=a[6];
c[3]=a[1]; c[4]=a[4]; c[5]=a[7];
c[6]=a[2]; c[7]=a[5]; c[8]=a[8];
for(int k=0; k<9; k++)
b[k]=c[k];
}
//__device__ float3 cross(const float3 v1,const float3 v2)
//{
// float3 result;
// result.x = v1.y*v2.z - v1.z*v2.y;
// result.y = v1.z*v2.x - v1.x*v2.z;
// result.z = v1.x*v2.y - v1.y*v2.x;
// return result;
//}
__device__ float3 col(const float* matrix,int col)
{
float3 result = make_float3(matrix[col], matrix[col + 3], matrix[col + 6]);
return result;
}
//// Convert quaternion q into a rotation matrix R
//__device__ void QuaternionToMatrix(const float*q, float*R)
//{
// R[0] = 1 - 2 * q[1] * q[1] - 2 * q[2] * q[2];
// R[1] = 2 * q[0] * q[1] - 2 * q[3] * q[2];
// R[2] = 2 * q[0] * q[2] + 2 * q[3] * q[1];
// R[3] = 2 * q[0] * q[1] + 2 * q[3] * q[2];
// R[4] = 1 - 2 * q[0] * q[0] - 2 * q[2] * q[2];
// R[5] = 2 * q[1] * q[2] - 2 * q[3] * q[0];
// R[6] = 2 * q[0] * q[2] - 2 * q[3] * q[1];
// R[7] = 2 * q[1] * q[2] + 2 * q[3] * q[0];
// R[8] = 1 - 2 * q[0] * q[0] - 2 * q[1] * q[1];
//}
//// q is a quaternion (x, y, z, w)
//__device__ void extractRotation(int i,const float* A, float *q, const unsigned int maxIter)
//{
// float R[9];
// float temp_q[4];
// float norm;
// for (unsigned int iter = 0; iter < maxIter; iter++)
// {
// //translate q to matrix R
// QuaternionToMatrix(q, R);
// /*if (i == 37000)
// printf("R is (%f,%f,%f)(%f,%f,%f)(%f,%f,%f)\n",
// R[0], R[1], R[2], R[3], R[4], R[5], R[6], R[7], R[8]);
// if (i == 37000)
// printf("A is (%f,%f,%f)(%f,%f,%f)(%f,%f,%f)\n",
// A[0], A[1], A[2], A[3], A[4], A[5], A[6], A[7], A[8]);*/
// /*for (int i = 0; i < 9; ++i)
// R[i] = q[i];*/
// //Matrix3d R = q.matrix();
// float3 omega =
// (cross(col(R, 0),col(A,0))
// + cross(col(R, 1),col(A,1))
// + cross(col(R, 2),col(A,2)))
// * (1.0 / fabs(dot(col(R, 0),col(A,0))
// + dot(col(R, 1),col(A,1)) + dot(col(R, 2),col(A,2))) + 1.0e-9);
// if (i == 37000 && iter == 0)
// printf("omega is (%f,%f,%f)\n", omega.x, omega.y, omega.z);
// float w = sqrt(dot(omega,omega));
// if (w < 1.0e-9)
// break;
// omega /= w;
// temp_q[3] = w*q[3] - omega.x*q[0] - omega.y*q[1] - omega.z*q[2];
// temp_q[0] = w*q[0] + omega.x*q[3] + omega.y*q[2] - omega.z*q[1];
// temp_q[1] = w*q[1] + omega.y*q[3] + omega.z*q[0] - omega.x*q[2];
// temp_q[2] = w*q[2] + omega.z*q[3] + omega.x*q[1] - omega.y*q[0];
// //if (i == 37000)
// // printf("omega is (%f,%f,%f,%f)\n", omega.x, omega.y, omega.z, w);
// /*a.w() * b.w() - a.x() * b.x() - a.y() * b.y() - a.z() * b.z(),
// a.w() * b.x() + a.x() * b.w() + a.y() * b.z() - a.z() * b.y(),
// a.w() * b.y() + a.y() * b.w() + a.z() * b.x() - a.x() * b.z(),
// a.w() * b.z() + a.z() * b.w() + a.x() * b.y() - a.y() * b.x()*/
// norm = sqrt(temp_q[0] * temp_q[0] + temp_q[1] * temp_q[1] + temp_q[2] * temp_q[2] + temp_q[3] * temp_q[3]);
// //if (norm < 1.0e-9)
// // break;
// for (int i = 0; i < 4; ++i)
// q[i] = temp_q[i] / (norm + 1.0e-9);
//
// }
//}
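// AxisToRotation: build the row-major rotation matrix for a rotation of `angle` radians
// about the unit vector `axis` (Rodrigues' rotation formula).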
__device__ void AxisToRotation(float* R,const float3 axis,const float angle)
{
float co = cos(angle), si = sin(angle);
R[0] = co + (1 - co)*axis.x*axis.x; R[1] = (1 - co)*axis.x*axis.y - si*axis.z; R[2] = (1 - co)*axis.x*axis.z + si*axis.y;
R[3] = (1 - co)*axis.y*axis.x + si*axis.z; R[4] = co + (1 - co)*axis.y*axis.y; R[5] = (1 - co)*axis.y*axis.z - si*axis.x;
R[6] = (1 - co)*axis.z*axis.x - si*axis.y; R[7] = (1 - co)*axis.z*axis.y + si*axis.x; R[8] = co + (1 - co)*axis.z*axis.z;
}
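// extractRotation: iteratively refine the rotation matrix q so that it approximates the
// rotational part of A. Each step forms a correction axis from
//   sum_i cross(col(q,i), col(A,i)) / (|sum_i dot(col(q,i), col(A,i))| + eps),
// converts it into an incremental rotation with AxisToRotation, and left-multiplies it
// onto q. This mirrors the iterative rotation-extraction scheme commonly used for
// corotated elasticity (an interpretation of the code, not a claim about its source).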
__device__ void extractRotation(const float*A, float*q, const unsigned int maxIter)
{
float R[9];
float norm;
float3 sum = make_float3(0, 0, 0);
float sum2 = 0;
//float error = 100000,error2;
for (unsigned int iter = 0; iter < maxIter; iter++)
//while(true)
{
sum = make_float3(0, 0, 0);
sum2 = 0;
for (int i = 0; i < 3; ++i)
{
sum += cross(col(q, i), col(A, i));
sum2 += dot(col(q, i), col(A, i));
}
sum2 = fabs(sum2) + 1.0e-9;
sum /= sum2;
sum2 = sqrt(dot(sum, sum));
if (sum2 < 1.0e-9)
break;
sum /= sum2;
AxisToRotation(R, sum, sum2);
multiply_matrix3(R, q, q);
/*error2 = 0;
for (int k = 0; k < 3; ++k)
error2 += dot(col(q, k), col(A, k));
if (fabs(error - error2) < 1 || fabs((error - error2) / error) < 0.001)
break;*/
}
}
__device__ float det(const float* a){
float det = a[0]*a[4]*a[8] + a[1]*a[5]*a[6] + a[2]*a[3]*a[7];
det -= (a[2]*a[4]*a[6] + a[1]*a[3]*a[8] + a[5]*a[7]*a[0]);
return det;
}
__device__ void tensorProduct(const float3 a,const float3 b,float* r)
{
r[0] = a.x * b.x; r[1] = a.x * b.y; r[2] = a.x * b.z;
r[3] = a.y * b.x; r[4] = a.y * b.y; r[5] = a.y * b.z;
r[6] = a.z * b.x; r[7] = a.z * b.y; r[8] = a.z * b.z;
}
// Solve for the inverse matrix (in-place 3x3 Gauss-Jordan elimination)
__device__ void InverseMatrix3(float * B)
{
float E[9];
for (int i = 0; i<3; ++i)
{
for (int j = 0; j<3; ++j)
E[i*3 + j] = 0;
E[i*3 + i] = 1;
}
for (int k = 0; k<3; ++k)
{
		// Divide the current row by the pivot B[k][k]
for (int j = k + 1; j<3; ++j)
B[k*3 + j] = B[k*3 + j] / B[k*3 + k];
for (int j = 0; j<3; ++j)
E[k*3 + j] /= B[k*3 + k];
B[k*3 + k] = 1.0;
		// For each row below, subtract B[i][k] * B[k][j]
for (int i = k + 1; i<3; ++i)
{
for (int j = k + 1; j<3; ++j)
{
B[i*3 + j] = B[i*3 + j] - B[i*3 + k] * B[k*3 + j];
}
for (int j = 0; j<3; ++j)
E[i*3 + j] -= B[i*3 + k] * E[k*3 + j];
B[i*3 + k] = 0;
}
}
for (int k = 2; k >= 0; --k)
{
		// For each row above, eliminate B[i][k]
for (int i = k - 1; i >= 0; --i)
{
for (int j = 0; j<3; ++j)
E[i*3 + j] -= B[i*3 + k] * E[k*3 + j];
B[i*3 + k] = 0;
}
}
for (int i = 0; i < 9; ++i)
B[i] = E[i];
}
//Change density if needed
__global__ void mfChangeDensity (bufList buf,int pnum,const float scale)
{
simData.mf_dens[1] *= scale;
simData.mf_up = 1;
simData.mf_visc[1] = simData.mf_visc[0];
simData.VL = 0.3;
simData.VL2 = 0.3*0.3;
}
//The forces of boundary to fluid
__device__ float3 nor(float3 p)
{
float n1 = 0,n2 = 0,n3 = 0;
if (p.y<(int)simData.pboundmin.y) n2 = 1.0;
if (p.x<(int)simData.pboundmin.x) n1 = 1.0;
if (p.x>(int)simData.pboundmax.x) n1 = -1.0;
if (p.z<(int)simData.pboundmin.z) n3 = 1.0;
if (p.z>(int)simData.pboundmax.z) n3 = -1.0;
return make_float3(n1,n2,n3);
}
__device__ double flushData ( int i, float3 p, int cell, bufList buf )
{
float3 dist;
float dsq, c, sum;
//float massj;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2/d2;
int j;
//float maxdis = 88888;
// register float cmterm;
sum = 0.0;
if ( buf.mgridcnt[cell] == 0 ) return 0;
int cfirst = buf.mgridoff[ cell ];
int clast = cfirst + buf.mgridcnt[ cell ];
for ( int cndx = cfirst; cndx < clast; cndx++ ){
if (buf.misbound[buf.mgrid[cndx]] == 0)
{
j = buf.mgrid[cndx];
dist = p - buf.mpos[ buf.mgrid[cndx] ];
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if ( dsq < r2 && dsq > 0.0)
{
c = (r2 - dsq)*d2;
sum += c * c * c * buf.mf_restmass[j]*dot(buf.mveleval[j],nor(buf.mpos[i]));
}
}
}
//c = r2*d2;
//sum += c*c*c*buf.mf_restmass[i];
return sum;
}
__device__ void findNearest ( int i, float3 p, int cell, bufList buf )
{
float3 dist;
float dsq;
// float massj;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2/d2;
int j;
float maxdis = 88888;
// register float cmterm;
//register float3 alphagrad[MAX_FLUIDNUM];
//sum = 0.0;
if ( buf.mgridcnt[cell] == 0 ) return ;
int cfirst = buf.mgridoff[ cell ];
int clast = cfirst + buf.mgridcnt[ cell ];
for ( int cndx = cfirst; cndx < clast; cndx++ ) {
#ifdef NEW_BOUND
if (buf.misbound[buf.mgrid[cndx]] == 0)
{
j = buf.mgrid[cndx];
dist = p - buf.mpos[ buf.mgrid[cndx] ];
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if ( dsq < r2 && dsq > 0.0 && dsq*d2<maxdis)
{
maxdis = dsq*d2;
buf.midsort[i] = j;
}
}
#else
j = buf.mgrid[cndx];
dist = p - buf.mpos[ buf.mgrid[cndx] ];
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if ( dsq < r2 && dsq > 0.0 && dsq*d2<maxdis)
{
maxdis = dsq*d2;
buf.midsort[i] = j;
}
#endif
}
return ;
}
__global__ void mfFindNearest (bufList buf,int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if ( i >= pnum ) return;
// Get search cell
int nadj = (1*simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[ i ];
if ( gc == GRID_UNDEF ) return; // particle out-of-range
gc -= nadj;
// Sum Pressures
float3 pos = buf.mpos[ i ];
#ifdef NEW_BOUND
if (buf.misbound[i]==1)
{
buf.midsort[i] = i;
buf.mf_restmass[i] = simData.pmass;
for (int c = 0; c<simData.gridAdjCnt; c++)
{
findNearest(i,pos,gc+simData.gridAdj[c],buf);
}
if (buf.midsort[i]!=i)
buf.mf_restmass[i] = buf.mf_restmass[buf.midsort[i]];
}
#endif
}
//Sorting
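// InitialSort: compute each particle's grid cell id as
//   gs = (gc.y * gridRes.z + gc.z) * gridRes.x + gc.x,
// matching the adjacency-table layout built in FluidSetupCUDA; particles outside the
// scan range get GRID_UNDEF. midsort starts as the identity permutation, so after the
// key sort midsort[i] gives the pre-sort index of the particle now in slot i.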
__global__ void InitialSort ( bufList buf, int pnum )
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if ( i >= pnum ) return;
register float3 gridMin = simData.gridMin;
register float3 gridDelta = simData.gridDelta;
register int3 gridRes = simData.gridRes;
register int3 gridScan = simData.gridScanMax;
// register float poff = simData.psmoothradius / simData.psimscale;
register int gs;
register float3 gcf;
register int3 gc;
gcf = (buf.mpos[i] - gridMin) * gridDelta;
gc = make_int3( int(gcf.x), int(gcf.y), int(gcf.z) );
gs = (gc.y * gridRes.z + gc.z)*gridRes.x + gc.x;
if ( gc.x >= 1 && gc.x <= gridScan.x && gc.y >= 1 && gc.y <= gridScan.y && gc.z >= 1 && gc.z <= gridScan.z ) {
buf.mgcell[i] = gs; // Grid cell insert.
buf.midsort[i] = i;
// buf.mgndx[i] = atomicAdd ( &buf.mgridcnt[ gs ], 1 ); // Grid counts.
// gcf = (-make_float3(poff,poff,poff) + buf.mpos[i] - gridMin) * gridDelta;
// gc = make_int3( int(gcf.x), int(gcf.y), int(gcf.z) );
// gs = ( gc.y * gridRes.z + gc.z)*gridRes.x + gc.x;
//buf.mcluster[i] = gs; -- make sure it is allocated!
} else {
buf.mgcell[i] = GRID_UNDEF;
buf.midsort[i] = i;
//buf.mcluster[i] = GRID_UNDEF; -- make sure it is allocated!
}
}
__global__ void CalcFirstCnt ( bufList buf, int pnum )
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
if (i>=pnum) return;
if ((i == 0 || buf.mgcell[i]!=buf.mgcell[i-1]))
{
if (buf.mgcell[i]!=GRID_UNDEF)buf.mgridoff[buf.mgcell[i]] = i;
}
__syncthreads();
if (i!=0 && buf.mgcell[i]!=buf.mgcell[i-1] && buf.mgcell[i-1]!=GRID_UNDEF)
buf.mgridcnt[buf.mgcell[i-1]] = i;
if (i == pnum-1 && buf.mgcell[i]!=GRID_UNDEF)
buf.mgridcnt[buf.mgcell[i]] = i + 1;
/*
__shared__ uint scell[512]; // [blockDim.x+1}
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
uint cel;
if (i<pnum && buf.mgcell[i] != GRID_UNDEF)
{
cel=buf.mgcell[i];
scell[threadIdx.x+1]=cel;
if(i&&!threadIdx.x)scell[0]=buf.mgcell[i-1];
}
__syncthreads();
if(i<pnum && buf.mgcell[i] != GRID_UNDEF)
{
if(!i||cel!=scell[threadIdx.x])
{
buf.mgridoff[cel]=i;
if (i)
{
buf.mgridcnt[scell[threadIdx.x]] = i;
}
if (i == pnum - 1)
buf.mgridcnt[scell[threadIdx.x]] = i+1;
}
}
else if (i<pnum)
{
if (buf.mgcell[i] != scell[threadIdx.x])
{
buf.mgridcnt[scell[threadIdx.x]] = i;
}
}
*/
}
__global__ void GetCnt ( bufList buf, int pnum )
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i>=pnum) return ;
if (buf.mgcell[i]!=GRID_UNDEF)
{
buf.mgndx[i] = i - buf.mgridoff[buf.mgcell[i]];
if (buf.mgndx[i] == 0)
buf.mgridcnt[buf.mgcell[i]] -= buf.mgridoff[buf.mgcell[i]];
}
}
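// CountingSortFull_: thread i (a slot in the sorted order) reads its cell id and
// in-cell index from the staged, already-sorted copies in msortbuf, maps itself through
// midsort to the pre-sort particle slot, and copies every staged attribute into
// sort_ndx = mgridoff[cell] + index, leaving all particle arrays in cell-sorted order.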
__global__ void CountingSortFull_ ( bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if ( i >= pnum ) return;
uint icell = *(uint*) (buf.msortbuf + pnum*BUF_GCELL + i*sizeof(uint) );
uint indx = *(uint*) (buf.msortbuf + pnum*BUF_GNDX + i*sizeof(uint) );
int sort_ndx = buf.mgridoff[ icell ] + indx; // global_ndx = grid_cell_offet + particle_offset
// uint j = i;
i = buf.midsort[i];
if ( icell != GRID_UNDEF ) {
buf.mgrid[ sort_ndx ] = sort_ndx; // full sort, grid indexing becomes identity
char* bpos = buf.msortbuf + i*sizeof(float3);
buf.mpos[ sort_ndx ] = *(float3*) (bpos);
buf.mveleval[ sort_ndx ] = *(float3*) (bpos + pnum*BUF_VELEVAL );
buf.mpress[ sort_ndx ] = *(float*) (buf.msortbuf + pnum*BUF_PRESS + i*sizeof(float) );
#ifdef NEW_BOUND
buf.misbound[ sort_ndx ] = *(int*) (buf.msortbuf + pnum*BUF_ISBOUND+ i*sizeof(int) ); // ((uint) 255)<<24; -- dark matter
#endif
buf.isInside[sort_ndx] = *(bool*)(buf.msortbuf + pnum*BUF_ISINSIDE + i * sizeof(bool));
buf.mgcell[ sort_ndx ] = icell;
buf.mgndx[ sort_ndx ] = indx;
//multi fluid
int mul_sort_ndx = sort_ndx*MAX_FLUIDNUM;
for( uint fcount = 0; fcount < simData.mf_catnum; fcount++)
{
//char* bmul = buf.msortbuf + i*sizeof(float)*MAX_FLUIDNUM + fcount * sizeof(float);
buf.mf_alpha[mul_sort_ndx+fcount] = *(float*)(buf.msortbuf + pnum*BUF_ALPHA + i*sizeof(float)*MAX_FLUIDNUM + fcount * sizeof(float));
buf.mf_alpha_next[mul_sort_ndx+fcount] = *(float*)(buf.msortbuf + pnum*BUF_ALPHAPRE+ i*sizeof(float)*MAX_FLUIDNUM + fcount * sizeof(float));
//porous
for (int l = 0; l < MAX_SOLIDNUM; ++l)
{
buf.mf_beta[mul_sort_ndx*MAX_SOLIDNUM + fcount*MAX_SOLIDNUM+l]
= *(float*)(buf.msortbuf + pnum*BUF_ABSORBEDPERCENT + i * sizeof(float)*MAX_FLUIDNUM*MAX_SOLIDNUM + fcount * sizeof(float)*MAX_SOLIDNUM + l*sizeof(float));
buf.mf_beta_next[mul_sort_ndx*MAX_SOLIDNUM + fcount*MAX_SOLIDNUM + l]
= *(float*)(buf.msortbuf + pnum*BUF_BETANEXT + i * sizeof(float)*MAX_FLUIDNUM*MAX_SOLIDNUM + fcount * sizeof(float)*MAX_SOLIDNUM + l * sizeof(float));
}
//buf.capillaryPotentials[mul_sort_ndx + fcount] = *(float*)(buf.msortbuf + pnum*BUF_CP + i * sizeof(float)*MAX_FLUIDNUM + fcount * sizeof(float));
}
//buf.mf_pressure_modify[ sort_ndx ] = *(float*) (buf.msortbuf + pnum*BUF_PRESSMODI + i*sizeof(float));
buf.mf_restmass[ sort_ndx ] = *(float*) (buf.msortbuf + pnum*BUF_RMASS + i*sizeof(float));
//buf.mf_velxcor[sort_ndx] = *(float3*)(buf.msortbuf + pnum*BUF_VELXCOR + i*sizeof(float3));
buf.MFtype[sort_ndx] = *(int*)(buf.msortbuf+ pnum*BUF_INDICATOR + i*sizeof(int));
//elastic information
buf.elasticID[sort_ndx] = *(uint*)(buf.msortbuf + pnum*BUF_ELASTICID + i * sizeof(uint));
if(buf.MFtype[sort_ndx] == 2)
buf.particleID[buf.elasticID[sort_ndx]] = sort_ndx;
if(_example == 2 && buf.MFtype[sort_ndx] >= 2)
buf.particleID[buf.elasticID[sort_ndx]] = sort_ndx;
}
}
//compute pressure
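// Density contribution of one grid cell to particle i (poly6-style (r2-dsq)^3 weights);
// also accumulates separate sums over same-type solid/fluid neighbours (and type pairs
// summing to 9) that feed the solid density estimate.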
__device__ float mfContributePressure ( int i, float3 p, int cell, bufList buf, float& sum_solid, float& sum_fluid)
{
float3 dist;
float dsq, c, sum;
float massj;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2/d2;
sum = 0.0;
int j;
if ( buf.mgridcnt[cell] == 0 )
return 0.0;
int cfirst = buf.mgridoff[ cell ];
int clast = cfirst + buf.mgridcnt[ cell ];
for ( int cndx = cfirst; cndx < clast; cndx++ ) {
j = buf.mgrid[cndx];
dist = p - buf.mpos[ buf.mgrid[cndx] ];
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if ( dsq < r2 && dsq > 0.0) {
c = (r2 - dsq)*d2;
sum += c * c * c * buf.mf_restmass[i];
if (buf.MFtype[i] == buf.MFtype[j])
{
if (buf.MFtype[i] == 0)
sum_fluid += c * c * c * buf.mf_restmass[i];
else
sum_solid += c * c * c * buf.mf_restmass[i];
}
if (buf.MFtype[i] + buf.MFtype[j] == 9)
sum_solid += c * c * c * buf.mf_restmass[i];
}
}
return sum;
}
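// Same cell-wise density sum, but restricted to neighbours of the same MFtype and
// weighted by the neighbour's rest mass; used once to initialise solid rest densities.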
__device__ float mfContributePressureInit ( int i, float3 p, int cell, bufList buf )
{
float3 dist;
float dsq, c, sum;
float massj;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2/d2;
sum = 0.0;
int j;
if ( buf.mgridcnt[cell] == 0 )
return 0.0;
int cfirst = buf.mgridoff[ cell ];
int clast = cfirst + buf.mgridcnt[ cell ];
for ( int cndx = cfirst; cndx < clast; cndx++ ) {
j = buf.mgrid[cndx];
//if( buf.MFtype[i] == 2 && buf.MFtype[j]!=2)
if(buf.MFtype[i]!=buf.MFtype[j])
continue;
dist = p - buf.mpos[ buf.mgrid[cndx] ];
massj = buf.mf_restmass[ buf.mgrid[cndx] ];
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if ( dsq < r2 && dsq > 0.0) {
c = (r2 - dsq)*d2;
sum += c * c * c * massj;
}
}
return sum;
}
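// Density pass: sum kernel contributions over the adjacent cells, add the self term,
// and store the inverse density (plus the solid-only inverse density for non-fluid
// particles under NEW_BOUND).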
__global__ void mfPreComputeDensity ( bufList buf, int pnum )
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if ( i >= pnum ) return;
// Get search cell
int nadj = (1*simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[ i ];
if ( gc == GRID_UNDEF ) return; // particle out-of-range
gc -= nadj;
// Sum Pressures
float3 pos = buf.mpos[ i ];
float dens = buf.mf_restdensity[i];
float sum = 0.0;
float sum_solid = 0.0;
float sum_fluid = 0.0;
for (int c=0; c < simData.gridAdjCnt; c++) {
sum += mfContributePressure ( i, pos, gc + simData.gridAdj[c], buf, sum_solid, sum_fluid);
//__syncthreads();
}
// Compute Density & Pressure
sum += simData.r2 * simData.r2 * simData.r2 * buf.mf_restmass[i];
sum_solid += simData.r2 * simData.r2 * simData.r2 * buf.mf_restmass[i];
//sum_fluid += simData.r2 * simData.r2 * simData.r2 * buf.mf_restmass[i];
sum = sum * simData.poly6kern;
sum_solid = sum_solid * simData.poly6kern;
sum_fluid = sum_fluid * simData.poly6kern;
if ( sum == 0.0 ) sum = 1.0;
#ifdef NEW_BOUND
buf.mdensity[ i ] = 1.0f / sum;
if (buf.MFtype[i] != 0)
{
buf.density_solid[i] = 1.0f / sum_solid;
//if (i % 10 == 0)
//	printf("solid density is %f\n", buf.density_solid[i]);
}
#else
buf.mpress[ i ] = ( sum - dens ) * simData.pintstiff;
//buf.mpress[ i ] = (pow( sum/dens,7.0f )-1) * simData.pintstiff;
//buf.mpress[ i ] = simData.pintstiff * dens * (pow( sum/dens,7.0f )-1);
buf.mdensity[ i ] = 1.0f / sum;
#endif
}
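// Pressure pass: re-sum the density, reset the per-phase alpha_next/beta buffers, and
// evaluate a Tait-style equation of state (separate stiffness factors for solid and
// fluid particles); vel_mid is seeded with the current velocity.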
__global__ void mfComputePressure ( bufList buf, int pnum )
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if ( i >= pnum ) return;
// Get search cell
int nadj = (1*simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[ i ];
if ( gc == GRID_UNDEF ) return; // particle out-of-range
gc -= nadj;
// Sum Pressures
float3 pos = buf.mpos[ i ];
float dens = buf.mf_restdensity[i];
float sum = 0.0;
float sum_solid = 0;
float sum_fluid = 0;
for(uint fcount = 0; fcount<simData.mf_catnum;fcount++)
{
//buf.mf_alphagrad[i*MAX_FLUIDNUM+fcount] = make_float3(0,0,0);
buf.mf_alpha_next[i*MAX_FLUIDNUM+fcount] = buf.mf_alpha[i*MAX_FLUIDNUM+fcount];
buf.mf_beta[i*MAX_FLUIDNUM + fcount] = 0;
buf.mf_beta_next[i*MAX_FLUIDNUM + fcount] = 0;
}
/*if (buf.MFtype[i] == 0 && buf.mpos[i].y < 30)
{
buf.mf_alpha_next[i*MAX_FLUIDNUM + 2] = buf.mf_alpha[i*MAX_FLUIDNUM + 2] = 0;
buf.mf_beta[i*MAX_FLUIDNUM + 2] = 0.5;
}*/
for (int c=0; c < simData.gridAdjCnt; c++) {
sum += mfContributePressure ( i, pos, gc + simData.gridAdj[c], buf, sum_solid, sum_fluid );
//__syncthreads();
}
buf.isInside[i] = false;
// Compute Density & Pressure
sum += simData.r2 * simData.r2 * simData.r2 * buf.mf_restmass[i];
sum = sum * simData.poly6kern;
if ( sum == 0.0 ) sum = 1.0;
#ifdef NEW_BOUND
if (buf.misbound[i] ==1)
{
//buf.mpress[i] = ( sum - dens ) * simData.pextstiff;
//buf.mpress[ i ] = (pow( sum/dens,7.0f )-1) * simData.pintstiff;
//buf.mpress[ i ] += simData.pintstiff * dens * (pow( sum/dens,7.0f )-1);
buf.mpress[ i ] = ( sum - dens ) * simData.pintstiff;
//if (buf.mpress[i]<0) buf.mpress[i] = 0;
}
else
{
//buf.mpress[ i ] = ( sum - dens ) * simData.pintstiff;
//buf.mpress[ i ] = (pow( sum/dens,7.0f )-1) * simData.pintstiff;
//buf.mpress[ i ] = ( sum - dens ) * simData.pintstiff;
if( buf.MFtype[i]>=2)
buf.mpress[ i ] = simData.solid_pfactor * dens * (pow( sum/dens,7.0f )-1);
if( buf.MFtype[i]==0){
buf.mpress[ i ] = simData.fluid_pfactor * dens * (pow( sum/dens,7.0f )-1);
if(buf.mpress[i]<0)
buf.mpress[i]=0;
}
// buf.mdensity[ i ] = 1.0f / sum;
}
#else
buf.mpress[ i ] = ( sum - dens ) * simData.pintstiff;
//buf.mpress[ i ] = (pow( sum/dens,7.0f )-1) * simData.pintstiff;
//buf.mpress[ i ] = simData.pintstiff * dens * (pow( sum/dens,7.0f )-1);
buf.mdensity[ i ] = 1.0f / sum;
#endif
//buf.mpress[ i ] = (pow( sum/dens,7.0f )-1) * simData.pintstiff;
//buf.mpress[ i ] = simData.pintstiff * dens * (pow( sum/dens,7.0f )-1);
buf.vel_mid[i] = buf.mveleval[i];
}
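// One-time initialisation of the rest density of solid particles from their same-type
// neighbourhood; their velocity fields are zeroed as well.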
__global__ void initDensity(bufList buf,int pnum){
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if ( i >= pnum ) return;
if(buf.MFtype[i] == 0) //no need for fluid particles
return;
// Get search cell
int nadj = (1*simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[ i ];
if ( gc == GRID_UNDEF ) return; // particle out-of-range
gc -= nadj;
float3 pos = buf.mpos[ i ];
float sum = 0.0;
for (int c=0; c < simData.gridAdjCnt; c++) {
sum += mfContributePressureInit ( i, pos, gc + simData.gridAdj[c], buf );
}
sum += simData.r2 * simData.r2 * simData.r2 * buf.mf_restmass[i];
sum = sum * simData.poly6kern;
//now sum is density
buf.mf_restdensity[i] = sum;
//if (i == 0)
// printf("rest density is %f\n", buf.mf_restdensity[i]);
buf.mveleval[i] = make_float3(0, 0, 0);
buf.vel_mid[i] = make_float3(0, 0, 0);
}
//compute drift velocity
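// Cell-wise contribution to the per-phase drift velocity: pressure-gradient terms from
// fluid neighbours and capillary (pressure_water) gradient terms from solid neighbours,
// both weighted by the phase mass concentrations.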
__device__ void contributeDriftVel( int i, int muli, float3 ipos, float idens, float ipress, int cell, bufList buf, float* ialpha, float* imassconcen, float3* idriftvelterm, float relax_coef, float3*ialphagrad){
float dsq, c;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2/d2;
float3 dist;
float cmterm;
float pmterm;
int j, mulj;
if ( buf.mgridcnt[cell] == 0 ) return;
int cfirst = buf.mgridoff[ cell ];
int clast = cfirst + buf.mgridcnt[ cell ];
float3 force = make_float3(0,0,0);
float3 pgrad[MAX_FLUIDNUM];
float3 pgradsum;
float3 cpgrad[MAX_FLUIDNUM];
float3 cpgradsum;
for ( int cndx = cfirst; cndx < clast; cndx++ ) {
j = buf.mgrid[ cndx ];
mulj = j * MAX_FLUIDNUM;
dist = ( ipos - buf.mpos[ j ] ); // dist in cm
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
dist *= simData.psimscale;
if ( dsq < r2 && dsq > 0) {
//cx = (r2-dsq)*d2;
dsq = sqrt(dsq*d2);
c = ( simData.psmoothradius - dsq );
cmterm = simData.spikykern * c * c / dsq * buf.mf_restmass[j] * buf.mdensity[j];
if (buf.MFtype[j] == 0)
{
if (buf.mf_alpha_sum[j] < 0.000001)
continue;
//pressure
pgradsum = make_float3(0, 0, 0);
for (uint fcount = 1; fcount < simData.mf_catnum; fcount++)
{
float jalphaprecount = buf.mf_alpha[mulj + fcount] / buf.mf_alpha_sum[j];
//float ialphaprecount = ialpha_pre[fcount];
pmterm = cmterm * (-ialpha[fcount] * ipress + jalphaprecount*buf.mpress[j]);
//pmterm = cmterm * (-ialpha_pre[fcount]*ipress + buf.mf_alpha_pre[mulj+fcount]*buf.mpress[j]);
pgrad[fcount] = pmterm * dist;
if (isnan(dot(pgrad[fcount], pgrad[fcount])))
continue;
pgradsum += pgrad[fcount] * imassconcen[fcount];
//grad alpha
ialphagrad[fcount] += (jalphaprecount - ialpha[fcount]) * cmterm * dist;
}
for (uint fcount = 1; fcount < simData.mf_catnum; fcount++)
{
if (isnan(dot(pgrad[fcount], pgrad[fcount])))
continue;
idriftvelterm[fcount] -= relax_coef * (pgrad[fcount] - pgradsum);
}
}
if(buf.MFtype[j] >= 2)//capillary term
{
cpgradsum = make_float3(0, 0, 0);
for(int k=1;k<simData.mf_catnum;++k)
{
//float jalphaprecount = buf.mf_alpha[mulj + k] / buf.mf_alpha_sum[j];
pmterm = cmterm * (-buf.pressure_water[i*simData.mf_catnum*MAX_SOLIDNUM+k*MAX_SOLIDNUM+buf.MFtype[j]-2] + buf.pressure_water[j*simData.mf_catnum*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[j] - 2]);
cpgrad[k] = pmterm * dist;
if (isnan(dot(cpgrad[k], cpgrad[k])))
{
//printf("cpgrad %d is (%f,%f,%f)\n", k, cpgrad[k].x, cpgrad[k].y, cpgrad[k].z);
continue;
}
cpgradsum += cpgrad[k] * imassconcen[k];
}
for (uint fcount = 1; fcount < simData.mf_catnum; fcount++)
{
if (isnan(dot(cpgrad[fcount], cpgrad[fcount])))
continue;
idriftvelterm[fcount] -= relax_coef*simData.relax2* (cpgrad[fcount] - cpgradsum);
}
}
}
}
}
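// Commit the corrected fractions: copy alpha_next/beta_next into alpha/beta and rebuild
// the particle's rest density and viscosity as the fraction-weighted mixture values.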
__global__ void applyAlphaAndBeta(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum)
return;
if (buf.MFtype[i] != 0)
return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
//if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
register float3 accel = -buf.mforce[i]; // final accel (g-a) of last step was stored in here cf. advance,
register uint muloffseti = i * MAX_FLUIDNUM;
float alphasum = 0;
for (uint fcount = 0; fcount < simData.mf_catnum; fcount++)
{
//float temp = buf.mf_alpha[muloffseti+fcount];
//buf.mf_alpha_pre[muloffseti+fcount] = temp; //alpha->alpha_pre
buf.mf_alpha[muloffseti + fcount] = buf.mf_alpha_next[muloffseti + fcount];
alphasum += buf.mf_alpha_next[muloffseti + fcount];
//buf.mf_alphagrad[i*MAX_FLUIDNUM + fcount] = make_float3(0, 0, 0);
}
for (uint fcount = 0; fcount < MAX_FLUIDNUM*MAX_SOLIDNUM; fcount++)
buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + fcount] = buf.mf_beta_next[i*MAX_FLUIDNUM*MAX_SOLIDNUM + fcount];
float newdens, newvisc, newmass, newdensout;
//Restdensity Update
newdens = 0.0;
newvisc = 0.0;
//newdensout = 0.0;
//newmass = 0.0;
for (uint fcount = 1; fcount < simData.mf_catnum; fcount++)
{
newdens += buf.mf_alpha[i*MAX_FLUIDNUM + fcount] * simData.mf_dens[fcount];
newvisc += buf.mf_alpha[i*MAX_FLUIDNUM + fcount] * simData.mf_visc[fcount];
}
float betasum = 0;
for (uint fcount = 1; fcount < simData.mf_catnum; fcount++)
for(int l=0;l<MAX_SOLIDNUM;++l)
{
newdens += buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + fcount*MAX_SOLIDNUM + l] * simData.mf_dens[fcount];
newvisc += buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + fcount*MAX_SOLIDNUM + l] * simData.mf_visc[fcount];
betasum += buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + fcount*MAX_SOLIDNUM + l];
}
//if (buf.MFtype[i] == 0)
{
buf.mf_restdensity[i] = newdens;
//buf.mf_restmass[i] = newmass;
buf.mf_visc[i] = newvisc;
buf.mf_restdensity_out[i] = newdensout;
}
if (buf.mf_restdensity[i] <= 10)
printf("rest den is %f, alpha is (%f,%f,%f), betasum is %f\n",
buf.mf_restdensity[i], buf.mf_alpha[i*MAX_FLUIDNUM + 1], buf.mf_alpha[i*MAX_FLUIDNUM + 2], buf.mf_alpha[i*MAX_FLUIDNUM + 3],
betasum);
}
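// Per-phase drift velocity u_mk: body-force term relax_coef*(rho_k - sum_m c_m rho_m)*accel,
// the pressure/capillary gradient term gathered by contributeDriftVel, and the
// concentration-gradient diffusion term.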
__global__ void mfComputeDriftVel( bufList buf, int pnum )
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if ( i >= pnum)
return;
//if (i % 1000 == 0)
// for (uint fcount = 0; fcount < simData.mf_catnum; fcount++)
// printf("particle %d's pressure is %f\n",
// i, buf.mpress[i]);
if (buf.MFtype[i] != 0)
return;
if (buf.mf_alpha_sum[i] <= 0.01)
{
for (uint fcount = 1; fcount < simData.mf_catnum; fcount++)
buf.mf_vel_phrel[i*MAX_FLUIDNUM + fcount] = make_float3(0, 0, 0);
return;
}
// Get search cell
int nadj = (1*simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[ i ];
if ( gc == GRID_UNDEF ) return; // particle out-of-range
gc -= nadj;
float relax_coef = simData.relax; // temporary relax time related coefficient
//register float relax_coef = 0;
float sigma = 0.001f;//0.001f; //diffusion&tension coefficient
float cont, conts, contr;
cont = simData.cont;
conts = simData.cont1;
contr = simData.cont2;
float3 accel = -buf.mforce[i]; // final accel (g-a) of last step was stored in here cf. advance,
//register float massFrack[MAX_FLUIDNUM];
uint muloffseti = i * MAX_FLUIDNUM;
float invdens = 1.0/buf.mf_restdensity_out[i];
float dsum;
float vrx, vry, vrz;
float tdiff;
float3 ssum;
float alpha[MAX_FLUIDNUM],mass_concen[MAX_FLUIDNUM];
float ipress = buf.mpress[ i ];
float3 ipos = buf.mpos[ i ];
float idens = buf.mdensity[ i ];
float3 driftVelterm[MAX_FLUIDNUM],alphaGradterm[MAX_FLUIDNUM];
float3 sterm[MAX_FLUIDNUM];
//various viscosity
relax_coef /= buf.mf_visc[i];
//relax_coef *= (99*buf.mf_alpha_pre[i*MAX_FLUIDNUM+2]+1);
//third term
for(uint fcount = 1;fcount < simData.mf_catnum; fcount++)
{
//float temp = buf.mf_alpha[muloffseti+fcount];
//buf.mf_alpha_pre[muloffseti+fcount] = temp; //alpha->alpha_pre
if (buf.mf_alpha_sum[i] > 0.0001)
alpha[fcount] = buf.mf_alpha[muloffseti + fcount] / buf.mf_alpha_sum[i];
else
alpha[fcount] = 0;
//mass_concen[fcount] = alpha[fcount]*simData.mf_dens[fcount]*invdens;
mass_concen[fcount] = alpha[fcount] * simData.mf_dens[fcount] * invdens;
//if (isnan(mass_concen[fcount]))
// printf("alpha pre is %f, invdens is %f\n",
// alpha_pre[fcount], invdens);
driftVelterm[fcount] = make_float3(0,0,0);
alphaGradterm[fcount] = make_float3(0,0,0);
}
for (int c=0; c < simData.gridAdjCnt; c++) {
contributeDriftVel ( i, muloffseti, ipos, idens, ipress, gc + simData.gridAdj[c], buf, alpha, mass_concen, driftVelterm, relax_coef, alphaGradterm);
}
for( uint fcount = 1; fcount < simData.mf_catnum; fcount++)
{
//buf.mf_vel_phrel[muloffseti+fcount] = cont * contr * driftVelterm[fcount];
float3 vel = cont * contr * driftVelterm[fcount];
buf.mf_vel_phrel[muloffseti+fcount] = vel;
}
//first term & second term
dsum = 0;
ssum = make_float3(0,0,0);
if(buf.mf_alpha_sum[i] > 0.01)
for( uint fcount = 1; fcount < simData.mf_catnum; fcount++)
{
float temp = buf.mf_alpha[muloffseti+fcount] / buf.mf_alpha_sum[i];
dsum += temp * simData.mf_dens[fcount] * simData.mf_dens[fcount] * invdens;
if (temp > 0.0001)
//sterm[fcount] = buf.mf_alphagrad[muloffseti+fcount]/temp;
sterm[fcount] = alphaGradterm[fcount] / temp;
else
sterm[fcount] = make_float3(0,0,0);
//sterm[fcount] = alphaGradterm[fcount];
ssum += sterm[fcount] * temp * simData.mf_dens[fcount] * invdens;
}
for( uint fcount = 1; fcount < simData.mf_catnum; fcount++)
{
tdiff = simData.mf_dens[fcount]-dsum;
tdiff *= relax_coef;
vrx = accel.x * tdiff;
vry = accel.y * tdiff;
vrz = accel.z * tdiff;
buf.mf_vel_phrel[muloffseti+fcount] += make_float3(vrx,vry,vrz);
buf.mf_vel_phrel[muloffseti+fcount] -=
cont * conts * sigma * (sterm[fcount]-ssum);
if (isnan(dot(buf.mf_vel_phrel[muloffseti + fcount], buf.mf_vel_phrel[muloffseti + fcount])))
//if(i%1000 ==0)
printf("particle %d phase %d's vel is (%f,%f,%f),accel is (%f,%f,%f),alpha is %f, sterm is (%f,%f,%f), driftVelterm is (%f,%f,%f), press is %f, mass concern is (%f,%f,%f), alphaSum is %f, densityout is %f, pressure water is (%f,%f,%f,%f), visco is %f, relax_coef is %f\n",
i, fcount, buf.mf_vel_phrel[muloffseti + fcount].x, buf.mf_vel_phrel[muloffseti + fcount].y,
buf.mf_vel_phrel[muloffseti + fcount].z, accel.x, accel.y, accel.z,
buf.mf_alpha[muloffseti + fcount], sterm[fcount].x, sterm[fcount].y, sterm[fcount].z,
driftVelterm[fcount].x, driftVelterm[fcount].y, driftVelterm[fcount].z, buf.mpress[i],
mass_concen[1], mass_concen[2], mass_concen[3], buf.mf_alpha_sum[i],buf.mf_restdensity_out[i],
buf.pressure_water[muloffseti*MAX_SOLIDNUM+fcount*MAX_SOLIDNUM+0], buf.pressure_water[muloffseti*MAX_SOLIDNUM + fcount*MAX_SOLIDNUM + 1],
buf.pressure_water[muloffseti*MAX_SOLIDNUM + fcount*MAX_SOLIDNUM + 2], buf.pressure_water[muloffseti*MAX_SOLIDNUM + fcount*MAX_SOLIDNUM + 3], buf.mf_visc[i], relax_coef);
}
}
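// Cell-wise contribution of the phase-diffusion stress T_dm (drift-velocity convection
// terms), discretised with the cubic-spline gradient kernel over fluid neighbours only.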
__device__ float3 contributeTDM(int i, int muli, float idens, float3 pos, int cell, bufList buf, float* ialpha_pre, float3* ivmk)
{
float3 force = make_float3(0, 0, 0);
if (buf.mgridcnt[cell] == 0) return force;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist, sf;
float c, dsq2, dsq, q;
int j, mulj;
float cmterm;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.MFtype[j] != 0)
continue;
mulj = j * MAX_FLUIDNUM;
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale;
dsq2 = dot(dist, dist);
q = sqrt(dsq2 / r2);
if (!(dsq2 < r2&&dsq2>0))
continue;
dsq = sqrt(dsq2);
if (q <= 0.5)
cmterm = simData.gradCubicSplineKern * (3 * q*q - 2 * q);
else
cmterm = -simData.gradCubicSplineKern * pow(1 - q, 2);
cmterm *= buf.mf_restmass[j] * buf.mdensity[j] / dsq;
//T_dm
for (uint fcount = 0; fcount < simData.mf_catnum; fcount++)
{
float3 dtermj = cmterm * dot(buf.mf_vel_phrel[mulj + fcount], dist) * buf.mf_alpha[mulj + fcount] * buf.mf_vel_phrel[mulj + fcount];
float3 dtermi = cmterm * dot(ivmk[fcount], dist) * ialpha_pre[fcount] * ivmk[fcount];
//example 2 doesn't have this term
force += (dtermj + dtermi) * simData.mf_dens[fcount] * idens;
}
}
return force;
}
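// Accumulate the T_dm force for fluid particles and store the result in mforce/maccel.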
__global__ void mfComputeTDM(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (buf.MFtype[i] != 0) {
return;
}
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
bool error = false;
// Sum Pressures
float3 pos = buf.mpos[i];
float dens = buf.mf_restdensity[i];
float3 force = make_float3(0, 0, 0);
register uint muloffseti = i * MAX_FLUIDNUM;
register float alpha[MAX_FLUIDNUM];
register float3 ivmk[MAX_FLUIDNUM];
for (uint fcount = 1; fcount < simData.mf_catnum; fcount++)
{
alpha[fcount] = buf.mf_alpha[muloffseti + fcount];
ivmk[fcount] = buf.mf_vel_phrel[muloffseti + fcount];
}
for (int c = 0; c < simData.gridAdjCnt; c++)
{
force += contributeTDM(i, muloffseti, buf.mdensity[i], pos, gc + simData.gridAdj[c], buf, alpha, ivmk);
}
if (isnan(dot(force,force)))
//if(i%1000 ==0)
printf("particle %d tdm is nan, press is %f, alphaSum is %f, densityout is %f\n",
i, buf.mpress[i],
buf.mf_alpha_sum[i], buf.mf_restdensity_out[i]);
//bound force and gravity
buf.mforce[i] += force;
//buf.fluidForce[i] += force;
buf.maccel[i] = buf.mforce[i];
}
//compute alpha advance
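// Cell-wise rate of change of the phase fractions: -alpha_k*(div v_m) and
// -div(alpha_k*u_mk), discretised with the spiky kernel gradient over same-type,
// non-boundary neighbours.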
__device__ void contributeAlphaChange( int i, int muli, float3 ipos, float3 iveleval, float ipress, float idens, int cell, bufList buf, float* ialpha, float* ialphachange, float3* ivmk)
{
float dsq, c;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2/d2;
float3 dist, vmr, vkr;
float cmterm;
int j, mulj;
//float3 jvmk[MAX_FLUIDNUM];
float jalpha_prek;
//float alphachange = 0.0;
if ( buf.mgridcnt[cell] == 0 ) return;// make_float3(0,0,0);
int cfirst = buf.mgridoff[ cell ];
int clast = cfirst + buf.mgridcnt[ cell ];
//force = make_float3(0,0,0);
//vterm = simData.lapkern * simData.pvisc;
for ( int cndx = cfirst; cndx < clast; cndx++ ) {
j = buf.mgrid[ cndx ];
#ifdef NEW_BOUND
if (buf.misbound[j] ==1) continue;
#endif
if(buf.MFtype[j] != buf.MFtype[i])
continue;
mulj = j * MAX_FLUIDNUM;
dist = ( ipos - buf.mpos[ j ] ); // dist in cm
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
dist *= simData.psimscale;
if ( dsq < r2 && dsq > 0) {
dsq = sqrt(dsq*d2);
c = ( simData.psmoothradius - dsq );
cmterm = simData.spikykern * c * c / dsq * buf.mf_restmass[j] * buf.mdensity[j];
vmr = buf.mveleval[j] - iveleval;
for(uint fcount = 1; fcount < simData.mf_catnum; fcount++)
{
jalpha_prek = buf.mf_alpha[mulj+fcount];
//-alpha_k * (nabla cdot v_m)
ialphachange[fcount] -= 0.5 * cmterm * (jalpha_prek+ialpha[fcount]) * (vmr.x * dist.x + vmr.y * dist.y + vmr.z * dist.z);
//buf.mf_alpha[muli+fcount] -= 0.5 * cmterm * (jalpha_prek+ialpha_pre[fcount]) * (vmr.x * dist.x + vmr.y * dist.y + vmr.z * dist.z);
//-nabla cdot (alpha_k * u_mk)
vkr = make_float3((jalpha_prek * buf.mf_vel_phrel[mulj+fcount].x + ialpha[fcount] * ivmk[fcount].x),
(jalpha_prek * buf.mf_vel_phrel[mulj+fcount].y + ialpha[fcount] * ivmk[fcount].y),
(jalpha_prek * buf.mf_vel_phrel[mulj+fcount].z + ialpha[fcount] * ivmk[fcount].z));
ialphachange[fcount] -= cmterm * (vkr.x * dist.x + vkr.y * dist.y + vkr.z * dist.z);
//buf.mf_alpha[muli+fcount] -= cmterm * (vkr.x * dist.x + vkr.y * dist.y + vkr.z * dist.z);
}
//pterm = simData.psimscale * -0.5f * c * simData.spikykern * ( ipress + buf.mpress[ j ] ) / dsq;
//dterm = c * idens * (buf.mdensity[ j ] );
//force += ( pterm * dist + vterm * ( buf.mveleval[ j ] - iveleval )) * dterm;
}
}
//return force;
//return alphachange;
}
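// Integrate the phase fractions one step: alpha_next = alpha + dt*dalpha, with the
// change clamped at -0.99 to keep fractions from collapsing to negative values.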
__global__ void mfComputeAlphaAdvance( bufList buf, int pnum )
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if ( i >= pnum)
return ;
if (buf.MFtype[i] != 0)
return;
if (buf.mf_alpha_sum[i] < 0.01)
return;
// Get search cell
int nadj = (1*simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[ i ];
if ( gc == GRID_UNDEF ) return; // particle out-of-range
gc -= nadj;
register uint muloffseti = i * MAX_FLUIDNUM;
register float3 ipos = buf.mpos[ i ];
register float3 iveleval = buf.mveleval[ i ];
register float ipress = buf.mpress[ i ];
register float idens = buf.mdensity[ i ];
register float alpha[MAX_FLUIDNUM],alphachange[MAX_FLUIDNUM];
register float3 ivmk[MAX_FLUIDNUM];
for(uint fcount = 1;fcount < simData.mf_catnum; fcount++)
{
alpha[fcount] = buf.mf_alpha[muloffseti+fcount];
alphachange[fcount] = 0.0f;
ivmk[fcount] = buf.mf_vel_phrel[muloffseti+fcount];
//buf.mf_alpha[muloffseti+fcount] = 0.0f;
}
for (int c=0; c < simData.gridAdjCnt; c++) {
contributeAlphaChange ( i, muloffseti, ipos, iveleval, ipress, idens, gc + simData.gridAdj[c], buf, alpha, alphachange, ivmk);
}
for(uint fcount = 1;fcount < simData.mf_catnum; fcount++)
{
//buf.mf_alpha[muloffseti+fcount] += alphachange[fcount] * simData.mf_dt;
alphachange[fcount] *= simData.mf_dt;
//alphachange limit
if(alphachange[fcount]<-0.99)
{
alphachange[fcount] = -0.99;// * ((int)(buf.mf_alpha[muloffseti+fcount]>0)-(int)(buf.mf_alpha[muloffseti+fcount]<0));
}
buf.mf_alphachange[i*MAX_FLUIDNUM + fcount] = alphachange[fcount];
//if (abs(alphachange[fcount]) >= 0.001)
// printf("particle %d's phase %d's alpha change is %f\n", i, fcount, alphachange[fcount]);
buf.mf_alpha_next[muloffseti+fcount] = alphachange[fcount] + alpha[fcount];
//buf.mf_alpha_next[muloffseti + fcount] = alpha[fcount];
if (isnan(alphachange[fcount]) || isnan(alpha[fcount]))
printf("particle %d phase %d's alpha change is %f, pre alpha is %f, vmk is (%f,%f,%f)\n",
i, fcount, alphachange[fcount], alpha[fcount],
buf.mf_vel_phrel[i*MAX_FLUIDNUM + fcount].x,
buf.mf_vel_phrel[i*MAX_FLUIDNUM + fcount].y, buf.mf_vel_phrel[i*MAX_FLUIDNUM + fcount].z);
//buf.mf_alpha[muloffseti+fcount] *= simData.mf_dt;
//if(buf.mf_alpha[muloffseti+fcount]<-0.99)
//{
// buf.mf_alpha[muloffseti+fcount] = -0.99;// * ((int)(buf.mf_alpha[muloffseti+fcount]>0)-(int)(buf.mf_alpha[muloffseti+fcount]<0));
//}
//buf.mf_alpha[muloffseti+fcount] += alpha_pre[fcount];
}
}
//compute correction
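// Re-normalise alpha_next/beta_next: clamp tiny fractions to zero and rescale the rest
// so all fractions sum to one, falling back to a default category if the total vanishes.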
__global__ void mfComputeCorrection( bufList buf, int pnum )
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if ( i >= pnum)
return;
if (buf.MFtype[i] != 0)
return;
// Get search cell
int nadj = (1*simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[ i ];
if ( gc == GRID_UNDEF ) return; // particle out-of-range
gc -= nadj;
uint muloffseti = i * MAX_FLUIDNUM;
float sum, alphasum = 0, betasum = 0, alphaPercent, betaPercent;
int flag;
sum = 0.0f;
for (uint fcount = 1; fcount < simData.mf_catnum; fcount++)
{
for (int l = 0; l<MAX_SOLIDNUM; ++l)
{
if (buf.mf_beta_next[muloffseti*MAX_SOLIDNUM + fcount*MAX_SOLIDNUM + l] < 0.01)
buf.mf_beta_next[muloffseti*MAX_SOLIDNUM + fcount*MAX_SOLIDNUM + l] = 0;
//if (buf.mf_beta_next[muloffseti*MAX_SOLIDNUM + fcount*MAX_SOLIDNUM + l] > 0.99)
// buf.mf_beta_next[muloffseti*MAX_SOLIDNUM + fcount*MAX_SOLIDNUM + l] = 1.0f;
betasum += buf.mf_beta_next[muloffseti*MAX_SOLIDNUM + fcount*MAX_SOLIDNUM + l];
}
if (buf.mf_alpha_next[muloffseti + fcount] < 0.01)
buf.mf_alpha_next[muloffseti + fcount] = 0.0f;
//if (buf.mf_alpha_next[muloffseti + fcount] > 0.99)
// buf.mf_alpha_next[muloffseti + fcount] = 1.0f;
alphasum += buf.mf_alpha_next[muloffseti + fcount];
}
sum = alphasum + betasum;
flag = (sum>0.0f);
sum = flag*sum + (1 - flag)*1.0f;
sum = 1.0 / sum;
alphaPercent = alphasum * sum;
betaPercent = betasum * sum;
if (betaPercent == 0)
betasum = 1;
else
betasum = 1 / betasum;
if (alphaPercent == 0)
alphasum = 1;
else
alphasum = 1 / alphasum;
//int cat = findMaxCat(alpha_pre, simData.mf_catnum, idx, idxlen);
int maxcat = 3*MAX_SOLIDNUM + 3;
for (uint fcount = 1; fcount < simData.mf_catnum; fcount++)
{
buf.mf_alpha_next[muloffseti + fcount] = (flag)*buf.mf_alpha_next[muloffseti + fcount] * alphaPercent * alphasum + (1 - flag)*(fcount == maxcat ? 1 : 0);
for (int l = 0; l<MAX_SOLIDNUM; ++l)
buf.mf_beta_next[muloffseti*MAX_SOLIDNUM + fcount*MAX_SOLIDNUM + l] =
(flag)*buf.mf_beta_next[muloffseti*MAX_SOLIDNUM + fcount*MAX_SOLIDNUM + l] * betaPercent * betasum
+ (1 - flag)*(fcount*MAX_SOLIDNUM + l == maxcat ? 1 : 0);
}
//sum = 0;
//for (uint fcount = 1; fcount < simData.mf_catnum; fcount++)
//{
// sum += buf.mf_alpha_next[muloffseti + fcount];
// for (int l = 0; l < MAX_SOLIDNUM; ++l)
// sum += buf.mf_beta_next[muloffseti*MAX_SOLIDNUM + fcount*MAX_SOLIDNUM + l];
//}
//if (abs(sum - 1) > 0.001)
//	printf("correction loses conservation, sum is %f\n", sum);
}
__device__ float gamma(float q)
{
if (q<2.0/3.0 && q>0)
return 2.0/3.0;
if (q>=2.0/3.0 && q<1)
return 2*q-3.0/2.0*q*q;
if (q>=1 && q<2)
return (2-q)*(2-q)/2.0;
return 0;
}
////compute force
//__device__ float3 contributeMfForce( int i, int muli, float3 ipos, float3 iveleval, float ipress, float idens, int cell, bufList buf, float* ialpha_pre, float ipressure_modify, float3* ivmk, float3* ivelxcor, float ivisc)
//{
// float dsq, c;
// register float d2 = simData.psimscale * simData.psimscale;
// register float r2 = simData.r2/d2;
//
// float3 dist, vmr;
// float cmterm;
// float pmterm, vmterm;
// int j, mulj;
// float aveDenij,cx,xterm;
// //float3 jvmk[MAX_FLUIDNUM];
// //float jalpha_prek;
//
// if ( buf.mgridcnt[cell] == 0 ) return make_float3(0,0,0);
//
// int cfirst = buf.mgridoff[ cell ];
// int clast = cfirst + buf.mgridcnt[ cell ];
//
// float3 force = make_float3(0,0,0);
// //massi = buf.mf_restmass[i];
// for ( int cndx = cfirst; cndx < clast; cndx++ ) {
// j = buf.mgrid[ cndx ];
//// massj = buf.mf_restmass[j];
// mulj = j * MAX_FLUIDNUM;
// dist = ( ipos - buf.mpos[ j ] ); // dist in cm
// dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
// dist *= simData.psimscale;
// if ( dsq < r2 && dsq > 0) {
// cx = (r2-dsq)*d2;
// dsq = sqrt(dsq*d2);
// c = ( simData.psmoothradius - dsq );
// cmterm = simData.spikykern * c * c / dsq * buf.mf_restmass[j] * buf.mdensity[j];
// //pressure
// if (buf.misbound[j] != 1)
// {
// pmterm = -0.5f * cmterm * (ipress + ipressure_modify + buf.mpress[j] + buf.mf_pressure_modify[j])*idens;
// //pmterm = -0.5f * cmterm * (ipress + buf.mpress[j])*idens;
// force += pmterm * dist;
// //viscosity
// vmr = iveleval - buf.mveleval[j]; //This is different from that in contributeAlphaChange()
// vmterm = cmterm * (ivisc+buf.mf_visc[j]) * idens;
// force += vmterm * vmr;
// }
// else
// {
// pmterm = -0.5f * cmterm * (ipress + ipressure_modify + buf.mpress[j])*idens;
// //pmterm = -0.5f * cmterm * (ipress + buf.mpress[j])*idens;
// force += pmterm * dist*0.03;
// //viscosity
// vmr = iveleval - buf.mveleval[j]; //This is different from that in contributeAlphaChange()
// vmterm = cmterm * (ivisc+buf.mf_visc[j]) * idens;
// force += vmterm * vmr*0.03;
// }
// /*
// else pmterm = -0.5f * cmterm * (ipress + ipressure_modify + buf.mpress[j])*idens/30.0;
// if (buf.misbound[j] ==1)
// vmterm/= 30.0;
// */
// if (buf.misbound[j] != 1)
// //T_dm
// for(uint fcount = 0; fcount < simData.mf_catnum; fcount++)
// {
// float3 dtermj = cmterm * (buf.mf_vel_phrel[mulj+fcount].x * dist.x + buf.mf_vel_phrel[mulj+fcount].y * dist.y + buf.mf_vel_phrel[mulj+fcount].z * dist.z) * buf.mf_alpha_next[mulj+fcount] * buf.mf_vel_phrel[mulj+fcount];
// float3 dtermi = cmterm * (ivmk[fcount].x * dist.x + ivmk[fcount].y * dist.y + ivmk[fcount].z * dist.z) * ialpha_pre[fcount] * ivmk[fcount];
// force += (dtermj + dtermi) * simData.mf_dens[fcount] * idens;
// }
//#ifndef _nXSPH
// //XSPH correction
// aveDenij = 2/(1/buf.mdensity[j]+1/idens);
// xterm = cx*cx*cx*buf.mf_restmass[j]*aveDenij*simData.poly6kern*0.5; //0.5=epsilon
// ivelxcor->x += -vmr.x * xterm;
// ivelxcor->y += -vmr.y * xterm;
// ivelxcor->z += -vmr.z * xterm;
// }
//#endif
// }
// return force;
//}
//advance particles
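// Chemical-reaction rebalance: move an amount chGamma of the two reactant phases into
// the product phase, then renormalise all fractions and the rest density by the
// resulting volume change.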
__device__ void mfChRebalance(int i, int muli, bufList buf, int firstReactor, int secondReactor, int product)
{
float chGamma = 0.01;
register float alpha1 = buf.mf_alpha[muli+firstReactor];
register float alpha2 = buf.mf_alpha[muli+secondReactor];
//register float alphap;
register float massTrans1, massTrans2;
//register float V0 = buf.mf_restmass[i] * buf.mdensity[i];
register float Vp;
register float rhop1 = simData.mf_dens[firstReactor];
register float rhop2 = simData.mf_dens[secondReactor];
register float rhopp = simData.mf_dens[product];
register float deltaAlphaP;
//chGamma *= (alpha1*alpha2);
chGamma *= (alpha1+alpha2);
if(chGamma == 0)return;
if(chGamma > alpha1)chGamma = alpha1;
if(chGamma > alpha2)chGamma = alpha2;
massTrans1 = chGamma * rhop1;
massTrans2 = chGamma * rhop2;
deltaAlphaP = (massTrans1 + massTrans2) / rhopp;
Vp = 1 + deltaAlphaP - 2 * chGamma;
Vp = 1/Vp;
buf.mf_alpha[muli+firstReactor] -= chGamma;
buf.mf_alpha[muli+secondReactor] -= chGamma;
buf.mf_alpha[muli+product] += deltaAlphaP;
for(uint fcount = 0; fcount<simData.mf_catnum; fcount++)
{
buf.mf_alpha[muli+fcount] *= Vp;
}
buf.mf_restdensity[i] *= Vp;
}
//**** shadow functions *******
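// Shadow variant of mfComputeDriftVel: same structure, but each phase's drift velocity
// is clamped to the velocity limit VL.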
__global__ void mfComputeDriftVelVelLimit( bufList buf, int pnum )
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if ( i >= pnum)
return ;
#ifdef NEW_BOUND
if(buf.misbound[i]==1)
return;
#endif
// Get search cell
int nadj = (1*simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[ i ];
if ( gc == GRID_UNDEF ) return; // particle out-of-range
gc -= nadj;
register float relax_coef = simData.relax; // temporary relax time related coefficient
register float sigma = 0.001f;//0.001f; //diffusion&tension coefficient
register float cont, conts, contr;
cont = simData.cont;
conts = simData.cont1;
contr = simData.cont2;
register float3 accel = buf.mforce[i]; // final accel (g-a) of last step was stored in here cf. advance,
//register float massFrack[MAX_FLUIDNUM];
register uint muloffseti = i * MAX_FLUIDNUM;
register float invdens = 1.0/buf.mf_restdensity[i];
register float dsum;
register float vrx, vry, vrz;
register float tdiff;
register float3 ssum;
register float alpha_pre[MAX_FLUIDNUM],mass_concen[MAX_FLUIDNUM];
register float ipress = buf.mpress[ i ];
register float3 ipos = buf.mpos[ i ];
register float idens = buf.mdensity[ i ];
register float3 driftVelterm[MAX_FLUIDNUM],alphaGradterm[MAX_FLUIDNUM];
register float3 sterm[MAX_FLUIDNUM];
//various viscosity
relax_coef /= buf.mf_visc[i];
//relax_coef *= (99*buf.mf_alpha_pre[i*MAX_FLUIDNUM+2]+1);
//third term
for(uint fcount = 0;fcount < simData.mf_catnum; fcount++)
{
//float temp = buf.mf_alpha[muloffseti+fcount];
//buf.mf_alpha_pre[muloffseti+fcount] = temp; //alpha->alpha_pre
alpha_pre[fcount] = buf.mf_alpha_next[muloffseti+fcount];
mass_concen[fcount] = alpha_pre[fcount]*simData.mf_dens[fcount]*invdens;
driftVelterm[fcount] = make_float3(0,0,0);
alphaGradterm[fcount] = make_float3(0,0,0);
}
for (int c=0; c < simData.gridAdjCnt; c++) {
contributeDriftVel ( i, muloffseti, ipos, idens, ipress, gc + simData.gridAdj[c], buf, alpha_pre, mass_concen, driftVelterm, relax_coef, alphaGradterm);
}
for( uint fcount = 0; fcount < simData.mf_catnum; fcount++)
{
//buf.mf_vel_phrel[muloffseti+fcount] = cont * contr * driftVelterm[fcount];
float3 vel = cont * contr * driftVelterm[fcount];
float speed = vel.x*vel.x + vel.y*vel.y + vel.z*vel.z;
if ( speed > simData.VL2 ) {
vel *= simData.VL / sqrt(speed);
}
buf.mf_vel_phrel[muloffseti+fcount] = vel;
}
//first term & second term
dsum = 0;
ssum = make_float3(0,0,0);
for( uint fcount = 0; fcount < simData.mf_catnum; fcount++)
{
//float temp = buf.mf_alpha[muloffseti+fcount];
//dsum += temp * simData.mf_dens[fcount] * simData.mf_dens[fcount] * invdens;
//buf.mf_alpha_pre[muloffseti+fcount] = temp; //alpha->alpha_pre
float temp = buf.mf_alpha_next[muloffseti+fcount];
dsum += temp * simData.mf_dens[fcount] * simData.mf_dens[fcount] * invdens;
if(temp>0.0001)
//sterm[fcount] = buf.mf_alphagrad[muloffseti+fcount]/temp;
sterm[fcount] = alphaGradterm[fcount]/temp;
else
sterm[fcount] = make_float3(0,0,0);
ssum += sterm[fcount] * temp * simData.mf_dens[fcount] * invdens;
}
for( uint fcount = 0; fcount < simData.mf_catnum; fcount++)
{
tdiff = simData.mf_dens[fcount]-dsum;
tdiff *= relax_coef;
vrx = accel.x * tdiff;
vry = accel.y * tdiff;
vrz = accel.z * tdiff;
buf.mf_vel_phrel[muloffseti+fcount] += make_float3(vrx,vry,vrz);
buf.mf_vel_phrel[muloffseti+fcount] -= cont * conts * sigma * (sterm[fcount]-ssum);
}
}
//***** End Shadow Functions *******
// ********** Project-u Functions *********
//__device__ float3 contributeForce_projectu(int i, int muli, float3 ipos, float3 iveleval, float ipress, float idens, int cell, bufList buf, float* ialpha_pre, float ipressure_modify, float3* ivmk, float3* ivelxcor, float ivisc)
//{
// //Force here represents the acceleration
// float dsq, c;
// register float d2 = simData.psimscale * simData.psimscale;
// register float r2 = simData.r2/d2;
//
// float3 dist, vmr ;
// float cmterm,cmterm1;
//// float massj;
// float pmterm, vmterm;
//// float q;
// int j, mulj;
// float aveDenij,cx,xterm;
//
// if ( buf.mgridcnt[cell] == 0 ) return make_float3(0,0,0);
//
// int cfirst = buf.mgridoff[ cell ];
// int clast = cfirst + buf.mgridcnt[ cell ];
//
// float3 force = make_float3(0,0,0);
// //massi = buf.mf_restmass[i];
//
// for ( int cndx = cfirst; cndx < clast; cndx++ )
// {
// j = buf.mgrid[ cndx ];
//
// //massj = buf.mf_restmass[j];
// mulj = j * MAX_FLUIDNUM;
// dist = ( ipos - buf.mpos[ j ] ); // dist in cm
// dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
// dist *= simData.psimscale;
//
// if ( dsq < r2 && dsq > 0) {
// cx = (r2-dsq)*d2;
// dsq = sqrt(dsq*d2);
// c = ( simData.psmoothradius - dsq );
//
// cmterm1 = simData.spikykern * c * c / dsq;
// cmterm = simData.spikykern * c * c / dsq * buf.mf_restmass[j] * buf.mdensity[j];
// //pressure
//#ifdef NEW_BOUND
// if (buf.misbound[j] != 1) //force between fluid and solid, force within fluid
// {
//
// if( buf.MFtype[j]==0)
// pmterm = -0.5f * cmterm * (ipress + buf.mpress[j] + buf.mf_pressure_modify[j] )*idens;
// else
// pmterm = -0.5f * cmterm * (ipress + buf.mpress[j])*idens;
//
// if(buf.MFtype[i]==0 && buf.MFtype[j]==1 && buf.mpress[j]<0)
// pmterm = -0.5f * cmterm * (ipress + 0)*idens;
//
// //pmterm = -0.5f * cmterm * (ipress + buf.mpress[j])*idens;
// //if( (buf.MFtype[i]==0 && buf.MFtype[j]==0))
// // force += pmterm * dist;
//		////repulsive force between solid and liquid, temporarily commented out
// //if(! (buf.MFtype[i]==1 && buf.MFtype[j]==1)){
// // force += pmterm * dist;
// //}
// if(buf.MFtype[i] == 0 && buf.MFtype[j] == 0)
// {
// force += pmterm * dist;
// }
//
// }
// else if(buf.MFtype[i]==0) //force from boundary particles to fluid particles
// {
// //pmterm = -0.5f * cmterm * (ipress + ipressure_modify + buf.mpress[j])*idens;
// //pmterm = -0.5f * cmterm * (ipress + buf.mpress[j])*idens;
//
//			////commented out
// ////pressure
// //pmterm = - cmterm1 * buf.mf_restdensity[i] * buf.mf_restmass[j] /buf.mf_restdensity[j] *ipress *buf.mdensity[i]*buf.mdensity[i];
// //force += pmterm * dist * simData.omega;
//
// ////viscosity
// //vmr = iveleval - buf.mveleval[j]; //This is different from that in contributeAlphaChange()
// //float pi_ij = vmr.x*dist.x + vmr.y*dist.y + vmr.z*dist.z;
// //if(pi_ij < 0){
// // pi_ij = pi_ij / (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z + r2 * 0.01);
// // pi_ij = pi_ij * 2 * simData.psmoothradius * (ivisc + buf.mf_visc[j]) * idens /2;
// // pi_ij = - cmterm1 * buf.mf_restdensity[i] * buf.mf_restmass[j]/buf.mf_restdensity[j] * pi_ij;
// // force += pi_ij * dist * simData.visc_factor;
// //
// //}
//
// //vmterm = cmterm * (ivisc+buf.mf_visc[j]) * idens;
// //force += vmterm * vmr*0.03;
// }
// else{ //force from boundary particles to deformable/rigid particles
// /*
// pmterm = -0.5f * cmterm * (ipress + buf.mpress[j])*idens;
// force += pmterm * dist*0.03;
// vmr = iveleval - buf.mveleval[j];
// vmterm = cmterm * (ivisc+buf.mf_visc[j]) * idens;
// force += vmterm * vmr*0.03;*/
//
// //pressure
// pmterm = - cmterm1 * buf.mf_restdensity[i] * buf.mf_restmass[j] / buf.mf_restdensity[j] * (ipress) *buf.mdensity[i]*buf.mdensity[i];
// force += pmterm * dist * simData.omega;
//
// //viscosity
// vmr = iveleval - buf.mveleval[j]; //This is different from that in contributeAlphaChange()
// float pi_ij = vmr.x*dist.x + vmr.y*dist.y + vmr.z*dist.z;
// if(pi_ij < 0){
// pi_ij = pi_ij / (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z + r2 * 0.01);
// pi_ij = pi_ij * 2 * simData.psmoothradius * (ivisc + buf.mf_visc[j]) * idens /2;
// pi_ij = - cmterm1 * buf.mf_restdensity[i] * buf.mf_restmass[j]/buf.mf_restdensity[j] * pi_ij;
// force += pi_ij * dist * simData.visc_factor;
// }
// }
//
// if (buf.misbound[j] != 1)
// //T_dm
// for(uint fcount = 0; fcount < simData.mf_catnum; fcount++)
// {
// float3 dtermj = cmterm * (buf.mf_vel_phrel[mulj+fcount].x * dist.x + buf.mf_vel_phrel[mulj+fcount].y * dist.y + buf.mf_vel_phrel[mulj+fcount].z * dist.z) * buf.mf_alpha_next[mulj+fcount] * buf.mf_vel_phrel[mulj+fcount];
// float3 dtermi = cmterm * (ivmk[fcount].x * dist.x + ivmk[fcount].y * dist.y + ivmk[fcount].z * dist.z) * ialpha_pre[fcount] * ivmk[fcount];
// force += (dtermj + dtermi) * simData.mf_dens[fcount] * idens;
// }
//
//#else
// pmterm = -0.5f * cmterm * (ipress + ipressure_modify + buf.mpress[j] + buf.mf_pressure_modify[j])*idens;
// //pmterm = -0.5f * cmterm * (ipress + buf.mpress[j])*idens;
// force += pmterm * dist;
// //viscosity
// vmr = iveleval - buf.mveleval[j]; //This is different from that in contributeAlphaChange()
// vmterm = cmterm * (ivisc+buf.mf_visc[j]) * idens;
// force += vmterm * vmr;
// for(uint fcount = 0; fcount < simData.mf_catnum; fcount++)
// {
// float3 dtermj = cmterm * (buf.mf_vel_phrel[mulj+fcount].x * dist.x + buf.mf_vel_phrel[mulj+fcount].y * dist.y + buf.mf_vel_phrel[mulj+fcount].z * dist.z) * buf.mf_alpha_pre[mulj+fcount] * buf.mf_vel_phrel[mulj+fcount];
// float3 dtermi = cmterm * (ivmk[fcount].x * dist.x + ivmk[fcount].y * dist.y + ivmk[fcount].z * dist.z) * ialpha_pre[fcount] * ivmk[fcount];
// force += (dtermj + dtermi) * simData.mf_dens[fcount] * idens;
// }
//
//#endif
//#ifndef _nXSPH
// //XSPH correction
// aveDenij = 2/(1/buf.mdensity[j]+1/idens);
// xterm = cx*cx*cx*buf.mf_restmass[j]*aveDenij*simData.poly6kern*0.5; //0.5=epsilon
// ivelxcor->x += -vmr.x * xterm;
// ivelxcor->y += -vmr.y * xterm;
// ivelxcor->z += -vmr.z * xterm;
// }
//#endif
//
// }
// return force;
//}
//__global__ void ComputeForce_projectu ( bufList buf, int pnum)
//{
// uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
// if ( i >= pnum)
// return;
//#ifdef NEW_BOUND
// if(buf.misbound[i]==1)
// return;
//#endif
// // Get search cell
// int nadj = (1*simData.gridRes.z + 1)*simData.gridRes.x + 1;
// uint gc = buf.mgcell[ i ];
// if ( gc == GRID_UNDEF ) return; // particle out-of-range
// gc -= nadj;
//
// register uint muloffseti = i * MAX_FLUIDNUM;
// register float3 ipos = buf.mpos[ i ];
// register float3 iveleval = buf.mveleval[ i ];
//
// register float idens = buf.mdensity[ i ];
// register float alpha_pre[MAX_FLUIDNUM];
// register float3 ivmk[MAX_FLUIDNUM];
// register float pressure_modify = buf.mf_pressure_modify[i];
// register float3 *ivelxcor = buf.mf_velxcor+i;
// register float ivisc = buf.mf_visc[i];
//
// register float ipress;
// if(buf.MFtype[i]==0)
// ipress = buf.mpress[i] + buf.mf_pressure_modify[i];
// else
// ipress = buf.mpress[i];
//
// register float3 force = make_float3(0,0,0);
// *ivelxcor = make_float3(0,0,0);
//
// for(uint fcount = 0;fcount < simData.mf_catnum; fcount++)
// {
// alpha_pre[fcount] = buf.mf_alpha_next[muloffseti+fcount];
// ivmk[fcount] = buf.mf_vel_phrel[muloffseti+fcount];
// }
//
// for (int c=0; c < simData.gridAdjCnt; c++) {
// force += contributeForce_projectu (i, muloffseti, ipos, iveleval, ipress, idens, gc + simData.gridAdj[c], buf, alpha_pre, pressure_modify, ivmk, ivelxcor, ivisc);
// }
// /*if (buf.MFtype[i] == 0 && i % 1000 == 0)
// printf("fluid force is (%f,%f,%f)\n", force.x, force.y, force.z);*/
// //if (buf.MFtype[i] == 1 && buf.elasticID[i] == 6)
// // printf("fluid force is (%f,%f,%f)\n", force.x, force.y, force.z);
// buf.mforce[ i ] = force;
//}
//__device__ void contributeVelocityGradient(float* result, int i, float3 ipos, float3 iveleval, int cell, bufList buf)
//{
// float dsq, c;
// register float d2 = simData.psimscale * simData.psimscale;
// register float r2 = simData.r2/d2;
//
// float3 dist, jveleval;
// float cmterm;
//// float massj,massi;
//
//// float q;
// int j;
//// float aveDenij,cx,xterm;
//
// if ( buf.mgridcnt[cell] == 0 ) return;
//
// int cfirst = buf.mgridoff[ cell ];
// int clast = cfirst + buf.mgridcnt[ cell ];
//
// //massi = buf.mf_restmass[i];
// for ( int cndx = cfirst; cndx < clast; cndx++ )
// {
// j = buf.mgrid[ cndx ];
// if( buf.MFtype[j] != 2)
// continue;
//
// //massj = buf.mf_restmass[j];
// //jveleval = buf.mveleval[j]*buf.mdensity[j]*buf.mdensity[j] + iveleval*buf.mdensity[i]*buf.mdensity[i];
// jveleval = buf.mveleval[j]-iveleval;
//
// dist = ( ipos - buf.mpos[ j ] ); // dist in cm
// dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
// dist *= simData.psimscale;
//
// if ( dsq < r2 && dsq > 0) {
// dsq = sqrt(dsq * d2);
// c = ( simData.psmoothradius - dsq );
// cmterm = simData.spikykern * c * c / dsq * buf.mf_restmass[j] * buf.mdensity[j];
// //cmterm = simData.spikykern * c * c / dsq;
// jveleval = jveleval * cmterm;
// result[0] += jveleval.x * dist.x; result[1] += jveleval.x * dist.y; result[2] += jveleval.x * dist.z;
// result[3] += jveleval.y * dist.x; result[4] += jveleval.y * dist.y; result[5] += jveleval.y * dist.z;
// result[6] += jveleval.z * dist.x; result[7] += jveleval.z * dist.y; result[8] += jveleval.z * dist.z;
// }
// }
//}
__device__ void print9(char* string,float* buf){
printf("%s\n%f %f %f\n%f %f %f\n%f %f %f\n",string,buf[0],buf[1],buf[2],
buf[3],buf[4],buf[5],buf[6],buf[7],buf[8]);
return;
}
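// Boundary-response helper: the active path simply returns the input force; the
// commented block keeps the original penalty-based box-boundary terms.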
__device__ float3 getBoundForce(int i,bufList buf, float3 force, float time){
register float3 accel, norm;
register float diff, adj, speed;
register float3 pos = buf.mpos[i];
register float3 veval = buf.mveleval[i];
accel = force;
// if (buf.MFtype[i] == 1)
// {
// // Boundaries
// // Y-axis
// diff = simData.pradius - (pos.y - (simData.pboundmin.y + (pos.x - simData.pboundmin.x)*simData.pground_slope)) * simData.psimscale;
// // if (diff>simData.pradius) diff += simData.pradius*1000;
// if (diff > EPSILON) {
// norm = make_float3(-simData.pground_slope, 1.0 - simData.pground_slope, 0);
// adj = simData.pextstiff * diff - simData.pdamp * dot(norm, veval);
// norm *= adj; accel += norm;//*scale_dens;
//
// //float3 veldamp=make_float3(veval.x, 0, veval.z);
// //buf.mveleval[i] -= veldamp * simData.omega;
// //veldamp=make_float3(vel.x, 0, vel.z);
// //buf.mvel[i] -= veldamp * simData.omega;
// }
//
// diff = simData.pradius - (simData.pboundmax.y - pos.y)*simData.psimscale;
// // if (diff>simData.pradius) diff += simData.pradius*1000;
// if (diff > EPSILON) {
// norm = make_float3(0, -1, 0);
// adj = simData.pextstiff * diff - simData.pdamp * dot(norm, veval);
// norm *= adj; accel += norm;//*scale_dens;
// }
//
//#ifdef _xzsoftmargin
// // X-axis
// diff = simData.pradius - (pos.x - (simData.pboundmin.x + (sin(time*simData.pforce_freq) + 1)*0.5 * simData.pforce_min))*simData.psimscale;
// // if (diff>simData.pradius) diff += simData.pradius*1000;
// if (diff > EPSILON) {
// norm = make_float3(1, 0, 0);
// adj = (simData.pforce_min + 1) * simData.pextstiff * diff - simData.pdamp * dot(norm, veval);
// norm *= adj; accel += norm;//*scale_dens;
// }
// diff = simData.pradius - ((simData.pboundmax.x - (sin(time*simData.pforce_freq) + 1)*0.5*simData.pforce_max) - pos.x)*simData.psimscale;
// // if (diff>simData.pradius) diff += simData.pradius*1000;
// if (diff > EPSILON) {
// norm = make_float3(-1, 0, 0);
// adj = (simData.pforce_max + 1) * simData.pextstiff * diff - simData.pdamp * dot(norm, veval);
// norm *= adj; accel += norm;//*scale_dens;
// }
//
// // Z-axis
// diff = simData.pradius - (pos.z - simData.pboundmin.z) * simData.psimscale;
// // if (diff>simData.pradius) diff += simData.pradius*1000;
// if (diff > EPSILON) {
// norm = make_float3(0, 0, 1);
// adj = simData.pextstiff * diff - simData.pdamp * dot(norm, veval);
// norm *= adj; accel += norm;//*scale_dens;
// }
// diff = simData.pradius - (simData.pboundmax.z - pos.z)*simData.psimscale;
// // if (diff>simData.pradius) diff += simData.pradius*1000;
// if (diff > EPSILON) {
// norm = make_float3(0, 0, -1);
// adj = simData.pextstiff * diff - simData.pdamp * dot(norm, veval);
// norm *= adj; accel += norm;//*scale_dens;
// }
//#endif
// }
//if (i % 500 == 0&&buf.misbound[i]!=1)
// printf("particle %d's accel is (%f,%f,%f)\n", i, accel.x, accel.y, accel.z);
// Accel Limit
/*speed = accel.x*accel.x + accel.y*accel.y + accel.z*accel.z;
if ( speed > simData.AL2 ) {
accel *= simData.AL / sqrt(speed);
}*/
// Gravity
//accel += simData.pgravity;
return accel;
}
//__global__ void AddSPHtensorForce( bufList buf, int pnum, float time)
//{
// uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
// if ( i >= pnum) return;
// //if(buf.MFtype[i] != 1)
// // return;
//
// // Get search cell
// int nadj = (1*simData.gridRes.z + 1)*simData.gridRes.x + 1;
// uint gc = buf.mgcell[ i ];
// if ( gc == GRID_UNDEF ) return; // particle out-of-range
// gc -= nadj;
//
//// register float3 ipos = buf.mpos[ i ];
//// float *itensor = buf.MFtemptensor + i*9;
//// float3 tensorForce = make_float3(0,0,0);
////
////
//
//// /*if(i%1000==0&&buf.misbound[i]!=1)
//// printf("%d tensorforce: %f %f %f\n",i, tensorForce.x, tensorForce.y, tensorForce.z);
////*/
//// buf.mforce[i] = buf.mforce[i] + tensorForce;
//// if (buf.MFtype[i] == 1 && buf.elasticID[i] == 1600)
//// printf("tensor force is (%f,%f,%f)\n", tensorForce.x, tensorForce.y, tensorForce.z);
// //Get Other force!
// buf.maccel[i] = buf.mforce[i];
// //if (buf.MFtype[i] == 1 && (buf.elasticID[i] == 6 || buf.elasticID[i] == 31))
// // printf("final force %d's is %f,%f,%f\n", buf.elasticID[i], buf.mvel[i].x, buf.mvel[i].y, buf.mvel[i].z);
// buf.mforce[i] = make_float3(0,0,0);
//}
//********************** end project-u ************************
void floatup_cuda(int mode){
fcuda.gravityfree = mode;
checkCudaErrors ( cudaMemcpyToSymbol ( simData, &fcuda, sizeof(FluidParams) ) );
return;
}
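// Position update of the integrator: advance non-boundary particles by v*dt + 0.5*a*dt^2
// (converted to simulation scale) and refresh particle colours from the phase fractions;
// boundary particles are pinned and only recoloured.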
__global__ void updatePosition(float time, bufList buf, int pnum){
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if ( i >= pnum ) return;
if ( buf.mgcell[i] == GRID_UNDEF ) {
buf.mpos[i] = make_float3(-1000,-1000,-1000);
buf.maccel[i] = make_float3(0,0,0);
return;
}
// Get particle vars
register float3 accel, norm;
register float diff, adj, speed;
register float3 pos = buf.mpos[i];
register float3 veval = buf.mveleval[i];
float3 vel = buf.maccel[i];
register float newdens,newvisc, newmass;
// Leapfrog integration
accel = buf.maccel[i];
float beta[MAX_FLUIDNUM];
if (buf.misbound[i] != 1)
{
//float3 vnext = accel*simData.mf_dt + vel; // v(t+1/2) = v(t-1/2) + a(t) dt
//float3 tmpdeltaPos = (vnext + buf.mf_velxcor[i]) * (simData.mf_dt/simData.psimscale);
//float3 tmpPos = buf.mpos[i] + tmpdeltaPos;
buf.mforce[i] = accel; // store this step's acceleration in mforce
float3 dPos = (buf.mveleval[i]*simData.mf_dt + 0.5* accel* simData.mf_dt* simData.mf_dt)/simData.psimscale;
buf.mpos[i] = buf.mpos[i] + dPos;
//Color Setting
//buf.mclr[i] = COLORA(buf.mf_alpha[i*MAX_FLUIDNUM+2],buf.mf_alpha[i*MAX_FLUIDNUM+1],buf.mf_alpha[i*MAX_FLUIDNUM+0],1);
//if(buf.MFtype[i]==0)
// buf.mclr[i] = COLORA(1,1,1,1);
//else
if (buf.MFtype[i] == 2 || (_example == 2&&buf.MFtype[i] >= 2))
{
//buf.mclr[i] = COLORA(1, 1, 0, 0.6);
int index = buf.elasticID[i];
for (int k = 1; k < MAX_FLUIDNUM; ++k)
beta[k] = buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k * MAX_SOLIDNUM + buf.MFtype[i] - 2];
if (_example == 2)
{
//float ratio = 3;
if (buf.MFtype[i] == 5)
buf.mclr[i] =
COLORA(1 / (1 + sqrt(beta[2] + beta[3])), 1 / (1 + sqrt(beta[1] + beta[3])), sqrt(beta[3])/(1+sqrt(beta[3])), !simData.HideSolid);
else
{
buf.mclr[i] =
COLORA(0, 1, 0, !simData.HideSolid);
}
}
else
buf.mclr[i] =
COLORA(1 - (beta[2] + beta[3]), 1 - (beta[1] + beta[3]), 1 - (beta[1] + beta[2]), !simData.HideSolid);
}
else
{
//for (int k = 1; k < MAX_FLUIDNUM; ++k)
// beta[k] = buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k * MAX_SOLIDNUM + 3];
//if(!simData.HideFluid)
buf.mclr[i] = COLORA(buf.mf_alpha[i*MAX_FLUIDNUM + 1], buf.mf_alpha[i*MAX_FLUIDNUM + 2],
buf.mf_alpha[i*MAX_FLUIDNUM + 3],!simData.HideFluid*0.55*
(buf.mf_alpha[i*MAX_FLUIDNUM + 1] + buf.mf_alpha[i*MAX_FLUIDNUM + 2] +
buf.mf_alpha[i*MAX_FLUIDNUM + 3]));
//else
// buf.mclr[i] = COLORA(buf.mf_alpha[i*MAX_FLUIDNUM + 1] + beta[3], buf.mf_alpha[i*MAX_FLUIDNUM + 2] + beta[2],
// buf.mf_alpha[i*MAX_FLUIDNUM + 3]+beta[1], buf.isInside[i]*0.55);
//else
// buf.mclr[i] = COLORA(beta[3], beta[2], beta[1], beta[3] + beta[2] + beta[1]);
//buf.mclr[i] = COLORA(buf.mf_alpha[i*MAX_FLUIDNUM + 2] + buf.mf_beta[i*MAX_FLUIDNUM + 2],
// buf.mf_alpha[i*MAX_FLUIDNUM + 1] + buf.mf_beta[i*MAX_FLUIDNUM + 1],
// buf.mf_alpha[i*MAX_FLUIDNUM + 0] + buf.mf_beta[i*MAX_FLUIDNUM + 0], !simData.HideFluid);
//buf.mclr[i] = COLORA(buf.mf_alpha[i*MAX_FLUIDNUM + 2],
// buf.mf_alpha[i*MAX_FLUIDNUM + 1],
// buf.mf_alpha[i*MAX_FLUIDNUM + 0], !simData.HideFluid*(buf.mf_alpha[i*MAX_FLUIDNUM + 1]+ buf.mf_alpha[i*MAX_FLUIDNUM + 2]));
}
//buf.mclr[i] = COLORA(buf.mf_alpha[i*MAX_FLUIDNUM + 2], buf.mf_alpha[i*MAX_FLUIDNUM + 1], buf.mf_alpha[i*MAX_FLUIDNUM + 0], 0);
}
else if (buf.misbound[i] == 1)
{
buf.mveleval[i] = make_float3(0,0,0); // boundary particles stay at rest
buf.maccel[i] = make_float3(0,0,0);
buf.mforce[i] = make_float3(0,0,0);
if (buf.MFtype[i] > 2)
{
for (int k = 1; k < MAX_FLUIDNUM; ++k)
beta[k] = buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k * MAX_SOLIDNUM + buf.MFtype[i] - 2];
float sum = beta[1] + beta[2] + beta[3] + 1;
buf.mclr[i] =
COLORA(1 - (beta[2] + beta[3]), 1 - (beta[1] + beta[3]), 1 - (beta[1] + beta[2]), !simData.HideRigid);
//buf.mclr[i] = COLORA((sqrt(beta[1]))/sum, (sqrt(beta[2]))/sum, (sqrt(beta[3]))/sum, !simData.HideRigid*(beta[1]+beta[2]+beta[3]));
//buf.mclr[i] = COLORA((1+beta[1])/sum, (1+beta[2])/sum, (1+beta[3])/sum, !simData.HideRigid);
//buf.mclr[i] = COLORA(1, 1, 1, !simData.HideBound);
}
else
{
buf.mclr[i] = COLORA(1, 1, 1, !simData.HideBound);
}
}
buf.mforce[i] = make_float3(0, 0, 0);
}
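// Velocity update: clamp the acceleration to AL, add gravity, integrate v += a*dt and
// clamp the speed to VL; boundary particles stay at rest.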
__global__ void updateVelocity(float time, bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if ( i >= pnum ) return;
//if (buf.MFtype[i] == 3)return;
if ( buf.mgcell[i] == GRID_UNDEF ) {
buf.mpos[i] = make_float3(-1000,-1000,-1000);
buf.maccel[i] = make_float3(0,0,0);
return;
}
// Get particle vars
register float3 accel, accel1, accel2;
register float speed;
// Leapfrog integration
accel = buf.maccel[i];
if (isnan(dot(accel, accel)))
printf("particle %d's type is %d, accel is nan\n",
i, buf.MFtype[i]);
//if (buf.MFtype[i] == 0 && i % 10000 == 0)
// printf("particle %d's mixture vel is (%f,%f,%f), fluid vel is (%f,%f,%f)\n",
// i, buf.mveleval[i].x, buf.mveleval[i].y, buf.mveleval[i].z,
// buf.fluidVel[i*MAX_FLUIDNUM + 1].x, buf.fluidVel[i*MAX_FLUIDNUM + 1].y,
// buf.fluidVel[i*MAX_FLUIDNUM + 1].z);
speed = accel.x*accel.x + accel.y*accel.y + accel.z*accel.z;
if (speed > simData.AL2) {
accel *= simData.AL / sqrt(speed);
}
accel += simData.pgravity;
buf.maccel[i] = accel;
////int index;
//if(simData.example == 1 || simData.example == 2)
// if (buf.MFtype[i] == 1)
// {
// int index = buf.elasticID[i];
// if(buf.frame[index] > 1200 && buf.frame[index] < 1600)
// accel -= 3 * simData.pgravity;
// if (buf.frame[index] == 1600)
// {
// buf.mveleval[i] = make_float3(0, 0, 0);
// accel -= simData.pgravity;
// }
// if (buf.frame[index] >= 1600)
// {
// accel -= simData.pgravity;
// if (buf.isSurface[index] && buf.frame[index] <= 2000 && buf.frame[index] >= 1800 && simData.example == 1)
// accel += -300 * buf.normal[index];
// }
// }
if (buf.misbound[i] != 1)
{
buf.mveleval[i] = buf.mveleval[i] + simData.mf_dt*accel;
{
//buf.mveleval[i] += (1-buf.fluidPercent[i])*simData.mf_dt*buf.poroForce[i];
float vm = dot(buf.mveleval[i], buf.mveleval[i]);// .x*buf.mveleval[i].x + buf.mveleval[i].y*buf.mveleval[i].y + buf.mveleval[i].z*buf.mveleval[i].z;
vm = sqrt(vm);
if (vm > simData.VL)
{
buf.mveleval[i] *= simData.VL / vm;
}
}
}
else if (buf.misbound[i] == 1)
{
buf.mveleval[i] = make_float3(0,0,0); // boundary particles stay at rest
buf.maccel[i] = make_float3(0,0,0);
buf.mforce[i] = make_float3(0,0,0);
//buf.mclr[i] = COLORA(1,1,1,0.8);
}
//buf.vel_mid[i] = buf.mveleval[i];
}
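// Predictor step: store the clamped acceleration in mforce/maccel and compute the
// intermediate velocity vel_mid = v + dt*a used by the subsequent solver passes.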
__global__ void computeMidVel(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
//if (buf.MFtype[i] == 3)return;
if (buf.mgcell[i] == GRID_UNDEF) {
buf.mpos[i] = make_float3(-1000, -1000, -1000);
buf.maccel[i] = make_float3(0, 0, 0);
return;
}
// Get particle vars
register float3 accel, norm, pos = buf.mpos[i];
register float speed;
//buf.vel_mid[i] = buf.mveleval[i];
//if (dot(buf.vel_mid[i], buf.vel_mid[i])!=dot(buf.mveleval[i], buf.mveleval[i]))
// printf("particle %d's type is %d, vel is (%f,%f,%f), vel_mid is (%f,%f,%f)\n",
// i, buf.MFtype[i], buf.mveleval[i].x, buf.mveleval[i].y, buf.mveleval[i].z,
// buf.vel_mid[i].x, buf.vel_mid[i].y, buf.vel_mid[i].z);
// float scale_dens = 1000.0/buf.mf_restdensity[i];
accel = buf.maccel[i];
speed = accel.x*accel.x + accel.y*accel.y + accel.z*accel.z;
if (speed > simData.AL2) {
accel *= simData.AL / sqrt(speed);
}
buf.mforce[i] = accel;
buf.fluidForce[i] = accel;
buf.maccel[i] = buf.mforce[i];
if (buf.misbound[i] != 1)
{
buf.vel_mid[i] = buf.mveleval[i] + simData.mf_dt*accel;
}
else
{
buf.mveleval[i] = make_float3(0, 0, 0); // boundary particles stay at rest
buf.maccel[i] = make_float3(0, 0, 0);
buf.mforce[i] = make_float3(0, 0, 0);
buf.vel_mid[i] = make_float3(0, 0, 0);
//buf.mclr[i] = COLORA(1,1,1,0.8);
}
//buf.maccel[i] = make_float3(0, 0, 0);
//buf.mforce[i] = make_float3(0, 0, 0);
}
void LeapFrogIntegration(float time){
updateVelocity<<<fcuda.numBlocks, fcuda.numThreads>>>(time, fbuf, fcuda.pnum);
cudaThreadSynchronize();
updatePosition << <fcuda.numBlocks, fcuda.numThreads >> >(time, fbuf, fcuda.pnum);
cudaThreadSynchronize();
}
//****An Implicit SPH Formulation for Incompressible Linearly Elastic Solids*************
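// For every elastic particle, record where it appears in each neighbour's own neighbour
// list (neighborIndex), so symmetric neighbour lookups become O(1) later.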
__global__ void ComputeMap(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (_example != 2)
{
if (buf.MFtype[i] != 2) return;
}
else
if (buf.MFtype[i] < 2)
return;
int gc = buf.mgcell[i];
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
if (gc == GRID_UNDEF) {
buf.mpos[i] = make_float3(-1000, -1000, -1000);
buf.maccel[i] = make_float3(0, 0, 0);
return;
}
int elasticIndex = buf.elasticID[i];
int j = 0;
for(int l=0;l<buf.neighborNum[elasticIndex];++l)
{
j = buf.neighborID[elasticIndex * simData.maxNeighborNum + l];
for(int k=0;k<buf.neighborNum[j];++k)
if(elasticIndex == buf.neighborID[j*simData.maxNeighborNum +k])
{
//if (elasticIndex == 1600)
//{
// printf("elastic id: %d,neighborID:%d\n", buf.elasticID[i], j);
//}
buf.neighborIndex[elasticIndex * simData.maxNeighborNum + l] = k;
break;
}
}
//if (elasticIndex == 1600)
// printf("particle %d's elasticID is %d\n", i, buf.elasticID[i]);
}
//compute only once
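// ComputeCorrectL: assembles, per elastic particle, the 3x3 kernel-gradient
// correction matrix L_i = -sum_j V_j^0 (grad W_ij) (x_ij)^T, inverts it when
// possible, and stores the corrected gradients grad W~_ij = L_i * grad W_ij
// in kernelGrad. This is the standard first-order consistency correction for
// SPH derivatives; it also resets the per-particle frame counter.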
__global__ void ComputeCorrectL(bufList buf,int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (_example != 2)
{
if (buf.MFtype[i] != 2) return;
}
else
if (buf.MFtype[i] < 2)
return;
int gc = buf.mgcell[i];
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
//if(i%100 == 0)
//printf("particle %d's elasticID is %d\n", i, buf.elasticID[i]);
if (gc == GRID_UNDEF) {
buf.mpos[i] = make_float3(-1000, -1000, -1000);
buf.maccel[i] = make_float3(0, 0, 0);
return;
}
gc -= nadj;
float correctL[9];
for (int l = 0; l < 9; ++l)
correctL[l] = 0;
int index = 0;
int jndex, j;
int elasticIndex = buf.elasticID[i];
float dsq, c;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2 / d2;
float pmterm;
float3 dist, vmr;
//if(elasticIndex == 1600)
//printf("particle %d's elasticIndex is %d\n", i, elasticIndex);
//if (elasticIndex >= simData.numElasticPoints)
//printf("elasticIndex = %d and limit %d\n", elasticIndex, simData.numElasticPoints);
//fbuf.elasticID[elasticIndex] = elasticIndex;
//buf.initialVolume[elasticIndex] = buf.mf_restmass[i] * buf.mdensity[i];
for (int l = 0; l < buf.neighborNum[elasticIndex]; l++)
{
jndex = buf.neighborID[elasticIndex * simData.maxNeighborNum + l];
j = buf.particleID[jndex];
dist = (buf.mpos[i] - buf.mpos[j]); // dist in cm
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
dsq = sqrt(dsq*d2);
c = simData.psmoothradius - dsq;
dist *= simData.psimscale;
pmterm = buf.initialVolume[jndex] * simData.spikykern * c * c / dsq;
//pmterm = buf.initialVolume[jndex] * simData.spikykern * c * c;//v_j 0
correctL[0] += -pmterm * dist.x*dist.x; correctL[1] += -pmterm * dist.x*dist.y; correctL[2] += -pmterm * dist.x*dist.z;
correctL[3] += -pmterm * dist.y*dist.x; correctL[4] += -pmterm * dist.y*dist.y; correctL[5] += -pmterm * dist.y*dist.z;
correctL[6] += -pmterm * dist.z*dist.x; correctL[7] += -pmterm * dist.z*dist.y; correctL[8] += -pmterm * dist.z*dist.z;
}
if (det(correctL) != 0) {
/*if (i % 1000 == 0)
printf("particle %d's L is (%f,%f,%f)(%f,%f,%f)(%f,%f,%f)\n", i,
correctL[0], correctL[1], correctL[2],
correctL[3], correctL[4], correctL[5],
correctL[6], correctL[7], correctL[8]);*/
InverseMatrix3(correctL);
/*if (elasticIndex == 0)
printf("particle %d's inverseL is (%f,%f,%f)(%f,%f,%f)(%f,%f,%f)\n", i,
correctL[0], correctL[1], correctL[2],
correctL[3], correctL[4], correctL[5],
correctL[6], correctL[7], correctL[8]);*/
}
else
printf("ERROR:particle %d's correctL cannot be inversed! neighbor num is %d, correctL is (%f,%f,%f)(%f,%f,%f)(%f,%f,%f)\n",
i, buf.neighborNum[elasticIndex], correctL[0], correctL[1],correctL[2],correctL[3],
correctL[4],correctL[5],correctL[6],correctL[7],correctL[8]);
// float3 dist;
// float c;
// int jndex;
for(int l=0;l<buf.neighborNum[elasticIndex];++l)
{
dist = buf.neighborDistance[elasticIndex * simData.maxNeighborNum + l];
dsq = sqrt(dot(dist, dist));
c = simData.psmoothradius - dsq;
buf.kernelGrad[elasticIndex * simData.maxNeighborNum + l].x = correctL[0] * dist.x + correctL[1] * dist.y + correctL[2] * dist.z;
buf.kernelGrad[elasticIndex * simData.maxNeighborNum + l].y = correctL[3] * dist.x + correctL[4] * dist.y + correctL[5] * dist.z;
buf.kernelGrad[elasticIndex * simData.maxNeighborNum + l].z = correctL[6] * dist.x + correctL[7] * dist.y + correctL[8] * dist.z;
buf.kernelGrad[elasticIndex * simData.maxNeighborNum + l].x *= simData.spikykern *c *c/dsq;
buf.kernelGrad[elasticIndex * simData.maxNeighborNum + l].y *= simData.spikykern *c *c/dsq;
buf.kernelGrad[elasticIndex * simData.maxNeighborNum + l].z *= simData.spikykern *c *c/dsq;
//jndex = buf.neighborID[elasticIndex];
//buf.initialVolume[elasticIndex] += simData.poly6kern * pow(c, 3) * buf.mf_restmass[i] * buf.mdensity[buf.particleID[jndex]];
}
buf.frame[elasticIndex] = 0;
//if (i % 1000 == 0)
// printf("initial volume is %f\n", 1000000*buf.initialVolume[elasticIndex]);
}
__global__ void CheckCorrectedKernelGradientError(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (_example != 2)
{
if (buf.MFtype[i] != 2) return;
}
else
if (buf.MFtype[i] < 2)
return;
int gc = buf.mgcell[i];
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
//if(i%100 == 0)
//printf("particle %d's elasticID is %d\n", i, buf.elasticID[i]);
if (gc == GRID_UNDEF) {
buf.mpos[i] = make_float3(-1000, -1000, -1000);
buf.maccel[i] = make_float3(0, 0, 0);
return;
}
int index = buf.elasticID[i];
int jndex, j;
float3 dist;
float check[9] = {0,0,0,0,0,0,0,0,0};
float temp[9];
//printf("particle %d's elasticIndex is %d\n", i, index);
//if(index == 1600)
// printf("initial neighbor num is %d\n", buf.neighborNum[index]);
for(int l=0;l<buf.neighborNum[index];++l)
{
jndex = buf.neighborID[index * simData.maxNeighborNum + l];
dist = -buf.neighborDistance[index * simData.maxNeighborNum + l];
//if (index == 100)
// printf("initial dist with %d is (%f,%f,%f)\n", jndex,dist.x, dist.y, dist.z);
/* if (index == 100 && jndex == 99)
printf("initial dist is %f,%f,%f\n", dist.x, dist.y, dist.z);*/
dist *= buf.initialVolume[jndex];
/*if (index == 100 && jndex == 99)
printf("initial kernel is %f,%f,%f\n", elasticInfo.kernelGrad[index * 600 + l].x, elasticInfo.kernelGrad[index * 600 + l].y, elasticInfo.kernelGrad[index * 600 + l].z);
*/
/*if (index == 100 && elasticInfo.neighborID[index * 600 + l] == 99)
printf("initial volume is %.15f\n", elasticInfo.initialVolume[jndex]);*/
tensorProduct(dist, buf.kernelGrad[index * simData.maxNeighborNum + l], temp);
for (int k = 0; k < 9; ++k)
check[k] += temp[k];
}
if (index == 1600)
printf("checkError is (%f,%f,%f)(%f,%f,%f)(%f,%f,%f)\n",
check[0], check[1], check[2],
check[3], check[4], check[5],
check[6], check[7], check[8]);
}
__device__ void contributeVolume(int i, int cell, bufList buf, int& index, float& volume)
{
float dsq, c;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2 / d2;
float3 dist, vmr;
int j, jndex;
if (buf.mgridcnt[cell] == 0) return;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
int elasticIndex = buf.elasticID[i];
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.MFtype[j] == 2 || (_example == 2 && buf.MFtype[j] >= 2))
{
dist = (buf.mpos[i] - buf.mpos[j]); // dist in cm
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if (!(dsq < r2 && dsq > 0))
continue;
if (index >= simData.maxNeighborNum)
return;
dsq = sqrt(dsq*d2);
c = simData.psmoothradius - dsq;
jndex = buf.elasticID[j];
buf.neighborID[elasticIndex * simData.maxNeighborNum + index] = jndex;
dist *= simData.psimscale;
buf.neighborDistance[elasticIndex * simData.maxNeighborNum + index] = dist;
volume += pow(buf.mf_restmass[j] * buf.density_solid[j], 2)
* simData.poly6kern * pow((r2*d2 - dsq*dsq), 3);
index++;
}
}
}
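// ComputeInitialVolume: for every elastic particle, gathers solid neighbors
// within the smoothing radius (via contributeVolume), records their IDs and
// rest-state distances, and accumulates a poly6-kernel estimate of the rest
// volume in initialVolume. It also stores the elastic-to-particle index map
// (particleID) and warns when maxNeighborNum is exceeded.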
__global__ void ComputeInitialVolume(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (_example != 2)
{
if (buf.MFtype[i] != 2) return;
}
else
if (buf.MFtype[i] < 2)
return;
int gc = buf.mgcell[i];
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
//if(i%100 == 0)
//printf("particle %d's elasticID is %d\n", i, buf.elasticID[i]);
if (gc == GRID_UNDEF) {
buf.mpos[i] = make_float3(-1000, -1000, -1000);
buf.maccel[i] = make_float3(0, 0, 0);
return;
}
gc -= nadj;
int index = 0;
int elasticIndex = buf.elasticID[i];
buf.initialVolume[elasticIndex] = 0;
buf.particleID[elasticIndex] = i;
for (int c = 0; c < simData.gridAdjCnt; c++)
{
contributeVolume(i, gc + simData.gridAdj[c], buf, index, buf.initialVolume[elasticIndex]);
if (index >= simData.maxNeighborNum)
printf("ERROR:Neighbor space is not enough!\n");
}
//buf.initialVolume[elasticIndex] = pow(simData.psmoothradius / 2, 3);
//buf.initialVolume[elasticIndex] +=
// pow(buf.mf_restmass[i] * buf.density_solid[elasticIndex], 2)*pow(simData.r2, 3)*simData.poly6kern;
//if(elasticIndex%1000==0)
//printf("elastic particle %d's initial volume is %.10f\n", elasticIndex, buf.initialVolume[elasticIndex]);
buf.neighborNum[elasticIndex] = index;
//if (buf.mpos[i].y > 20)
// buf.isHead[elasticIndex] = 1;
//else
// buf.isHead[elasticIndex] = 0;
//if (elasticIndex % 1000 == 0)
// printf("elastic particle %d's rest mass is %f, solid density is %f\n", elasticIndex, buf.mf_restmass[i], buf.density_solid[elasticIndex]);
}
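// ComputeCorrectLCUDA: host-side, one-time precomputation for the elastic
// solid. Order matters: neighbor lists and rest volumes first
// (ComputeInitialVolume), then the kernel-gradient correction matrices
// (ComputeCorrectL), the reciprocal neighbor map (ComputeMap), and finally a
// debug check that the corrected gradients approximately reproduce the
// identity for a sample particle (CheckCorrectedKernelGradientError).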
void ComputeCorrectLCUDA()
{
ComputeInitialVolume << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: computeInitialVolume: %s\n", cudaGetErrorString(error));
}
cudaThreadSynchronize();
ComputeCorrectL << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: computeCorrectL: %s\n", cudaGetErrorString(error));
}
cudaThreadSynchronize();
ComputeMap << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: computeMap: %s\n", cudaGetErrorString(error));
}
cudaThreadSynchronize();
CheckCorrectedKernelGradientError << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: checkCKGradError: %s\n", cudaGetErrorString(error));
}
cudaThreadSynchronize();
//testFunc << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
//error = cudaGetLastError();
//if (error != cudaSuccess) {
// fprintf(stderr, "CUDA ERROR: checkCKGradError: %s\n", cudaGetErrorString(error));
//}
//cudaThreadSynchronize();
}
__device__ float contributeTest(int i, int cell, bufList buf)
{
float dsq, c;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2 / d2;
float3 dist, vmr;
float cmterm, cmterm1;
// float massj;
float pmterm, vmterm;
// float q;
int j, mulj;
float aveDenij, cx, xterm;
//if (i % 100 == 0)
// printf("particle %d's gridcnt is %d\n", i,buf.mgridcnt[cell]);
if (buf.mgridcnt[cell] == 0) return 0;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
int elasticIndex = buf.elasticID[i];
int jndex;
float sum = 0;
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
dist = (buf.mpos[i] - buf.mpos[j]); // dist in cm
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if (!(dsq < r2 && dsq > 0))
continue;
c = (r2 - dsq)*d2;
sum += buf.mf_restmass[j] / buf.mf_restdensity[j]* simData.poly6kern * pow(c, 3);
}
return sum;
}
__global__ void testFunc(bufList buf,int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
//if (buf.MFtype[i] != 1) return;
int gc = buf.mgcell[i];
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
if (gc == GRID_UNDEF) {
//buf.mpos[i] = make_float3(-1000, -1000, -1000);
//buf.mvel[i] = make_float3(0, 0, 0);
return;
}
gc -= nadj;
float sum = 0;
for (int c = 0; c < simData.gridAdjCnt; c++)
{
sum += contributeTest(i, gc + simData.gridAdj[c], buf);
}
if (i % 1000 == 0)
printf("test sum is %f\n", sum);
//if (buf.MFtype[i] != 1) return;
//printf("particle %d is an elastic particle,ID is %d\n", i,buf.elasticID[i]);
}
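// ComputeDeformGrad: accumulates the deformation gradient of each elastic
// particle, F_i = sum_j V_j^0 (x_j - x_i) (grad W~_ij)^T, extracts the
// rotation R_i from F_i (extractRotation, warm-started with the identity and
// up to 100 iterations), and stores the rotated corrected gradients
// kernelRotate = R_i * grad W~_ij used by the corotated elasticity kernels.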
__global__ void ComputeDeformGrad(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (_example != 2)
{
if (buf.MFtype[i] != 2) return;
}
else
if (buf.MFtype[i] < 2)
return;
int gc = buf.mgcell[i];
//if(i%100 == 0)
//printf("particle %d's elasticID is %d\n", i, buf.elasticID[i]);
if (gc == GRID_UNDEF) {
buf.mpos[i] = make_float3(-1000, -1000, -1000);
buf.maccel[i] = make_float3(0, 0, 0);
return;
}
for (int l = 0; l < 9; ++l)
buf.gradDeform[i*9+l] = 0;
float3 dist,grad;
int elasticIndex = buf.elasticID[i];
if (buf.particleID[elasticIndex] != i)
printf("map error!id is %d, i is %d\n", buf.particleID[elasticIndex], i);
//elasticInfo.particleID[elasticIndex] = i;
float tempDG[9];
int jndex, j;
//if(elasticIndex == 100)
// printf("now neighbor num is %d\n", elasticInfo.neighborNum[elasticIndex]);
for (int l = 0; l<buf.neighborNum[elasticIndex]; ++l)
{
jndex = buf.neighborID[elasticIndex * simData.maxNeighborNum + l];
j = buf.particleID[jndex];
if(buf.elasticID[j]!=jndex)
{
printf("map error!\n");
continue;
}
dist = (buf.mpos[j] - buf.mpos[i]) * simData.psimscale;
//if (elasticIndex == 100)
// printf("now dist with %d is (%f,%f,%f)\n", jndex, dist.x, dist.y, dist.z);
dist *= buf.initialVolume[buf.neighborID[elasticIndex * simData.maxNeighborNum + l]];
/* if (elasticIndex == 100 && elasticInfo.neighborID[elasticIndex * 600 + l] == 99)
printf("now dist is %f,%f,%f\n", dist.x, dist.y, dist.z);*/
/*if (elasticIndex == 100 && elasticInfo.neighborID[elasticIndex * 600 + l] == 99)
printf("now kernel is %f,%f,%f\n", elasticInfo.kernelGrad[elasticIndex * 600 + l].x, elasticInfo.kernelGrad[elasticIndex * 600 + l].y, elasticInfo.kernelGrad[elasticIndex * 600 + l].z);*/
/*if (elasticIndex == 100 && elasticInfo.neighborID[elasticIndex * 600 + l] == 99)
printf("now volume is %.15f\n", elasticInfo.initialVolume[jndex]);*/
grad = buf.kernelGrad[elasticIndex * simData.maxNeighborNum + l];
tensorProduct(dist, grad, tempDG);
for (int k = 0; k < 9; ++k)
buf.gradDeform[i*9+k] += tempDG[k];
}
//if (buf.elasticID[i] == 1600)
// printf("particle %d's deform grad is (%f,%f,%f)(%f,%f,%f)(%f,%f,%f)\n", elasticIndex,
// buf.gradDeform[i * 9],
// buf.gradDeform[i * 9 + 1], buf.gradDeform[i * 9 + 2], buf.gradDeform[i * 9 + 3],
// buf.gradDeform[i * 9 + 4], buf.gradDeform[i * 9 + 5], buf.gradDeform[i * 9 + 6],
// buf.gradDeform[i * 9 + 7], buf.gradDeform[i * 9 + 8]);
float q[9] = { 1,0,0,0,1,0,0,0,1 };
float error = 0;
float3 t;
extractRotation(&buf.gradDeform[i * 9], q, 100);
//if (i == 37000)
// printf("q is (%f,%f,%f,%f)\n", q[0], q[1], q[2], q[3]);
for (int l = 0; l < 9; ++l)
buf.Rotation[i * 9 + l] = q[l];
for (int l = 0; l<buf.neighborNum[elasticIndex]; ++l)
{
buf.kernelRotate[elasticIndex * simData.maxNeighborNum + l] =
multiply_mv3(&buf.Rotation[i * 9], buf.kernelGrad[elasticIndex * simData.maxNeighborNum + l]);
}
/*if (buf.elasticID[i] == 100)
printf("delta error is %f\n", error);*/
/*if (buf.elasticID[i] == 1600)
printf("particle %d's rotation is (%f,%f,%f)(%f,%f,%f)(%f,%f,%f)\n", i,
buf.Rotation[i * 9],
buf.Rotation[i * 9 + 1], buf.Rotation[i * 9 + 2], buf.Rotation[i * 9 + 3],
buf.Rotation[i * 9 + 4], buf.Rotation[i * 9 + 5], buf.Rotation[i * 9 + 6],
buf.Rotation[i * 9 + 7], buf.Rotation[i * 9 + 8]);*/
}
__global__ void ComputeFinalDeformGrad(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (_example != 2)
{
if (buf.MFtype[i] != 2) return;
}
else
if (buf.MFtype[i] < 2)
return;
int gc = buf.mgcell[i];
//if(i%100 == 0)
//printf("particle %d's elasticID is %d\n", i, buf.elasticID[i]);
if (gc == GRID_UNDEF) {
buf.mpos[i] = make_float3(-1000, -1000, -1000);
buf.maccel[i] = make_float3(0, 0, 0);
return;
}
float3 rotatedKernelGrad;
//compute corotated deformation gradient
int elasticIndex = buf.elasticID[i];
if (elasticIndex < 0 || elasticIndex >= simData.numElasticPoints)
printf("elasticIndex wrong!\n");
float3 grad,dist;
float deformGrad[9];
for(int k=0;k<9;++k)
buf.gradDeform[i * 9 + k] = 0;
int j, jndex;
for (int l = 0; l<buf.neighborNum[elasticIndex]; ++l)
{
jndex = buf.neighborID[elasticIndex * simData.maxNeighborNum + l];
j = buf.particleID[jndex];
grad = buf.kernelRotate[elasticIndex * simData.maxNeighborNum + l];
dist = buf.mpos[j] - buf.mpos[i];
dist *= simData.psimscale;
//dist -= multiply_mv3(&buf.Rotation[i * 9], -elasticInfo.neighborDistance[elasticIndex * 600 + l]);
dist *= buf.initialVolume[jndex];
tensorProduct(dist, grad, deformGrad);
for (int k = 0; k < 9; ++k)
buf.gradDeform[i * 9 + k] += deformGrad[k];
}
//if (elasticIndex == 1600)
// printf("final deform gradient is (%f,%f,%f)(%f,%f,%f)(%f,%f,%f)\n",
// buf.gradDeform[i * 9], buf.gradDeform[i * 9 + 1], buf.gradDeform[i * 9 + 2],
// buf.gradDeform[i * 9 + 3], buf.gradDeform[i * 9 + 4], buf.gradDeform[i * 9 + 5],
// buf.gradDeform[i * 9 + 6], buf.gradDeform[i * 9 + 7], buf.gradDeform[i * 9 + 8]);
/*buf.gradDeform[i * 9] += 1;
buf.gradDeform[i * 9 + 4] += 1;
buf.gradDeform[i * 9 + 8] += 1;*/
	//// Check whether that condition is satisfied:
//float test[9] = { 0,0,0,0,0,0,0,0,0 };
//for (int l = 0; l<buf.neighborNum[elasticIndex]; ++l)
//{
// jndex = buf.neighborID[elasticIndex * simData.maxNeighborNum + l];
// j = buf.particleID[jndex];
// grad = buf.kernelRotate[elasticIndex * simData.maxNeighborNum + l];
// dist = multiply_mv3(&buf.Rotation[i * 9], -buf.neighborDistance[elasticIndex * simData.maxNeighborNum + l]);
// dist *= buf.initialVolume[buf.neighborID[elasticIndex * simData.maxNeighborNum + l]];
// tensorProduct(dist, grad, deformGrad);
// for (int k = 0; k < 9; ++k)
// test[k] += deformGrad[k];
//}
//if (elasticIndex == 100)
// printf("test matrix is (%f,%f,%f)(%f,%f,%f)(%f,%f,%f)\n",
// test[0], test[1], test[2],
// test[3], test[4], test[5],
// test[6], test[7], test[8]);
}
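// ComputeStrainAndStress: rebuilds a corotated deformation gradient from the
// current positions and the rotated kernel gradients, forms the linear strain
//   eps = 0.5 * (F + F^T) - I,
// stores the volumetric strain tr(eps), and computes the stress
//   sigma = 2 * mu * eps + lambda * tr(eps) * I - alpha * I,
// where alpha = poroDeformStrength * (1 - K_porous / K_grains) * p_water
// couples the pore pressure into the solid, in the spirit of Biot
// poroelasticity. The stress is written back into gradDeform for the force
// kernel.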
__global__ void ComputeStrainAndStress(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (_example != 2)
{
if (buf.MFtype[i] != 2) return;
}
else
if (buf.MFtype[i] < 2)
return;
int gc = buf.mgcell[i];
if (gc == GRID_UNDEF) {
//buf.mpos[i] = make_float3(-1000, -1000, -1000);
//buf.mvel[i] = make_float3(0, 0, 0);
return;
}
float3 rotatedKernelGrad;
//compute corotated deformation gradient
int index = buf.elasticID[i];
if (index < 0 || index >= simData.numElasticPoints)
printf("elasticIndex wrong!\n");
float3 grad, dist;
float deformGrad[9];
for (int k = 0; k<9; ++k)
buf.gradDeform[i * 9 + k] = 0;
int j, jndex;
for (int l = 0; l<buf.neighborNum[index]; ++l)
{
jndex = buf.neighborID[index * simData.maxNeighborNum + l];
j = buf.particleID[jndex];
grad = buf.kernelRotate[index * simData.maxNeighborNum + l];
dist = buf.mpos[j] - buf.mpos[i];
dist *= simData.psimscale;
dist *= buf.initialVolume[jndex];
tensorProduct(dist, grad, deformGrad);
for (int k = 0; k < 9; ++k)
buf.gradDeform[i * 9 + k] += deformGrad[k];
}
//strain and stress
float strain[9], stress[9];
float alpha;
transmit3(&buf.gradDeform[i * 9], stress);
for (int l = 0; l < 9; ++l)
strain[l] = 0.5*(buf.gradDeform[i * 9 + l] + stress[l]);
strain[0] -= 1; strain[4] -= 1; strain[8] -= 1;
buf.volumetricStrain[index] = strain[0] + strain[4] + strain[8];
float lambda = simData.lambda;
float tr_strain = strain[0] + strain[4] + strain[8];
for (int l = 0; l < 9; ++l)
stress[l] = 2 * simData.miu * strain[l];
stress[0] += lambda * tr_strain; stress[4] += lambda * tr_strain; stress[8] += lambda * tr_strain;
alpha = simData.poroDeformStrength*(1 - simData.bulkModulus_porous / simData.bulkModulus_grains) * buf.pressure_water[i*MAX_FLUIDNUM];
stress[0] -= alpha;
stress[4] -= alpha;
stress[8] -= alpha;
for (int l = 0; l < 9; ++l)
buf.gradDeform[i * 9 + l] = stress[l];
}
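// ComputeElasticForce: evaluates the symmetric elastic force
//   f_i = sum_j V_i V_j ( sigma_i * grad W~_ij - sigma_j * grad W~_ji ),
// using the reciprocal neighbor index from ComputeMap to fetch grad W~_ji,
// then builds the right-hand side of the implicit velocity update,
//   b = v + dt * f / m,
// and initializes the solution vectors (vx, vy, vz) with the current velocity.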
__global__ void ComputeElasticForce(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (_example != 2)
{
if (buf.MFtype[i] != 2) return;
}
else
if (buf.MFtype[i] < 2)
return;
int gc = buf.mgcell[i];
if (gc == GRID_UNDEF)
{
//buf.mpos[i] = make_float3(-1000, -1000, -1000);
//buf.mvel[i] = make_float3(0, 0, 0);
return;
}
int index = buf.elasticID[i];
if (index < 0 || index >= simData.numElasticPoints)
printf("elasticIndex wrong!\n");
int j, jndex, k;
float3 force = make_float3(0, 0, 0);
float3 t1, t2;
for (int l = 0; l<buf.neighborNum[index]; ++l)
{
jndex = buf.neighborID[index * simData.maxNeighborNum + l];
j = buf.particleID[jndex];
k = buf.neighborIndex[index * simData.maxNeighborNum + l];
t1 = multiply_mv3(&buf.gradDeform[i * 9], buf.kernelRotate[index * simData.maxNeighborNum + l]);
t1 -= multiply_mv3(&buf.gradDeform[j * 9], buf.kernelRotate[jndex * simData.maxNeighborNum + k]);
t1 *= buf.initialVolume[index];
t1 *= buf.initialVolume[jndex];
force += t1;
}
//if (index % 30000 == 0)
// printf("solid particle %d's elastic force is (%f,%f,%f)\n", index, force.x, force.y, force.z);
//buf.mforce[i] += force;
//buf.maccel[i] += force;
buf.bx[index] = buf.mveleval[i].x + simData.mf_dt*force.x / buf.mf_restmass[i];
buf.by[index] = buf.mveleval[i].y + simData.mf_dt*force.y / buf.mf_restmass[i];
buf.bz[index] = buf.mveleval[i].z + simData.mf_dt*force.z / buf.mf_restmass[i];
//if (index % 10000 == 0)
// printf("b is (%f,%f,%f)\n", buf.bx[index], buf.by[index], buf.bz[index]);
buf.vx[index] = buf.mveleval[i].x; buf.vy[index] = buf.mveleval[i].y; buf.vz[index] = buf.mveleval[i].z;
}
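// ComputeIterationStrainAndStress / ComputeIterationElasticForce: together
// they apply the system matrix of the implicit solve to a candidate vector
// (px, py, pz). The first kernel re-evaluates strain/stress from the
// dt-scaled differences dt * (p_j - p_i) instead of positions; the second
// turns that stress into an elastic force and writes Ap = p - dt * f / m,
// the matrix-vector product consumed by the conjugate-gradient loop below.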
__global__ void ComputeIterationStrainAndStress(bufList buf, int pnum, float* px, float*py, float*pz)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (_example != 2)
{
if (buf.MFtype[i] != 2) return;
}
else
if (buf.MFtype[i] < 2)
return;
int gc = buf.mgcell[i];
//if(i%100 == 0)
//printf("particle %d's elasticID is %d\n", i, buf.elasticID[i]);
if (gc == GRID_UNDEF) {
buf.mpos[i] = make_float3(-1000, -1000, -1000);
buf.maccel[i] = make_float3(0, 0, 0);
return;
}
float3 rotatedKernelGrad;
//compute corotated deformation gradient
int elasticIndex = buf.elasticID[i];
if (elasticIndex < 0 || elasticIndex >= simData.numElasticPoints)
printf("elasticIndex wrong!\n");
float3 grad, dist;
float deformGrad[9];
for (int k = 0; k<9; ++k)
buf.gradDeform[i * 9 + k] = 0;
int j, jndex;
int index = buf.elasticID[i];
for (int l = 0; l<buf.neighborNum[elasticIndex]; ++l)
{
jndex = buf.neighborID[elasticIndex * simData.maxNeighborNum + l];
j = buf.particleID[jndex];
grad = buf.kernelRotate[elasticIndex * simData.maxNeighborNum + l];
//dist = buf.mpos[j] - buf.mpos[i];
//dist *= simData.psimscale;
dist = make_float3(px[jndex] - px[elasticIndex], py[jndex] - py[elasticIndex], pz[jndex] - pz[elasticIndex]) * simData.mf_dt;
//dist -= multiply_mv3(&buf.Rotation[i * 9], -elasticInfo.neighborDistance[elasticIndex * 600 + l]);
dist *= buf.initialVolume[jndex];
tensorProduct(dist, grad, deformGrad);
for (int k = 0; k < 9; ++k)
buf.gradDeform[i * 9 + k] += deformGrad[k];
}
//strain and stress
float strain[9], stress[9];
float alpha;
transmit3(&buf.gradDeform[i * 9], stress);
for (int l = 0; l < 9; ++l)
strain[l] = 0.5*(buf.gradDeform[i * 9 + l] + stress[l]);
//strain[0] -= 1; strain[4] -= 1; strain[8] -= 1;
buf.volumetricStrain[index] = strain[0] + strain[4] + strain[8];
float lambda = simData.lambda;
float tr_strain = strain[0] + strain[4] + strain[8];
for (int l = 0; l < 9; ++l)
stress[l] = 2 * simData.miu * strain[l];
stress[0] += lambda * tr_strain; stress[4] += lambda * tr_strain; stress[8] += lambda * tr_strain;
alpha = simData.poroDeformStrength*(1 - simData.bulkModulus_porous / simData.bulkModulus_grains) * buf.pressure_water[i*MAX_FLUIDNUM];
stress[0] -= alpha;
stress[4] -= alpha;
stress[8] -= alpha;
for (int l = 0; l < 9; ++l)
buf.gradDeform[i * 9 + l] = stress[l];
}
__global__ void ComputeIterationElasticForce(bufList buf, int pnum, float* px, float*py, float*pz)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (_example != 2)
{
if (buf.MFtype[i] != 2) return;
}
else
if (buf.MFtype[i] < 2)
return;
int gc = buf.mgcell[i];
if (gc == GRID_UNDEF)
{
return;
}
int index = buf.elasticID[i];
if (index < 0 || index >= simData.numElasticPoints)
printf("elasticIndex wrong!\n");
int j, jndex, k;
float3 force = make_float3(0, 0, 0);
float3 t1, t2;
for (int l = 0; l<buf.neighborNum[index]; ++l)
{
jndex = buf.neighborID[index * simData.maxNeighborNum + l];
j = buf.particleID[jndex];
k = buf.neighborIndex[index * simData.maxNeighborNum + l];
t1 = multiply_mv3(&buf.gradDeform[i * 9], buf.kernelRotate[index * simData.maxNeighborNum + l]);
t1 -= multiply_mv3(&buf.gradDeform[j * 9], buf.kernelRotate[jndex * simData.maxNeighborNum + k]);
t1 *= buf.initialVolume[index];
t1 *= buf.initialVolume[jndex];
force += t1;
}
buf.Apx[index] = px[index] - simData.mf_dt*force.x / buf.mf_restmass[i];
buf.Apy[index] = py[index] - simData.mf_dt*force.y / buf.mf_restmass[i];
buf.Apz[index] = pz[index] - simData.mf_dt*force.z / buf.mf_restmass[i];
}
__global__ void initElasticIteration(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (_example != 2)
{
if (buf.MFtype[i] != 2) return;
}
else
if (buf.MFtype[i] < 2)
return;
int gc = buf.mgcell[i];
if (gc == GRID_UNDEF)
{
return;
}
int index = buf.elasticID[i];
buf.px[index] = buf.rx[index] = buf.bx[index] - buf.Apx[index];
buf.py[index] = buf.ry[index] = buf.by[index] - buf.Apy[index];
buf.pz[index] = buf.rz[index] = buf.bz[index] - buf.Apz[index];
}
__global__ void updateV(bufList buf, int pnum, float3 alpha)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (_example != 2)
{
if (buf.MFtype[i] != 2) return;
}
else
if (buf.MFtype[i] < 2)
return;
int gc = buf.mgcell[i];
if (gc == GRID_UNDEF)
{
return;
}
int index = buf.elasticID[i];
buf.vx[index] += alpha.x * buf.px[index]; buf.vy[index] += alpha.y * buf.py[index]; buf.vz[index] += alpha.z * buf.pz[index];
buf.r2x[index] = buf.rx[index] - alpha.x*buf.Apx[index];
buf.r2y[index] = buf.ry[index] - alpha.y*buf.Apy[index];
buf.r2z[index] = buf.rz[index] - alpha.z*buf.Apz[index];
}
__global__ void updateP(bufList buf, int pnum, float3 beta)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (_example != 2)
{
if (buf.MFtype[i] != 2) return;
}
else
if (buf.MFtype[i] < 2)
return;
int gc = buf.mgcell[i];
if (gc == GRID_UNDEF)
{
return;
}
int index = buf.elasticID[i];
buf.px[index] = buf.r2x[index] + beta.x*buf.px[index];
buf.py[index] = buf.r2y[index] + beta.y*buf.py[index];
buf.pz[index] = buf.r2z[index] + beta.z*buf.pz[index];
buf.rx[index] = buf.r2x[index]; buf.ry[index] = buf.r2y[index]; buf.rz[index] = buf.r2z[index];
}
__global__ void ApplyElasticForce(bufList buf, int pnum, float* vx, float*vy, float*vz)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (_example != 2)
{
if (buf.MFtype[i] != 2) return;
}
else
if (buf.MFtype[i] < 2)
return;
int gc = buf.mgcell[i];
if (gc == GRID_UNDEF)
{
return;
}
int index = buf.elasticID[i];
if (index < 0 || index >= simData.numElasticPoints)
printf("elasticIndex wrong!\n");
float3 force;
force.x = (vx[index] - buf.mveleval[i].x) / simData.mf_dt;
force.y = (vy[index] - buf.mveleval[i].y) / simData.mf_dt;
force.z = (vz[index] - buf.mveleval[i].z) / simData.mf_dt;
buf.pressForce[i] = force;
buf.mforce[i] += force;
buf.maccel[i] += force;
}
__device__ float contributeColorField(int i, int cell, bufList buf, int& count)
{
float dsq, c, sum=0;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2 / d2;
float3 dist, vmr;
float cmterm;
int j, jndex;
if (buf.mgridcnt[cell] == 0) return sum;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
int index = buf.elasticID[i];
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.MFtype[j] == 2 || (_example == 2 && buf.MFtype[j] >= 2))
{
dist = (buf.mpos[i] - buf.mpos[j]); // dist in cm
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if (!(dsq < r2 && dsq > 0))
continue;
dsq = sqrt(dsq*d2);
jndex = buf.elasticID[j];
c = pow(simData.r2 - dsq*dsq, 3);
cmterm = buf.mf_restmass[j] * buf.density_solid[j]*c*simData.poly6kern;
sum += cmterm;
count++;
}
}
return sum;
}
__global__ void ComputeElasticColorField(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (_example != 2)
{
if (buf.MFtype[i] != 2) return;
}
else
if (buf.MFtype[i] < 2)
return;
int gc = buf.mgcell[i];
if (gc == GRID_UNDEF)
{
return;
}
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
gc -= nadj;
int index = buf.elasticID[i];
if (index < 0 || index >= simData.numElasticPoints)
printf("elasticIndex wrong!\n");
buf.colorValue[i] = 0;
int count = 0;
for (int c = 0; c < simData.gridAdjCnt; c++)
{
buf.colorValue[i] += contributeColorField(i, gc + simData.gridAdj[c], buf, count);
}
if (count <= 25)
//if(count<=20)
buf.isSurface[index] = 1;
else
buf.isSurface[index] = 0;
}
__device__ float3 contributeElasticNormal(int i, int cell, bufList buf)
{
float dsq, c;
float3 sum = make_float3(0,0,0);
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2 / d2;
float3 dist, vmr;
float cmterm;
int j, jndex;
if (buf.mgridcnt[cell] == 0) return sum;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
int index = buf.elasticID[i];
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.MFtype[j] == 2 || (_example == 2 && buf.MFtype[j] >= 2))
{
dist = (buf.mpos[i] - buf.mpos[j]); // dist in cm
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if (!(dsq < r2 && dsq > 0))
continue;
jndex = buf.elasticID[j];
dsq = sqrt(dsq*d2);
dist *= simData.psimscale;
jndex = buf.elasticID[j];
c = simData.psmoothradius - dsq;
cmterm = buf.mf_restmass[j] * buf.density_solid[j] * c*c / dsq*simData.spikykern;
sum += cmterm * (buf.colorValue[j] - buf.colorValue[i])*dist;
}
}
return sum;
}
__global__ void ComputeElasticNormal(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (_example != 2)
{
if (buf.MFtype[i] != 2) return;
}
else
if (buf.MFtype[i] < 2)
return;
int gc = buf.mgcell[i];
if (gc == GRID_UNDEF)
{
return;
}
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
gc -= nadj;
int index = buf.elasticID[i];
if (index < 0 || index >= simData.numElasticPoints)
printf("elasticIndex wrong!\n");
buf.normal[index] = make_float3(0,0,0);
for (int c = 0; c < simData.gridAdjCnt; c++)
{
buf.normal[index] -= contributeElasticNormal(i, gc + simData.gridAdj[c], buf);
}
float d = dot(buf.normal[index], buf.normal[index]);
if (d != 0)
buf.normal[index] /= sqrt(d);
}
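// ComputeElasticForceCUDA: host-side driver for the implicit elastic update.
// After the explicit pass (deformation gradient, stress, right-hand side b),
// it runs a matrix-free conjugate-gradient solve on the velocity system,
// treating the x/y/z components as three independent systems: cuBLAS dot
// products give alpha = (r.r)/(p.Ap) and beta = (r'.r')/(r.r), updateV/updateP
// advance the iterates, and the loop stops after 20 iterations or when the L1
// norm of the residual drops below 0.01. ApplyElasticForce then converts the
// solved velocity back into a force/acceleration, and the color-field and
// normal kernels flag surface particles of the solid.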
void ComputeElasticForceCUDA()
{
ComputeDeformGrad << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: computeCorrectL: %s\n", cudaGetErrorString(error));
}
cudaThreadSynchronize();
ComputeStrainAndStress << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: compute strain and stress: %s\n", cudaGetErrorString(error));
}
cudaThreadSynchronize();
ComputeElasticForce << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: compute elastic force: %s\n", cudaGetErrorString(error));
}
cudaThreadSynchronize();
int countNum = 0;
float errorIter, precision = 0.01;
float3 alpha, beta;
cublasHandle_t handle;
cublasCreate(&handle);
ComputeIterationStrainAndStress << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum, fbuf.vx, fbuf.vy, fbuf.vz);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: compute iteration strain and stress: %s\n", cudaGetErrorString(error));
}
cudaDeviceSynchronize();
ComputeIterationElasticForce << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum, fbuf.vx, fbuf.vy, fbuf.vz);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: compute iteration elastic force: %s\n", cudaGetErrorString(error));
}
cudaDeviceSynchronize();
initElasticIteration << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: init elastic iteration: %s\n", cudaGetErrorString(error));
}
cudaDeviceSynchronize();
float al = -1, t1, t2, t3;
do {
countNum++;
ComputeIterationStrainAndStress << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum, fbuf.px, fbuf.py, fbuf.pz);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: compute iteration strain and stress: %s\n", cudaGetErrorString(error));
}
cudaDeviceSynchronize();
ComputeIterationElasticForce << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum, fbuf.px, fbuf.py, fbuf.pz);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: compute iteration elastic force: %s\n", cudaGetErrorString(error));
}
cudaDeviceSynchronize();
cublasSdot(handle, fcuda.numElasticPoints, fbuf.rx, 1, fbuf.rx, 1, &(alpha.x));
cublasSdot(handle, fcuda.numElasticPoints, fbuf.ry, 1, fbuf.ry, 1, &(alpha.y));
cublasSdot(handle, fcuda.numElasticPoints, fbuf.rz, 1, fbuf.rz, 1, &(alpha.z));
cublasSdot(handle, fcuda.numElasticPoints, fbuf.px, 1, fbuf.Apx, 1, &(beta.x));
cublasSdot(handle, fcuda.numElasticPoints, fbuf.py, 1, fbuf.Apy, 1, &(beta.y));
cublasSdot(handle, fcuda.numElasticPoints, fbuf.pz, 1, fbuf.Apz, 1, &(beta.z));
cudaDeviceSynchronize();
alpha /= beta;
updateV << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum, alpha);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: compute update V: %s\n", cudaGetErrorString(error));
}
cudaDeviceSynchronize();
//t1 = 0; t2 = 0; t3 = 0;
cublasSasum(handle, fcuda.numElasticPoints, fbuf.r2x, 1, &t1);
cublasSasum(handle, fcuda.numElasticPoints, fbuf.r2y, 1, &t2);
cublasSasum(handle, fcuda.numElasticPoints, fbuf.r2z, 1, &t3);
cudaDeviceSynchronize();
errorIter = t1 + t2 + t3;
if (errorIter < precision)
break;
//printf("iter num is %d, error is %f\n", countNum, errorIter);
cublasSdot(handle, fcuda.numElasticPoints, fbuf.r2x, 1, fbuf.r2x, 1, &(beta.x));
cublasSdot(handle, fcuda.numElasticPoints, fbuf.r2y, 1, fbuf.r2y, 1, &(beta.y));
cublasSdot(handle, fcuda.numElasticPoints, fbuf.r2z, 1, fbuf.r2z, 1, &(beta.z));
cublasSdot(handle, fcuda.numElasticPoints, fbuf.rx, 1, fbuf.rx, 1, &(alpha.x));
cublasSdot(handle, fcuda.numElasticPoints, fbuf.ry, 1, fbuf.ry, 1, &(alpha.y));
cublasSdot(handle, fcuda.numElasticPoints, fbuf.rz, 1, fbuf.rz, 1, &(alpha.z));
cudaDeviceSynchronize();
beta /= alpha;
updateP << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum, beta);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: compute update V: %s\n", cudaGetErrorString(error));
}
cudaDeviceSynchronize();
} while (countNum < 20);
//ex1 for 5, ex2 for 5
//printf("\n");
ApplyElasticForce << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum, fbuf.vx, fbuf.vy, fbuf.vz);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: apply elastic force: %s\n", cudaGetErrorString(error));
}
cudaThreadSynchronize();
ComputeElasticColorField << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: compute elastic color field: %s\n", cudaGetErrorString(error));
}
cudaThreadSynchronize();
ComputeElasticNormal << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: compute elastic normal: %s\n", cudaGetErrorString(error));
}
cudaThreadSynchronize();
cublasDestroy(handle);
}
__device__ float contributeDivDarcyFlux(int i, int cell, bufList buf, float&normalize)
{
float dsq, c;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
//int jndex;
float3 dist, vmr;
float cmterm, cmterm1;
// float massj;
float pmterm, vmterm;
// float q;
int j, mulj;
float aveDenij, cx, xterm;
float sum = 0;
//if (i % 100 == 0)
// printf("particle %d's gridcnt is %d\n", i,buf.mgridcnt[cell]);
if (buf.mgridcnt[cell] == 0) return sum;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
//int index = buf.elasticID[i];
//int jndex,index = buf.elasticID[i];
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.misbound[j])
continue;
if (buf.MFtype[i] == buf.MFtype[j] && buf.MFtype[i] == 0)
continue;
//jndex = buf.elasticID[j];
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale; // dist in cm
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if (!(dsq < r2 && dsq > 0))
continue;
dsq = sqrt(dsq);
c = simData.psmoothradius - dsq;
//cmterm = c*c*simData.spikykern * simData.pmass / buf.mf_restdensity[j] / dsq;
//cmterm = -1 / dsq;
cmterm = c*c*simData.spikykern * simData.pmass / simData.mf_dens[1];
//cmterm = c*c*simData.spikykern * simData.pmass * buf.density_solid[buf.elasticID[j]] / dsq;
//if (buf.MFtype[i] == buf.MFtype[j])
sum += dot((buf.gradPressure[j]+ buf.gradPressure[i])*0.5, dist/dsq)*cmterm;
normalize += cmterm;
//else
// sum += dot(buf.gradPressure[i], dist)*cmterm;
}
return sum;
}
__device__ void contributePorePressure(int i, int cell, bufList buf,float* beta, float &sum, float&b)
{
float dsq, c;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist, vmr;
float cmterm, cmterm1;
// float massj;
float3 pmterm, vmterm;
// float q;
int j, mulj;
float aveDenij, cx, xterm;
if (buf.mgridcnt[cell] == 0) return;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
//int index = buf.elasticID[i];
float q;
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.MFtype[j] != 0)
continue;
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale; // dist in cm
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
q = sqrt(dsq / r2);
if (q <= 0 || q >= 1)
continue;
if (q < 0.5)
cmterm = 6 * (q*q*q - q*q) + 1;
else
cmterm = 2 * pow(1 - q, 3);
//if (q >= 1)
// continue;
//if (q >= 0 && q <= 0.5)
// cmterm = buf.density_solid[i] * (6 * (q*q*q - q*q) + 1);
//else
// cmterm = buf.density_solid[i] * 2*pow(1-q,3);
if (buf.totalDis[j*MAX_SOLIDNUM + buf.MFtype[i] - 2] <= 0.000001)
b = 1;
cmterm *= buf.mf_restmass[j] / buf.mf_restdensity[j];
//cmterm = pow((r2 - dsq), 3)*simData.poly6kern*buf.mf_restmass[j] * buf.mdensity[j] / buf.totalDis[j];
/*if (isnan(cmterm))
continue;*/
//cmterm *= buf.mf_restmass[j] / buf.mf_restdensity[j];
//if (buf.totalDis[j*MAX_SOLIDNUM + buf.MFtype[i] - 2] == 0)
// continue;
cmterm /= buf.totalDis[j*MAX_SOLIDNUM + buf.MFtype[i] - 2];
for (int k = 1; k < MAX_FLUIDNUM; ++k)
{
//for (int l = 0; l < MAX_SOLIDNUM; ++l)
{
sum += (buf.mf_beta[j*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM+buf.MFtype[i]-2] * simData.mf_dens[k] / simData.mf_mass[k]) * cmterm;
beta[k] += (buf.mf_beta[j*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[i] - 2] * simData.mf_dens[k] / simData.mf_mass[k]) * cmterm;
if (isnan(sum))
{
b = buf.mf_restdensity[j];
return;
}
}
/* sum += buf.mf_beta[j*MAX_FLUIDNUM + k] * cmterm;
beta[k] += buf.mf_beta[j*MAX_FLUIDNUM + k] * cmterm;*/
}
}
}
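// ComputeSolidPorePressure: for solid particles (MFtype > 1) it gathers the
// absorbed fluid fractions (mf_beta) carried by nearby fluid particles,
// weighted by a cubic-spline-like kernel and normalized by each fluid
// particle's totalDis, and converts the resulting saturation into a pore
// pressure: p = CoCompressibility * (saturation - (1 - K_porous/K_grains) *
// volumetricStrain) for the elastic solid (MFtype == 2), and
// p = CoCompressibility * saturation for the other solid types. The solid's
// rest mass is increased by the absorbed fluid. Fluid particles (MFtype == 0)
// instead get a reference pore pressure from pressRatio * rest_porosity.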
__global__ void ComputeSolidPorePressure(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (buf.MFtype[i]==1)return;
int gc = buf.mgcell[i];
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
if (gc == GRID_UNDEF) {
return;
}
gc -= nadj;
float fluidSum = 0;
float beta[MAX_FLUIDNUM];
float normalize = 0;
//if (i % 10000 == 0)
// printf("pressure ratio is (%f,%f,%f,%f),(%f,%f,%f,%f),(%f,%f,%f,%f) \n",
// simData.pressRatio[4], simData.pressRatio[5], simData.pressRatio[6], simData.pressRatio[7]
// , simData.pressRatio[8], simData.pressRatio[9], simData.pressRatio[10], simData.pressRatio[11]
// , simData.pressRatio[12], simData.pressRatio[13], simData.pressRatio[14], simData.pressRatio[15]);
//if(buf.MFtype[i] == 0)
//printf("%d's type is %d, beta is (%f,%f,%f)\n", i, buf.MFtype[i], beta[0], beta[1],
// beta[2]);
float b = 10;
if (buf.MFtype[i] > 1)
{
for (int k = 0; k < MAX_FLUIDNUM; ++k)
beta[k] = 0;
for (int c = 0; c < simData.gridAdjCnt; c++)
{
contributePorePressure(i, gc + simData.gridAdj[c], buf, beta, fluidSum, b);
}
/*if (fluidSum > 0.1)
printf("fluid sum is %f, beta is (%f,%f,%f,%f)\n",
fluidSum, beta[0], beta[1], beta[2], beta[3]);*/
for (int k = 0; k < MAX_FLUIDNUM; ++k)
{
//if (buf.MFtype[i] == 2||(_example==2))
if (buf.MFtype[i] == 2)
buf.pressure_water[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM+buf.MFtype[i]-2] =
simData.CoCompressibility*(fluidSum - (1 - simData.bulkModulus_porous / simData.bulkModulus_grains)*buf.volumetricStrain[buf.elasticID[i]]);
else
buf.pressure_water[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[i] - 2] = simData.CoCompressibility*fluidSum;
if (isnan(buf.pressure_water[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[i] - 2]))
printf("solid %d's pore pressure is nan.beta is (%f,%f,%f) density solid is %f, b is %.10f\n",
i, beta[1], beta[2], beta[3], buf.density_solid[i], b);
//if(buf.mpos[i].y>60&&i%10==0)
// printf("press water is %f\n", buf.pressure_water[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[i] - 2]);
buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[i] - 2] = beta[k];
}
float mass = simData.mf_mass[0];
for (int k = 1; k < MAX_FLUIDNUM; ++k)
mass += simData.stRatio*buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[i] - 2] * simData.mf_mass[buf.MFtype[i] - 2];
buf.mf_restmass[i] = mass;
/*if(buf.elasticID[i]%1000==0&& abs(buf.volumetricStrain[buf.elasticID[i]])>0.001)
printf("elastic %d's volume strain is %f\n", buf.elasticID[i],
buf.volumetricStrain[buf.elasticID[i]]);*/
}
else
{
for (int k = 1; k < MAX_FLUIDNUM; ++k)
for (int l = 0; l < MAX_SOLIDNUM; ++l)
{
buf.pressure_water[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] = simData.pressRatio[k*MAX_SOLIDNUM + l] * simData.rest_porosity*simData.CoCompressibility;// *buf.mf_alpha[i*MAX_FLUIDNUM + k];
//if (i % 10000 == 0)
// printf("%d's press water is %f\n", i, buf.pressure_water[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l]);
}//buf.pressure_water[i*MAX_FLUIDNUM*MAX_SOLIDNUM + 0] = (simData.pressRatio[1]*buf.mf_beta[i*MAX_FLUIDNUM+1]+simData.pressRatio[2]*buf.mf_beta[i*MAX_FLUIDNUM + 2]) * simData.rest_porosity*simData.CoCompressibility;
}
}
__device__ void findNearbySolid(int i, int cell, bufList buf)
{
float dsq, c, dsq2;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist;
float cmterm;
float pmterm;
int j, jndex;
//int t = -1;
if (buf.mgridcnt[cell] == 0) return;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
float q;
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.MFtype[j] == 1|| buf.MFtype[j] == 0)
continue;
//if (buf.isSurface[buf.elasticID[j]] == 0)
// continue;
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale; // dist in cm
dsq2 = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if (!(dsq2 < r2 && dsq2 > 0))
continue;
q = sqrt(dsq2 / r2);
if (q >= 0 && q <= 0.5)
buf.totalDis[i*MAX_SOLIDNUM + buf.MFtype[j] - 2] += (6 * (pow(q, 3) - pow(q, 2)) + 1);
else
buf.totalDis[i*MAX_SOLIDNUM + buf.MFtype[j] - 2] += 2 * pow(1 - q, 3);
buf.solidCount[i*MAX_SOLIDNUM + buf.MFtype[j] - 2] += 1;
//if (q > 2)
// continue;
//if (q >= 0 && q <= 1)
// buf.totalDis[i*MAX_SOLIDNUM+buf.MFtype[j]-2] += simData.CubicSplineKern2*(1 - 1.5*q*q*(1 - q / 2));
//else
// buf.totalDis[i*MAX_SOLIDNUM + buf.MFtype[j] - 2] += simData.CubicSplineKern1*pow(2 - q, 3);
//total_dist += pow((r2 - dsq2), 3)*simData.poly6kern*buf.mf_restmass[i] * buf.mdensity[i];
//total_dist += sqrt(dsq2);
}
}
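// FindNearbySolid: for every fluid particle, accumulates per solid type the
// kernel-weighted distance sum (totalDis) and the neighbor count (solidCount)
// of surrounding solid particles. If no solid of a given type is nearby, any
// fluid absorbed into that type (mf_beta) is returned to the free phase
// (mf_alpha); if the particle sits deep inside solid type 3 (count >= 22),
// all free fluid is converted to absorbed fluid and the particle is marked as
// inside. Finally mf_alpha_sum and the rest density of the free part
// (mf_restdensity_out) are recomputed.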
__global__ void FindNearbySolid(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum)
return;
if (buf.MFtype[i] != 0)
return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
for (int k = 0; k < MAX_SOLIDNUM; ++k) {
buf.totalDis[i*MAX_SOLIDNUM + k] = 0;
buf.solidCount[i*MAX_SOLIDNUM + k] = 0;
}
for (int c = 0; c < simData.gridAdjCnt; c++)
{
findNearbySolid(i, gc + simData.gridAdj[c], buf);
}
//for (int k = 0; k < MAX_SOLIDNUM; ++k)
// buf.totalDis[i*MAX_SOLIDNUM + k] *= buf.mf_restmass[i] * buf.mdensity[i];
//if (buf.solidCount[i] >= 25)
// buf.isInside[i] = true;
//else
// buf.isInside[i] = false;
float step;
float betasum = 0;
for (int l = 0; l < MAX_SOLIDNUM; ++l)
{
if (buf.solidCount[i*MAX_SOLIDNUM + l] == 0)
{
for (int k = 1; k < simData.mf_catnum; ++k)
{
step = (-buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l]);
buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] = 0;
buf.mf_beta_next[i*simData.mf_catnum*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] = 0;
buf.mf_alpha[i*simData.mf_catnum + k] -= step;
buf.mf_alpha_next[i*simData.mf_catnum + k] -= step;
}
}
if(l == 3)
if (buf.solidCount[i*MAX_SOLIDNUM + l] >= 22)
{
for (int k = 1; k < simData.mf_catnum; ++k)
{
step = buf.mf_alpha[i*simData.mf_catnum + k];
buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] += step;
buf.mf_beta_next[i*simData.mf_catnum*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] += step;
buf.mf_alpha[i*simData.mf_catnum + k] = 0;
buf.mf_alpha_next[i*simData.mf_catnum + k] = 0;
}
buf.isInside[i] = true;
}
for (int k = 1; k < simData.mf_catnum; ++k)
betasum += buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l];
}
buf.mf_alpha_sum[i] = 0;
buf.mf_restdensity_out[i] = 0;
//buf.rest_colorValue[i] = 0;
for (int k = 1; k < simData.mf_catnum; ++k)
{
buf.mf_alpha_sum[i] += buf.mf_alpha[i*simData.mf_catnum + k];
buf.mf_restdensity_out[i] += buf.mf_alpha[i*simData.mf_catnum + k] * simData.mf_dens[k];
//buf.rest_colorValue[i] += buf.mf_alpha[i*simData.mf_catnum + k] * simData.colorValue[k];
}
if (abs(betasum + buf.mf_alpha_sum[i] - 1) > 0.01 || isnan(betasum))
printf("alphasum is %f, betasum is %f\n", buf.mf_alpha_sum[i], betasum);
if (buf.mf_alpha_sum[i] > 0.0001)
buf.mf_restdensity_out[i] /= buf.mf_alpha_sum[i];
else
{
buf.mf_restdensity_out[i] = 1;
buf.mf_alpha_sum[i] = 0;
}
////if (i % 10000 == 0)
//if(buf.mf_alpha_sum[i] < 0.99)
// printf("mf_dens is (%f,%f,%f,%f), alpha sum is %f, densityout is %f, alpha is (%f,%f,%f), solid count is (%d,%d,%d,%d),beta is (%f,%f,%f,%f)(%f,%f,%f,%f)(%f,%f,%f,%f)(%f,%f,%f,%f)\n",
// simData.mf_dens[0], simData.mf_dens[1], simData.mf_dens[2], simData.mf_dens[3], buf.mf_alpha_sum[i], buf.mf_restdensity_out[i],
// buf.mf_alpha[i*simData.mf_catnum + 1], buf.mf_alpha[i*simData.mf_catnum + 2], buf.mf_alpha[i*simData.mf_catnum + 3],
// buf.solidCount[i*MAX_SOLIDNUM + 0], buf.solidCount[i*MAX_SOLIDNUM + 1], buf.solidCount[i*MAX_SOLIDNUM + 2], buf.solidCount[i*MAX_SOLIDNUM + 3],
// buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + 0 * MAX_SOLIDNUM + 0], buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + 1 * MAX_SOLIDNUM + 0],
// buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + 2 * MAX_SOLIDNUM + 0], buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + 3 * MAX_SOLIDNUM + 0],
// buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + 0 * MAX_SOLIDNUM + 1],buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + 1*MAX_SOLIDNUM + 1],
// buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + 2*MAX_SOLIDNUM + 1],buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + 3*MAX_SOLIDNUM + 1],
// buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + 0 * MAX_SOLIDNUM + 2], buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + 1 * MAX_SOLIDNUM + 2],
// buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + 2 * MAX_SOLIDNUM + 2], buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + 3 * MAX_SOLIDNUM + 2],
// buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + 0 * MAX_SOLIDNUM + 3], buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + 1 * MAX_SOLIDNUM + 3],
// buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + 2 * MAX_SOLIDNUM + 3], buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + 3 * MAX_SOLIDNUM + 3]);
}
__device__ int findNearestSolid(int i, int cell, bufList buf, float*distance) {
float dsq, c, dsq2;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist;
float cmterm;
float pmterm;
int j, jndex;
int t = -1;
if (buf.mgridcnt[cell] == 0) return -1;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.MFtype[j] <= 1)
continue;
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale; // dist in cm
dsq2 = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if (!(dsq2 < r2 && dsq2 > 0))
continue;
if (dsq2 < distance[buf.MFtype[j] - 2])
{
distance[buf.MFtype[j] - 2] = dsq2;
t = j;
}
}
return t;
}
__global__ void ComputeFPCorrection(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum)
return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
buf.rest_colorValue[i] = simData.colorValue[0];
if (buf.MFtype[i] != 0)
return;
gc -= nadj;
float distance[MAX_SOLIDNUM];
for (int k = 0; k < MAX_SOLIDNUM; ++k) {
distance[k] = simData.r2;
}
int j = -1, t;
//buf.fluidPercent[i] = buf.nextFluidPercent[i];
float step;
for (int c = 0; c < simData.gridAdjCnt; c++)
{
t = findNearestSolid(i, gc + simData.gridAdj[c], buf, distance);
/*if (t != -1)
j = t;*/
}
float oldFP;
}
__device__ void contributePoroVelocity(int i, int cell, bufList buf, float3* poroVel, float* normalize, float3* advectVel, int &count)
{
float dsq, c, dsq2;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist;
float cmterm;
float pmterm;
int j, jndex;
if (buf.mgridcnt[cell] == 0) return;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
float q = 0;
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.MFtype[j] <= 1)
continue;
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale; // dist in cm
dsq2 = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
q = sqrt(dsq2 / r2);
if (q >= 1 || q <= 0)
continue;
if (q <= 0.5)
pmterm = 6 * (q*q*q - q*q) + 1;
else
pmterm = 2 * pow(1 - q, 3);
//if (q >= 2 || q <= 0)
// continue;
//if (q > 1)
// pmterm = 0.25*pow(2 - q, 3);
//else
// pmterm = 1 - 1.5*q*q*(1 - 0.5*q);
//pmterm *= buf.density_solid[j];
//pmterm *= simData.CubicSplineKern2;
for (int k = 1; k < simData.mf_catnum; k++)
{
if (isnan(dot(buf.gradPressure[j*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[j] - 2], buf.gradPressure[j*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[j] - 2])))
{
count++;
continue;
}
poroVel[k*MAX_SOLIDNUM + buf.MFtype[j] - 2] += pmterm * buf.gradPressure[j*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM+ buf.MFtype[j] - 2];
advectVel[k*MAX_SOLIDNUM + buf.MFtype[j] - 2] += pmterm * buf.mveleval[j];
}
normalize[buf.MFtype[j] - 2] += pmterm;
}
return;
}
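// ComputePoroVelocity: for every fluid particle, averages the Darcy flux
// (gradPressure) and the velocity of nearby solid particles, normalized by
// the totalDis weights gathered in FindNearbySolid, to obtain a per-phase,
// per-solid pore velocity poroVel = flux + advection. The mismatch between
// the pore velocity and the mixture velocity is turned into a force,
//   f += beta_k * (rho_k * poroVel - rho_beta * v) / (dt * rho_rest),
// which is accumulated into mforce.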
__global__ void ComputePoroVelocity(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum)
return;
if (buf.MFtype[i] != 0)
return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
float normalize[MAX_SOLIDNUM];// = 0;
float3 poroVel[MAX_FLUIDNUM * MAX_SOLIDNUM];
float3 advectVel[MAX_FLUIDNUM * MAX_SOLIDNUM];
float3 force, forcesum = make_float3(0,0,0);
float betadensity = 0;
float betasum = 0;
//buf.poroForce[i] = make_float3(0, 0, 0);
int count = 0;
for (int k = 1; k < simData.mf_catnum*MAX_SOLIDNUM; ++k)
{
for (int l = 0; l < MAX_SOLIDNUM; ++l)
{
poroVel[k*MAX_SOLIDNUM+l] = make_float3(0, 0, 0);
advectVel[k*MAX_SOLIDNUM + l] = make_float3(0, 0, 0);
betadensity += buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] * simData.mf_dens[k];
betasum += buf.mf_beta[i*simData.mf_catnum*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l];
normalize[l] = 0;
}
}
//if (buf.mf_restdensity[i] <= 10)
// printf("rest den222 is %f, alpha is (%f,%f,%f), betasum is %f\n",
// buf.mf_restdensity[i], buf.mf_alpha[i*MAX_FLUIDNUM + 1], buf.mf_alpha[i*MAX_FLUIDNUM + 2], buf.mf_alpha[i*MAX_FLUIDNUM + 3],
// betasum);
if (betadensity > 1)
betadensity /= betasum;
//int count = 0;
for (int c = 0; c < simData.gridAdjCnt; c++)
{
contributePoroVelocity(i, gc + simData.gridAdj[c], buf, poroVel, normalize, advectVel, count);
}
buf.poroForce[i] = make_float3(0, 0, 0);
float3 porevel, advectV;
for (int l = 0; l < MAX_SOLIDNUM; ++l)
{
if (normalize[l] != 0)
{
for (int k = 1; k < simData.mf_catnum; ++k)
{
porevel = poroVel[k*MAX_SOLIDNUM + l];
advectV = advectVel[k*MAX_SOLIDNUM + l];
poroVel[k*MAX_SOLIDNUM + l] /= buf.totalDis[i*MAX_SOLIDNUM + l];
advectVel[k*MAX_SOLIDNUM + l] /= buf.totalDis[i*MAX_SOLIDNUM + l];
//poroVel[k] /= abs(normalize);
//advectVel[k] /= abs(normalize);
buf.poroVel[i*simData.mf_catnum*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] = poroVel[k*MAX_SOLIDNUM + l] + advectVel[k*MAX_SOLIDNUM + l];
//force = buf.mf_beta[i*MAX_FLUIDNUM + k]*(poroVel[k] - buf.mveleval[i])/simData.mf_dt;
force = simData.mf_dens[k] * buf.poroVel[i*simData.mf_catnum*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] - betadensity * buf.mveleval[i];
force *= buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM+l] / (simData.mf_dt*buf.mf_restdensity[i]);
//buf.mforce[i] += force;
forcesum += force;
//buf.poroForce[i] += force;
if (isnan(dot(force, force)))
printf("phase %d's pore force is nan,poro vel is (%f,%f,%f), advect vel is (%f,%f,%f), total dis is %f, count is %d\n", k,
k, porevel.x, porevel.y, porevel.z,
advectV.x, advectV.y, advectV.z,
buf.totalDis[i*MAX_SOLIDNUM + l], count);
}
//if (buf.mf_alpha[i*MAX_FLUIDNUM + 1] > 0.99 && dot(buf.poroForce[i], buf.poroForce[i]) > 1)
// printf("%d's alpha is (%f,%f), beta is (%f,%f), vel is (%f,%f,%f), poro force is (%f,%f,%f)\n",
// i, buf.mf_alpha[i*MAX_FLUIDNUM + 1], buf.mf_alpha[i*MAX_FLUIDNUM + 2],
// buf.mf_beta[i*MAX_FLUIDNUM + 1], buf.mf_beta[i*MAX_FLUIDNUM + 2], buf.mveleval[i].x, buf.mveleval[i].y,
// buf.mveleval[i].z, buf.poroForce[i].x, buf.poroForce[i].y, buf.poroForce[i].z);
}
else
{
for (int k = 1; k < simData.mf_catnum; ++k)
buf.poroVel[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] = make_float3(0, 0, 0);
}
}
//if (isnan(dot(forcesum, forcesum)))
// printf("particle %d's type is %d, poro accel is nan, total distance is %f\n",
// i, buf.MFtype[i], buf.totalDis[i*MAX_SOLIDNUM + 3]);
//if (buf.MFtype[i] == 0 && i % 10000 == 0)
// printf("particle %d's mixture vel is (%f,%f,%f), fluid vel is (%f,%f,%f)\n",
// i, buf.mveleval[i].x, buf.mveleval[i].y, buf.mveleval[i].z,
// buf.fluidVel[i*MAX_FLUIDNUM + 1].x, buf.fluidVel[i*MAX_FLUIDNUM + 1].y,
// buf.fluidVel[i*MAX_FLUIDNUM + 1].z);
//betasum = forcesum.x*forcesum.x + forcesum.y*forcesum.y + forcesum.z*forcesum.z;
//if (betasum > simData.AL2) {
// forcesum *= simData.AL / sqrt(betasum);
//}
//if (buf.isInside[i] && i % 10 == 0)
// printf("p %d's poro force sum is (%f,%f,%f)\n", i, forcesum.x, forcesum.y, forcesum.z);
buf.mforce[i] += forcesum;
}
__device__ void contributeFluidFlux(int i, int cell, bufList buf, float&normalize)
{
float dsq, c;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2 / d2;
float3 dist, vmr;
float cmterm, cmterm1;
// float massj;
float3 pmterm, vmterm;
// float q;
int j, mulj;
float aveDenij, cx, xterm;
if (buf.mgridcnt[cell] == 0) return;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
int index = buf.elasticID[i];
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
//if (buf.MFtype[j] == buf.MFtype[i])
if(buf.MFtype[j] <= 1)
continue;
dist = (buf.mpos[i] - buf.mpos[j]); // dist in cm
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if (!(dsq < r2 && dsq > 0))
continue;
dist *= simData.psimscale;
dsq = sqrt(dsq*d2);
c = (simData.psmoothradius - dsq);
cmterm = c*c*simData.spikykern*buf.mf_restmass[j] * buf.density_solid[j];
pmterm = dist / dsq*cmterm;
for (int k = 1; k < simData.mf_catnum; ++k)
{
//cmterm1 = simData.CoCompressibility * simData.rest_porosity - buf.pressure_water[j*MAX_FLUIDNUM + k];
cmterm1 = buf.pressure_water[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[j] - 2]
- buf.pressure_water[j*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[j] - 2];
buf.gradPressure[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[j] - 2] +=
(buf.mf_alpha[i*MAX_FLUIDNUM+k] +
buf.mf_beta[j*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[j] - 2])
*cmterm1*pmterm;
}
normalize += cmterm;
}
}
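// ComputeFluidFlux: for every fluid particle, computes a Darcy flux per phase
// and per solid type from the pore-pressure difference between the fluid
// particle and neighboring solid particles, weighted by the spiky kernel
// gradient and the (alpha + beta) fraction, then scaled by
// capillary * permeability / viscosity. The result is stored per fluid
// particle in gradPressure.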
__global__ void ComputeFluidFlux(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum)
return;
//if (buf.misbound[i])
// return;
if (buf.MFtype[i] != 0)
return;
//if (buf.MFtype[i] == 1 && buf.isSurface[buf.elasticID[i]]!=1)
// return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
float normalize = 0;
for(int k=1;k<simData.mf_catnum;++k)
for (int l = 0; l<MAX_SOLIDNUM; ++l)
buf.gradPressure[i*MAX_FLUIDNUM*MAX_SOLIDNUM+k*MAX_SOLIDNUM+l] = make_float3(0, 0, 0);
for (int c = 0; c < simData.gridAdjCnt; c++)
{
contributeFluidFlux(i, gc + simData.gridAdj[c], buf, normalize);
}
//if(normalize !=0)
for (int k = 1; k<simData.mf_catnum; ++k)
for (int l = 0; l < MAX_SOLIDNUM; ++l)
//buf.gradPressure[i*MAX_FLUIDNUM + k] *= simData.mf_permeability[k] / (simData.mf_visc[k]*abs(normalize));
{
buf.gradPressure[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l]
*= simData.capillary*simData.mf_permeability[k*MAX_SOLIDNUM + l] / simData.mf_visc[k];
//if (dot(buf.gradPressure[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l], buf.gradPressure[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l]) != 0)
// printf("%d's phase %d %d's grad pressure is (%f,%f,%f)\n", i, k, l,
// buf.gradPressure[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l].x, buf.gradPressure[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l].y,
// buf.gradPressure[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l].z);
}
//if (isnan(dot(buf.gradPressure[i], buf.gradPressure[i])))
//if(dot(buf.gradPressure[i*MAX_FLUIDNUM + 1], buf.gradPressure[i*MAX_FLUIDNUM + 1])!=0&&i%100==0)
// printf("particle %d's type is %d, grad pressure is (%f,%f,%f)\n",
// i, buf.MFtype[i], buf.gradPressure[i*MAX_FLUIDNUM + 1].x, buf.gradPressure[i*MAX_FLUIDNUM + 1].y, buf.gradPressure[i*MAX_FLUIDNUM + 1].z
// );
}
__device__ void contributeSolidDarcyFlux(int i, int cell, bufList buf)
{
float dsq, c;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2 / d2;
float3 dist, vmr;
float cmterm, cmterm1;
// float massj;
float3 pmterm, vmterm;
// float q;
int j, mulj;
float aveDenij, cx, xterm;
if (buf.mgridcnt[cell] == 0) return;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
//buf.MFtype[j]<=1
if (buf.MFtype[j] != buf.MFtype[i])
continue;
dist = (buf.mpos[i] - buf.mpos[j]); // dist in cm
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if (!(dsq < r2 && dsq > 0))
continue;
dist *= simData.psimscale;
dsq = sqrt(dsq*d2);
c = (simData.psmoothradius - dsq);
//if (buf.MFtype[i] == 1)
cmterm = c*c*simData.spikykern*buf.mf_restmass[j] * buf.density_solid[i];
//else
// cmterm = c*c*simData.spikykern*buf.mf_restmass[j] * buf.mdensity[i];
pmterm = dist / dsq*cmterm;
for (int k = 1; k<simData.mf_catnum; ++k)
buf.gradPressure[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[i]-2] -=
(buf.pressure_water[j*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[i] - 2]
- buf.pressure_water[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[i] - 2])*pmterm;
//normalize += cmterm;
}
}
__global__ void ComputeSolidDarcyFlux(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum)
return;
if (buf.MFtype[i] <= 1)
return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
for (int k = 1; k<simData.mf_catnum; ++k)
buf.gradPressure[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[i] - 2] = make_float3(0, 0, 0);
for (int c = 0; c < simData.gridAdjCnt; c++)
{
contributeSolidDarcyFlux(i, gc + simData.gridAdj[c], buf);
}
for (int k = 1; k < simData.mf_catnum; ++k)
{
//poro velocity
buf.gradPressure[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM+buf.MFtype[i]-2]
*= simData.mf_permeability[k*MAX_SOLIDNUM+ buf.MFtype[i] - 2] / (simData.mf_visc[k] * simData.rest_porosity);
//buf.gradPressure[i*MAX_FLUIDNUM + k] += buf.mveleval[i];
}
}
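// Per-cell helper for ComputeFluidChange: projects the Darcy flux (gradPressure) of fluid
// particle i onto the directions of neighbouring solid particles to estimate its divergence.
// ComputeFluidChange turns that divergence, times dt, into the amount of each phase that is
// absorbed into (positive) or released from (negative) the solid, clamped so that the free
// fraction alpha and the absorbed fraction beta stay non-negative.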
__device__ void contributeFluidChange(int i, int cell, bufList buf)
{
float dsq, c;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
//int jndex;
float3 dist, vmr;
float cmterm, cmterm1;
// float massj;
float pmterm, vmterm;
// float q;
int j, mulj;
float aveDenij, cx, xterm;
float sum = 0;
//if (i % 100 == 0)
// printf("particle %d's gridcnt is %d\n", i,buf.mgridcnt[cell]);
if (buf.mgridcnt[cell] == 0) return;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
//int index = buf.elasticID[i];
//int jndex,index = buf.elasticID[i];
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.MFtype[j] <= 1)
continue;
//jndex = buf.elasticID[j];
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale; // dist in cm
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if (!(dsq < r2 && dsq > 0))
continue;
dsq = sqrt(dsq);
c = simData.psmoothradius - dsq;
cmterm = c*c*simData.spikykern * buf.mf_restmass[j] * buf.density_solid[j];
for(int k=1;k<simData.mf_catnum;++k)
buf.divDarcyFlux[i*MAX_FLUIDNUM*MAX_SOLIDNUM+k*MAX_SOLIDNUM+buf.MFtype[j]-2] +=
dot(buf.gradPressure[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[j] - 2], dist / dsq)*cmterm;
//normalize += cmterm;
}
return;
}
__global__ void ComputeFluidChange(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (buf.MFtype[i] != 0)
return;
int gc = buf.mgcell[i];
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
if (gc == GRID_UNDEF) return;
gc -= nadj;
for (int k = 0; k<simData.mf_catnum; ++k)
for(int l=0;l<MAX_SOLIDNUM;++l)
buf.divDarcyFlux[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] = 0;
for (int c = 0; c < simData.gridAdjCnt; c++)
{
contributeFluidChange(i, gc + simData.gridAdj[c], buf);
}
for (int k = 1; k < simData.mf_catnum; ++k)
{
for (int l = 0; l < MAX_SOLIDNUM; ++l)
{
buf.divDarcyFlux[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] *= simData.mf_dt;
if (buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] == 0)
buf.poroVel[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] = make_float3(0, 0, 0);
if (buf.divDarcyFlux[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] > 0)
{
if (buf.mf_alpha[i*MAX_FLUIDNUM + k] - buf.divDarcyFlux[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] < 0.001)
{
buf.divDarcyFlux[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] = buf.mf_alpha[i*MAX_FLUIDNUM + k];
}
}
if (buf.divDarcyFlux[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] < 0)
{
if (buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] + buf.divDarcyFlux[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] < 0.001)
buf.divDarcyFlux[i*MAX_FLUIDNUM *MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] = -buf.mf_beta[i*MAX_FLUIDNUM *MAX_SOLIDNUM + k*MAX_SOLIDNUM + l];
}
buf.mf_beta_next[i*MAX_FLUIDNUM *MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] += buf.divDarcyFlux[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l];
buf.mf_alpha_next[i*MAX_FLUIDNUM + k] -= buf.divDarcyFlux[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l];
//if (isnan(buf.divDarcyFlux[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l]))
//if(buf.divDarcyFlux[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l]!=0)
// printf("particle %d's phase %d's div Darcy flux is %f, darcy flux is (%f,%f,%f)\n",
// i, k, buf.divDarcyFlux[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l],
// buf.gradPressure[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l].x,
// buf.gradPressure[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l].y,
// buf.gradPressure[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l].z);
}
}
//if (buf.mf_alpha[i*MAX_FLUIDNUM + 1] < buf.mf_alpha[i*MAX_FLUIDNUM + 2]-0.1&&!buf.isInside[i])
// printf("particle %d's alpha is (%f,%f), beta is (%f,%f), divDarcyFlux is (%f,%f)\n",
// i, buf.mf_alpha[i*MAX_FLUIDNUM + 1], buf.mf_alpha[i*MAX_FLUIDNUM + 2],
// buf.mf_beta[i*MAX_FLUIDNUM + 1], buf.mf_beta[i*MAX_FLUIDNUM + 2],
// buf.divDarcyFlux[i*MAX_FLUIDNUM + 1], buf.divDarcyFlux[i*MAX_FLUIDNUM + 2]);
}
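// Per-cell helper for ComputeFluidAdvance: accumulates, between fluid particles only, the
// divergence of beta * poroVel and the gradient of beta. ComputeFluidAdvance combines them
// into betachange = dt * (-DivVelocity + v_i . gradBeta), i.e. advection of the absorbed
// fraction carried along with the mixture velocity.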
__device__ void contributeFluidAdvance(int i, int cell, bufList buf, float3*gradBeta, float*DivVelocity)
{
float dsq, c, dsq2;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist;
float cmterm;
float pmterm;
int j;
if (buf.mgridcnt[cell] == 0) return;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
for (int cndx = cfirst; cndx < clast; cndx++) {
j = buf.mgrid[cndx];
if (buf.MFtype[j] != 0)
continue;
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale; // dist in cm
dsq = dot(dist, dist);
if (!(dsq < r2 && dsq > 0))
continue;
dsq = sqrt(dsq);
c = simData.psmoothradius - dsq;
for (int k = 1; k < simData.mf_catnum; ++k)
{
cmterm = c*c*simData.spikykern * buf.mf_restmass[j] * buf.mdensity[j];
for (int l = 0; l < MAX_SOLIDNUM; ++l)
{
DivVelocity[k*MAX_SOLIDNUM + l] += cmterm *
dot((buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM+l] * buf.poroVel[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM+l] +
buf.mf_beta[j*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM+l] * buf.poroVel[j*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM+l]), dist);
gradBeta[k*MAX_SOLIDNUM+l] += cmterm * (buf.mf_beta[j*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM+l] - buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM+l])*dist;
}
}
}
return;
}
__global__ void ComputeFluidAdvance(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum)
return;
if (buf.MFtype[i] != 0)
return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
float3 gradBeta[MAX_FLUIDNUM*MAX_SOLIDNUM];
float DivVelocity[MAX_FLUIDNUM*MAX_SOLIDNUM],betachange[MAX_FLUIDNUM*MAX_SOLIDNUM];
float sigma = 1;
for (int k = 1; k < simData.mf_catnum; ++k)
{
for (int l = 0; l < MAX_SOLIDNUM; ++l)
{
gradBeta[k*MAX_SOLIDNUM + l] = make_float3(0, 0, 0);
DivVelocity[k*MAX_SOLIDNUM + l] = 0;
}
}
for (int c = 0; c < simData.gridAdjCnt; c++)
{
contributeFluidAdvance(i, gc + simData.gridAdj[c], buf, gradBeta, DivVelocity);
}
//float betasum = 0;
for (int k = 1; k < simData.mf_catnum; ++k)
{
for (int l = 0; l < MAX_SOLIDNUM; ++l)
{
betachange[k*MAX_SOLIDNUM+l] = sigma*simData.mf_dt*(-DivVelocity[k*MAX_SOLIDNUM + l] + dot(buf.mveleval[i], gradBeta[k*MAX_SOLIDNUM + l]));
/*if (abs(betachange[k]) >= 0.0001)
printf("error! particle %d's beta change is (%f,%f)\n",
i, betachange[1], betachange[2]);*/
//betachange limit
if (betachange[k*MAX_SOLIDNUM + l] < -0.99)
{
betachange[k*MAX_SOLIDNUM + l] = -0.99;// * ((int)(buf.mf_alpha[muloffseti+fcount]>0)-(int)(buf.mf_alpha[muloffseti+fcount]<0));
}
//betasum += buf.mf_beta_next[i*MAX_FLUIDNUM + k];
buf.mf_beta_next[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l] += betachange[k*MAX_SOLIDNUM + l];
}
}
//if (i % 10000 == 0 && buf.solidCount[i]!=0)
// printf("particle %d's beta change is (%f,%f)\n",
// i, betachange[1], betachange[2]);
}
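// Per-cell helper for ComputeCapillaryForce: for a fluid particle i, sums a capillary
// interaction term along the direction to each neighbouring solid particle j, weighted by
// the total fraction absorbed into j's solid type and shaped by a cubic-spline-gradient
// kernel; the caller scales the sum by simData.capillaryForceRatio.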
__device__ float3 contributeCapillaryForce(int i, int cell, bufList buf)
{
float dsq, c, dsq2;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist;
float cmterm;
float3 pmterm;
int j, jndex;
float3 sum = make_float3(0,0,0);
if (buf.mgridcnt[cell] == 0) return sum;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
//float kernel, betasum, kparm = 0.007 / pow(simData.psmoothradius, (float)(3.25));
float kernel, betasum, kparm = 8 / (3.1415926*pow(simData.psmoothradius, 3));
float q;
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.MFtype[j] > 1)
{
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale; // dist in cm
dsq2 = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if (!(dsq2 < r2 && dsq2 > 0))
continue;
q = sqrt(dsq2 / r2);
//if (q > 1||q==0)
// continue;
if (q <= 0.5)
kernel = 3*q*q-2*q;
else
kernel = -pow(1-q,2);
//kernel *= kparm;
dsq = sqrt(dsq2);
//c = simData.psmoothradius - dsq;
betasum = 0;
for (int k = 1; k < simData.mf_catnum; ++k)
betasum += buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[j] - 2];
sum += betasum*buf.mf_restmass[j] * buf.density_solid[j] * kernel *simData.gradCubicSplineKern * dist / dsq;
//betasum = 1;
//sum += -betasum * buf.mf_restdensity[i] * buf.density_solid[j] * dist / dsq * kernel;
//sum += betasum*buf.mf_restmass[j] * buf.density_solid[j] * c*c *simData.spikykern * dist / dsq;
//dsq = sqrt(dsq2);
//if (2 * dsq > simData.psmoothradius)
// continue;
//c = simData.psmoothradius - dsq;
//betasum = 0;
//for (int k = 1; k < simData.mf_catnum; ++k)
// betasum += buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[j] - 2];
////sum += betasum * buf.mf_restmass[j] * c*c *simData.spikykern * dist / dsq;
// //betasum += buf.mf_alpha[i*MAX_FLUIDNUM + k] * simData.mf_dens[k];
//betasum = 1;
//kernel = pow((float)(2 * dsq - 4 * dsq*dsq / simData.psmoothradius), (float)0.25);
////kernel = sqrt(sqrt(6 * dsq - 2 * simData.psmoothradius - 4 * dsq*dsq / simData.psmoothradius));
//kernel *= kparm;
//sum += -betasum * buf.mf_restdensity[i] * buf.density_solid[j] * dist/dsq * kernel;
}
}
return sum;
}
__global__ void ComputeCapillaryForce(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum)
return;
if (buf.MFtype[i] != 0)
return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
float colorField = 0;
float3 normal = make_float3(0, 0, 0);
for (int c = 0; c < simData.gridAdjCnt; c++)
{
normal += simData.capillaryForceRatio * contributeCapillaryForce(i, gc + simData.gridAdj[c], buf);
}
if ( isnan(dot(normal, normal)))
printf("capillary force is (%f,%f,%f)\n", normal.x, normal.y, normal.z);
//colorField = dot(normal, normal);
//if (colorField > simData.AL2) {
// normal *= simData.AL / sqrt(colorField);
//}
buf.mforce[i] += normal;
buf.poroForce[i] += normal;
buf.maccel[i] = buf.mforce[i];
}
__device__ float3 contributeInnerBoundaryForce(int i, int cell, bufList buf, float betasum, float kparm)
{
float dsq, c, dsq2;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist;
float cmterm;
float3 pmterm;
int j, jndex;
float3 sum = make_float3(0, 0, 0);
if (buf.mgridcnt[cell] == 0) return sum;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.MFtype[j] == 1)
{
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale; // dist in cm
dsq2 = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
//if (!(dsq2 < r2 && dsq2 > 0))
// continue;
dsq = sqrt(dsq2);
if (2 * dsq >= simData.psmoothradius)
continue;
cmterm = 0.5*buf.mf_visc[i]*simData.psmoothradius*buf.mdensity[i];
cmterm *= (max((float)0, dot(dist, -buf.mveleval[i]))) / (0.01*r2 + dsq2)*buf.density_solid[j];
//c = (simData.psmoothradius - dsq);
//if (buf.MFtype[i] == 1)
//cmterm *= c*c*simData.spikykern;
//if (2 * dsq - 4 * dsq2 / simData.psmoothradius < 0)
// continue;
cmterm *= kparm*pow(2 * dsq - 4 * dsq2 / simData.psmoothradius, (float)0.25);
//if (isnan(cmterm))
// continue;
sum += betasum*cmterm * dist / dsq;
}
}
return sum;
}
__global__ void ComputeInnerBoundaryForce(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum)
return;
if (buf.MFtype[i] != 0)
return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
float colorField = 0;
float betasum = 0;
for (int k = 1; k < simData.mf_catnum; ++k)
for(int l=1;l<=3;++l)
betasum += buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + l];
//betasum = 1;
if (betasum < 0.001)
return;
float kparm = 0.007 / pow(simData.psmoothradius, (float)(3.25));
//printf("beta sum%f\n", betasum);
float3 normal = make_float3(0, 0, 0);
for (int c = 0; c < simData.gridAdjCnt; c++)
{
normal += contributeInnerBoundaryForce(i, gc + simData.gridAdj[c], buf, betasum, kparm);
}
if (isnan(dot(normal,normal)))
printf("inner boundary force is (%f,%f,%f)\n", normal.x, normal.y, normal.z);
//colorField = dot(normal, normal);
//if (colorField > simData.AL2) {
// normal *= simData.AL / sqrt(colorField);
//}
buf.mforce[i] += normal;
buf.poroForce[i] += normal;
buf.maccel[i] = buf.mforce[i];
}
__device__ float3 contributeSurfaceTension2(int i, int cell, bufList buf)
{
float dsq, c, dsq2;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist;
float cmterm;
int j, jndex;
float3 sum = make_float3(0, 0, 0);
if (buf.mgridcnt[cell] == 0) return sum;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.MFtype[j] > 1)
{
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale; // dist in cm
dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z);
if (!(dsq < r2 && dsq > 0))
continue;
dsq = sqrt(dsq);
c = simData.psmoothradius - dsq;
cmterm = buf.mf_restmass[j] * buf.density_solid[j] * c*c / dsq*simData.spikykern;
//sum += (buf.pressure_water[i*MAX_FLUIDNUM] - buf.pressure_water[j*MAX_FLUIDNUM])*cmterm;
sum += (buf.pressure_water[i*MAX_FLUIDNUM])*cmterm;
}
}
return sum;
}
__global__ void ComputeSurfaceTension2(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum)
return;
if (buf.MFtype[i] != 0)
return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
float colorField = 0;
float3 normal = make_float3(0, 0, 0);
float mor = 2/simData.CoCompressibility;
//float mor = 0.002;
for (int c = 0; c < simData.gridAdjCnt; c++)
{
normal += mor * contributeSurfaceTension2(i, gc + simData.gridAdj[c], buf);
}
buf.mforce[i] += normal / buf.mf_restdensity[i];
//buf.poroForce[i] += (buf.mf_beta[i*MAX_FLUIDNUM + 1] + buf.mf_beta[i*MAX_FLUIDNUM + 2])*normal;
buf.maccel[i] = buf.mforce[i];
}
//capillary force exerted on fluid particles
void ComputePorousForceCUDA()
{
cudaError_t error;
ComputeSolidDarcyFlux << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: compute solid darcy flux CUDA: %s\n", cudaGetErrorString(error));
}
cudaDeviceSynchronize();
ComputePoroVelocity << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: compute poro velocity CUDA: %s\n", cudaGetErrorString(error));
}
cudaDeviceSynchronize();
//if(fcuda.example == 11)
//	ComputeSurfaceTension2 << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
//else
ComputeCapillaryForce << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: compute capillary force CUDA: %s\n", cudaGetErrorString(error));
}
cudaDeviceSynchronize();
//if (fcuda.example != 6)
//{
//ComputeInnerBoundaryForce << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
//error = cudaGetLastError();
//if (error != cudaSuccess) {
//	fprintf(stderr, "CUDA ERROR: compute inner boundary force CUDA: %s\n", cudaGetErrorString(error));
//}
//cudaDeviceSynchronize();
//}
//fluid flow between the fluid and the solid surface
ComputeFluidFlux << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: compute fluid flux CUDA: %s\n", cudaGetErrorString(error));
}
cudaDeviceSynchronize();
ComputeFluidChange << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
error = cudaGetLastError();
if (error != cudaSuccess) {
fprintf(stderr, "CUDA ERROR: compute fluid change CUDA: %s\n", cudaGetErrorString(error));
}
cudaDeviceSynchronize();
}
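// Illustrative sketch (not part of the original code): the repeated
// "cudaGetLastError + fprintf + synchronize" blocks above could be collapsed into a small
// helper macro. The name CUDA_CHECK_LAST is hypothetical; it only relies on the standard
// CUDA runtime API (cudaGetLastError, cudaGetErrorString, cudaDeviceSynchronize).
#define CUDA_CHECK_LAST(msg)                                                       \
do {                                                                               \
    cudaError_t e_ = cudaGetLastError();                                           \
    if (e_ != cudaSuccess)                                                         \
        fprintf(stderr, "CUDA ERROR: %s: %s\n", (msg), cudaGetErrorString(e_));    \
    cudaDeviceSynchronize();                                                       \
} while (0)
// Hypothetical usage:
//   ComputeFluidFlux << < fcuda.numBlocks, fcuda.numThreads >> > (fbuf, fcuda.pnum);
//   CUDA_CHECK_LAST("compute fluid flux");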
//**************************************************************************************************
//implicit incompressible SPH
__device__ float3 contributePressureForce(int i,float3 pos,int cell, bufList buf, int& count)
{
float3 force = make_float3(0, 0, 0);
if (buf.mgridcnt[cell] == 0)return force;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist;
float c, dsq2, dsq;
int j;
float3 vmr;
float cmterm;
float3 vmterm;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
float q;
for(int cndx = cfirst;cndx < clast;cndx++)
{
j = buf.mgrid[cndx];
//if (buf.MFtype[i] != buf.MFtype[j] && (!buf.misbound[i] && !buf.misbound[j]))
// continue;
/*if (buf.MFtype[i] == 1 && buf.MFtype[i] == buf.MFtype[j])
continue;*/
//if (buf.MFtype[i] == buf.MFtype[j] && buf.MFtype[i] == 1)
// continue;
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale;
dsq2 = dot(dist, dist);
dsq = sqrt(dsq2);
if (dsq2 > r2 || dsq2 <= 0)
continue;
//if (buf.MFtype[i] == 1 && buf.MFtype[i] == buf.MFtype[j])
// continue;
count++;
c = simData.psmoothradius - dsq;
//cmterm = buf.mf_restmass[j] * (buf.mpress[i] * pow(buf.mdensity[i], 2) + buf.mpress[j] * pow(buf.mdensity[j], 2));
//force -= cmterm *c*c*dist*simData.spikykern/dsq;
//force += buf.volume[j]*c*c*simData.spikykern*dist / dsq*(buf.mpress[i] + buf.mpress[j]);
//pairwise pressure force
if(buf.volume[j] * buf.volume[i]!=0)
force += c*c*simData.spikykern*dist / dsq*buf.volume[j]* buf.volume[i]*(buf.mpress[j] + buf.mpress[i])/(buf.volume[i]+ buf.volume[j]);
}
return force;
}
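// The loop above uses a volume-weighted pairwise pressure term,
//   f_ij = V_i * V_j / (V_i + V_j) * (p_i + p_j) * gradW_ij,
// shared by ComputePressureForce and ApplyPressureForce, which both multiply the summed
// force by -volume/mass to turn it into an acceleration.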
//fluid pressure force
__global__ void ComputePressureForce(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
//if (i % 30000 == 0)
// printf("particle %d's type is %d, press is %.10f\n",
// i, buf.MFtype[i], buf.mpress[i]);
if (buf.misbound[i])
{
buf.mforce[i] = make_float3(0, 0, 0);
buf.maccel[i] = buf.mforce[i];
buf.pressForce[i] = make_float3(0, 0, 0);
return;
}
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
// Sum Pressures
float3 pos = buf.mpos[i];
//float dens = buf.mf_restdensity[i];
float3 force = make_float3(0, 0, 0);
int count = 0;
for (int c = 0; c < simData.gridAdjCnt; c++)
{
force += contributePressureForce(i, pos, gc + simData.gridAdj[c], buf, count);
}
//if (isnan(dot(force, force)))
//if(isnan(buf.volume[i])||isnan(buf.mpress[i]))
// printf("particle %d's type is %d, force is nan. press is %f, volume is %.10f,fluid percent is %f\n",
// i, buf.MFtype[i], buf.mpress[i], buf.volume[i], buf.fluidPercent[i]);
if(buf.MFtype[i] == 0)
buf.pressForce[i] = -buf.volume[i]/buf.mf_restmass[i]*force;
else
{
float mass = buf.mf_restmass[i];
//for (int k = 1; k < MAX_FLUIDNUM; ++k)
// mass += simData.stRatio * buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[i] - 2] * simData.mf_mass[k];
buf.pressForce[i] = -buf.volume[i] / mass * force;
}
//if (dot(buf.mforce[i], buf.mforce[i]) > 10000)
// printf("particle %d's type is %d, pressure force is (%f,%f,%f), pressure is %f\n",
// i, buf.MFtype[i], buf.mforce[i].x, buf.mforce[i].y, buf.mforce[i].z,
// buf.mpress[i]);
//if(isnan(dot(buf.mforce[i],buf.mforce[i])))
//if (dot(buf.mforce[i],buf.mforce[i])>10 && !buf.misbound[i])
// printf("particle %d's type is %d, pressure force is (%.10f,%.10f,%.10f),count is %d, press is %.10f, aii is %.10f, deltadensity is %.10f, rest mass is %.10f, volume is %.10f\n",
// i, buf.MFtype[i], buf.mforce[i].x, buf.mforce[i].y, buf.mforce[i].z, count, buf.mpress[i],buf.aii[i], buf.delta_density[i],buf.mf_restmass[i],buf.volume[i]);
//if (i % 30000 == 0)
// printf("volume is %.10f, m/rho is %.10f\n", buf.volume[i], buf.mf_restmass[i] * buf.mdensity[i]);
}
//apply the fluid pressure force and store the resulting acceleration
__global__ void ApplyPressureForce(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (buf.misbound[i])
{
buf.mforce[i] = make_float3(0, 0, 0);
buf.maccel[i] = buf.mforce[i];
return;
}
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
/*for (uint fcount = 0; fcount<simData.mf_catnum; fcount++)
{
buf.mf_alphagrad[i*MAX_FLUIDNUM + fcount] = make_float3(0, 0, 0);
buf.mf_alpha[i*MAX_FLUIDNUM + fcount] = buf.mf_alpha_next[i*MAX_FLUIDNUM + fcount];
buf.mf_beta[i*MAX_FLUIDNUM + fcount] = buf.mf_beta_next[i*MAX_FLUIDNUM + fcount];
}*/
// Sum Pressures
float3 pos = buf.mpos[i];
//float dens = buf.mf_restdensity[i];
float3 force = make_float3(0, 0, 0);
int count = 0;
for (int c = 0; c < simData.gridAdjCnt; c++)
{
force += contributePressureForce(i, pos, gc + simData.gridAdj[c], buf, count);
}
/*if(i%10000==0)
printf("particle %d's type is %d,source is %f, aii is %.10f,press is %f, vel is (%f,%f,%f),volume is %.10f,rest volume is %.10f,press force is (%f,%f,%f),alpha is (%f,%f,%f),beta is (%f,%f,%f)\n",
i, buf.MFtype[i], buf.source[i], buf.aii[i], buf.mpress[i],
buf.vel_mid[i].x, buf.vel_mid[i].y, buf.vel_mid[i].z,
buf.volume[i], buf.rest_volume[i], buf.pressForce[i].x, buf.pressForce[i].y, buf.pressForce[i].z,
buf.mf_alpha[i*MAX_FLUIDNUM + 0], buf.mf_alpha[i*MAX_FLUIDNUM + 1], buf.mf_alpha[i*MAX_FLUIDNUM + 2],
buf.mf_beta[i*MAX_FLUIDNUM + 0], buf.mf_beta[i*MAX_FLUIDNUM + 1], buf.mf_beta[i*MAX_FLUIDNUM + 2]);
*/
buf.pressForce[i] = -buf.volume[i] / buf.mf_restmass[i] * force;
if(buf.MFtype[i] == 0)
buf.mforce[i] += -buf.volume[i] / buf.mf_restmass[i] * force;
else
{
float mass = buf.mf_restmass[i];
//for (int k = 1; k < MAX_FLUIDNUM; ++k)
// mass += simData.stRatio * buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[i] - 2] * simData.mf_mass[k];
//if (buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[i] - 2] != 0)
// printf("type %d's fluid beta %d is %f\n", buf.MFtype[i] - 2, k, buf.mf_beta[i*MAX_FLUIDNUM*MAX_SOLIDNUM + k*MAX_SOLIDNUM + buf.MFtype[i] - 2]);
buf.mforce[i] += -buf.volume[i] / mass * force;
}
buf.fluidForce[i] = -buf.volume[i] / buf.mf_restmass[i] * force;
//if (i % 10 == 0 && buf.isInside[i])
// printf("p %d's press force is (%f,%f,%f)\n",
// i, buf.fluidForce[i].x, buf.fluidForce[i].y, buf.fluidForce[i].z);
//if (dot(buf.mforce[i], buf.mforce[i]) > 10000)
// printf("particle %d's type is %d, pressure force is (%f,%f,%f), pressure is %f\n",
// i, buf.MFtype[i], buf.mforce[i].x, buf.mforce[i].y, buf.mforce[i].z,
// buf.mpress[i]);
buf.maccel[i] = buf.mforce[i];
if(isnan(dot(buf.mforce[i],buf.mforce[i])))
//if (dot(buf.mforce[i],buf.mforce[i])>10 && !buf.misbound[i])
printf("particle %d's type is %d, pressure force is (%.10f,%.10f,%.10f),count is %d, press is %.10f, aii is %.10f, deltadensity is %.10f, rest mass is %.10f, volume is %.10f\n",
i, buf.MFtype[i], buf.mforce[i].x, buf.mforce[i].y, buf.mforce[i].z, count, buf.mpress[i],buf.aii[i], buf.delta_density[i],buf.mf_restmass[i],buf.volume[i]);
//if (i % 30000 == 0)
// printf("volume is %.10f, m/rho is %.10f\n", buf.volume[i], buf.mf_restmass[i] * buf.mdensity[i]);
}
__device__ float3 contributeViscosity(int i, int muli, float idens, float3 pos, int cell, bufList buf, float* ialpha_pre, float3* ivmk)
{
float3 force = make_float3(0, 0, 0);
if (buf.mgridcnt[cell] == 0)return force;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist, sf;
float c, dsq2, dsq;
int j, mulj;
float3 vmr;
float cmterm, vmterm;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
float xvprod, phiij, densityij, PIij, q;
float3 fP;
float cmterm1, vmterm1;
float viscoRatio = 1;
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.misbound[j])
continue;
mulj = j * MAX_FLUIDNUM;
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale;
dsq2 = dot(dist, dist);
if (dsq2 <= 0 || dsq2 >= r2)
continue;
dsq = sqrt(dsq2);
vmr = buf.mveleval[i] - buf.mveleval[j];
//viscosity
c = (simData.psmoothradius - dsq);
cmterm1 = simData.spikykern * c * c / dsq * buf.mf_restmass[j] * buf.density_solid[j];
vmterm1 = cmterm1 * (buf.mf_visc[i] + buf.mf_visc[j]) * idens;
//if ((buf.MFtype[i] == 4 && buf.MFtype[j] == 4))
// force += vmterm1 * vmr;
if (buf.MFtype[i] == buf.MFtype[j])
{
//if (buf.MFtype[i] != 0)
// force += viscoRatio * vmterm1 * vmr;
if (buf.MFtype[i] == 0)
{
float fluidsum = buf.mf_alpha_sum[i] * buf.mf_alpha_sum[j];
if (fluidsum <= 0.01)
fluidsum = 0;
else
fluidsum /= (buf.mf_alpha_sum[i] + buf.mf_alpha_sum[j]);
float fluidsum2 = (1 - buf.mf_alpha_sum[i])*(1 - buf.mf_alpha_sum[j]);
if (fluidsum2 <= 0.01)
fluidsum2 = 0;
else
fluidsum2 /= (2 - buf.mf_alpha_sum[i] - buf.mf_alpha_sum[j]);
//if (_example == 2)
// fluidsum2 = 0;
//force += (fluidsum + fluidsum2) * vmterm * dist / dsq;
force += (fluidsum + fluidsum2) * vmterm1 * vmr;
}
}
//else
//{
// float fluidsum = 0;
// if (buf.MFtype[i] == 0)
// fluidsum = buf.mf_alpha_sum[i];
// if (buf.MFtype[j] == 0)
// fluidsum = buf.mf_alpha_sum[j];
// force += fluidsum * vmterm1 * vmr;
//}
//if(buf.MFtype[i] + buf.MFtype[j] == 9)
// force += viscoRatio * vmterm1 * vmr;
}
return force;
}
__global__ void ComputeOtherForce(bufList buf, int pnum, float time)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (buf.misbound[i])return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
bool error = false;
// Sum Pressures
float3 pos = buf.mpos[i];
float dens = buf.mf_restdensity[i];
float3 force = make_float3(0, 0, 0);
float normalize = 0;
register uint muloffseti = i * MAX_FLUIDNUM;
register float alpha[MAX_FLUIDNUM];
register float3 ivmk[MAX_FLUIDNUM];
for (uint fcount = 0; fcount < simData.mf_catnum; fcount++)
{
//buf.mf_alphagrad[i*MAX_FLUIDNUM + fcount] = make_float3(0, 0, 0);
alpha[fcount] = buf.mf_alpha_next[muloffseti + fcount];
//buf.mf_alpha_pre[i*MAX_FLUIDNUM + fcount] = buf.mf_alpha[i*MAX_FLUIDNUM + fcount];
ivmk[fcount] = buf.mf_vel_phrel[muloffseti + fcount];
}
for (int c = 0; c < simData.gridAdjCnt; c++)
{
force += contributeViscosity(i, muloffseti, buf.mdensity[i], pos, gc + simData.gridAdj[c], buf, alpha, ivmk);
}
//if (dot(force, force) > 10)
// printf("particle %d's viscosity force is (%f,%f,%f)\n",
// i, force.x, force.y, force.z);
//bound force and gravity
//buf.mforce[i] += getBoundForce(i, buf, force, time);
buf.mforce[i] = force;
buf.fluidForce[i] = force;
buf.maccel[i] = buf.mforce[i];
/*if (buf.MFtype[i] == 0)
{
buf.mforce[i] *= 1-buf.absorbedPercent[i];
buf.maccel[i] *= 1-buf.absorbedPercent[i];
}*/
if (isnan(dot(force,force)))
printf("particle %d's type is %d,visco force is (%f,%f,%f),pos is (%f,%f,%f), alpha sum is %f\n",
i, buf.MFtype[i], buf.mforce[i].x, buf.mforce[i].y, buf.mforce[i].z,
buf.mpos[i].x, buf.mpos[i].y, buf.mpos[i].z, buf.mf_alpha_sum[i]);
}
__device__ float contributeColorValue(int i, float3 pos, int cell, bufList buf)
{
if (buf.mgridcnt[cell] == 0)return 0;
float sum = 0;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist;
float c, dsq2, dsq;
int j, mulj;
float pmterm;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
float q;
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.MFtype[j] == 2 || (_example == 2 && buf.MFtype[j] >= 2))
continue;
mulj = j * MAX_FLUIDNUM;
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale;
dsq2 = dot(dist, dist);
q = sqrt(dsq2 / r2);
if (q>2)
continue;
if (q >= 0 && q <= 1)
pmterm = simData.CubicSplineKern2*(1 - 1.5*q*q*(1 - q / 2));
else
pmterm = simData.CubicSplineKern1*pow(2 - q, 3);
sum += pmterm * (buf.rest_colorValue[j]) * buf.mf_restmass[j] * buf.mdensity[j];
}
return sum;
}
__global__ void ComputeColorValue(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (buf.MFtype[i] == 2)return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
bool error = false;
// Sum Pressures
float3 pos = buf.mpos[i];
buf.colorValue[i] = 0;
for (int c = 0; c < simData.gridAdjCnt; c++)
{
buf.colorValue[i] += contributeColorValue(i, pos, gc + simData.gridAdj[c], buf);
}
}
__device__ float3 contributeColorTensor(int i, int cell, bufList buf, float &sigma)
{
float3 sum = make_float3(0, 0, 0);
if (buf.mgridcnt[cell] == 0)return sum;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist;
float c, dsq2, dsq;
int j, mulj;
float pmterm, cmterm;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.MFtype[j] == 2 || (_example == 2 && buf.MFtype[j] >= 2))
continue;
mulj = j * MAX_FLUIDNUM;
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale;
dsq2 = dot(dist, dist);
if (dsq2 > r2 ||dsq2 <=0)
continue;
dsq = sqrt(dsq2);
c = simData.psmoothradius - dsq;
cmterm = c*c*simData.spikykern / dsq;
pmterm = pow(r2 - dsq2, 3)*simData.poly6kern;
sum += cmterm * buf.colorValue[j] * buf.mf_restmass[j] * buf.mdensity[j] * dist;
sigma += pmterm;
}
return sum;
}
__global__ void ComputeColorTensor(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (buf.MFtype[i]!=0)return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
bool error = false;
// Sum Pressures
float3 pos = buf.mpos[i];
for (int k = 0; k < 9; ++k)
buf.colorTensor[i * 9 + k] = 0;
float3 gradCV = make_float3(0, 0, 0);
float sigma = 0, divCV;
for (int c = 0; c < simData.gridAdjCnt; c++)
{
gradCV += contributeColorTensor(i, gc + simData.gridAdj[c], buf, sigma);
}
divCV = dot(gradCV, gradCV);
if ((sqrt(divCV)) < 0.000000001)
{
for (int k = 0; k < 9; ++k)
buf.colorTensor[i * 9 + k] = 0;
return;
}
tensorProduct(gradCV, gradCV, buf.colorTensor + i * 9);
for (int m = 0; m < 3; ++m)
{
for (int n = 0; n < 3; ++n)
if (m == n)
buf.colorTensor[i * 9 + m * 3 + n] = divCV / 3 - buf.colorTensor[i * 9 + m * 3 + n];
else
buf.colorTensor[i * 9 + m * 3 + n] = - buf.colorTensor[i * 9 + m * 3 + n];
}
//if(abs(divCV) > 1)
////if (i % 1000 == 0 || isnan(buf.colorValue[i]))
// //printf("%d's color value is %f, gradCV is (%f,%f,%f)\n", i, buf.colorValue[i], gradCV.x, gradCV.y, gradCV.z);
// printf("%d's color tensor is (%f,%f,%f)(%f,%f,%f)(%f,%f,%f), gradCV is (%f,%f,%f), sigma is %f\n", i,
// buf.colorTensor[i * 9 + 0], buf.colorTensor[i * 9 + 1], buf.colorTensor[i * 9 + 2],
// buf.colorTensor[i * 9 + 3], buf.colorTensor[i * 9 + 4], buf.colorTensor[i * 9 + 5],
// buf.colorTensor[i * 9 + 6], buf.colorTensor[i * 9 + 7], buf.colorTensor[i * 9 + 8], gradCV.x, gradCV.y, gradCV.z,
// sigma);
for (int k = 0; k<9; ++k)
{
buf.colorTensor[i * 9 + k] *= simData.stRatio / (sqrt(divCV)*sigma*sigma);
}
}
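// ComputeColorTensor assembles, per fluid particle, the traceless tensor
//   T = (|gradC|^2 / 3) * I - gradC (x) gradC,
// scaled by stRatio / (|gradC| * sigma^2), from the smoothed color-field gradient.
// This has the form of a continuum-surface-stress term, presumably used elsewhere to
// apply a surface-tension force at the fluid interface.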
//__device__ float3 contributeDijPj(int i, float3 pos, int cell, bufList buf)
//{
// float3 DijPj = make_float3(0,0,0);
// if (buf.mgridcnt[cell] == 0)return DijPj;
//
// register float d2 = simData.psimscale * simData.psimscale;
// register float r2 = simData.r2;
// float3 dist;
// float c, dsq2, dsq;
// int j;
// float3 dji;
// float cmterm;
// float3 vmterm;
// int cfirst = buf.mgridoff[cell];
// int clast = cfirst + buf.mgridcnt[cell];
// float q;
// for (int cndx = cfirst; cndx < clast; cndx++)
// {
// j = buf.mgrid[cndx];
// dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale;
// dsq2 = dot(dist, dist);
// dsq = sqrt(dsq2);
// //q = dsq / simData.psmoothradius;
// //if (q >= 2 || q <= 0)
// // continue;
// //cmterm = buf.mf_restmass[j] * pow(buf.mdensity[j], 2)*buf.mpress[j];
// //if(q>1)
// //{
// // vmterm = simData.gradCubicSplineKern1*(2 - q)*(2 - q)*dist;
// // DijPj += cmterm*vmterm;
// //}
// //else
// //{
// // vmterm = simData.gradCubicSplineKern2*(2.25*q*q - 3 * q)*dist;
// // DijPj += cmterm*vmterm;
// //}
// if (dsq2 > r2 || dsq2 <= 0)
// continue;
// c = (simData.psmoothradius - dsq);
// cmterm = buf.mf_restmass[j] * pow(buf.mdensity[j], 2)*buf.mpress[j];
// DijPj += c*c*dist *cmterm*simData.spikykern/dsq;
// //DijPj += buf.mpress[j]*c*c*simData.spikykern*buf.mf_restmass[j] * pow(buf.mdensity[j], 2)*dist;
// //DijPj += -buf.mf_restmass[j] * pow()
// }
// return DijPj;
//}
//__global__ void ComputeDijPj(bufList buf, int pnum)
//{
// uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
// if (i >= pnum) return;
//
// // Get search cell
// int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
// uint gc = buf.mgcell[i];
// if (gc == GRID_UNDEF) return; // particle out-of-range
// gc -= nadj;
// bool error = false;
// // Sum Pressures
// float3 pos = buf.mpos[i];
// float dens = buf.mf_restdensity[i];
// buf.DijPj[i] = make_float3(0,0,0);
// for (int c = 0; c < simData.gridAdjCnt; c++)
// {
// buf.DijPj[i] += contributeDijPj(i, pos, gc + simData.gridAdj[c], buf);
// }
// buf.DijPj[i] *= -simData.mf_dt*simData.mf_dt;
// //if (i % 20000 == 0)
// // printf("particle %d's dijpj is (%f,%f,%f),press is %f\n",
// // i, buf.DijPj[i].x, buf.DijPj[i].y, buf.DijPj[i].z, buf.mpress[i]);
//}
//__global__ void updatePress(bufList buf, int pnum)
//{
// uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
// if (i >= pnum) return;
//
// // Get search cell
// int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
// uint gc = buf.mgcell[i];
// if (gc == GRID_UNDEF) return; // particle out-of-range
// gc -= nadj;
// bool error = false;
// // Sum Pressures
// float3 pos = buf.mpos[i];
// float dens = buf.mf_restdensity[i];
// float omega = 0.5;
// buf.mpress_pre[i] = (1 - omega) * buf.mpress[i];
// float sum = 0;
// for (int c = 0; c < simData.gridAdjCnt; c++)
// {
// sum += contributePressureIteration(i, pos, gc + simData.gridAdj[c], buf);
// }
// float delta = buf.mf_restdensity[i] - buf.inter_density[i] - sum;
// if (buf.aii[i] == 0)
// buf.mpress_pre[i] = buf.mpress[i];
// else
// buf.mpress_pre[i] += omega / buf.aii[i] * (delta);
//
// //if (buf.mpress_pre[i] < 0)
// // buf.mpress_pre[i] = 0;
// //if (i % 40000 == 0)
// // printf("aii is %.10f\n", buf.aii[i]);
// // printf("particle %d's press is %.10f,new press is %.10f, sum is %.10f, inter_density is %.10f,initial density is %f, aii is %.10f,delta is %.10f\n",
// // i, buf.mpress[i], buf.mpress_pre[i], sum, buf.inter_density[i],1/buf.mdensity[i], buf.aii[i],delta);
//}
//__global__ void applyPress(bufList buf, int pnum)
//{
// uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
// if (i >= pnum) return;
//
// // Get search cell
// int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
// uint gc = buf.mgcell[i];
// if (gc == GRID_UNDEF) return; // particle out-of-range
// gc -= nadj;
// if (buf.mpress_pre[i] < 0)
// buf.mpress_pre[i] = 0;
// buf.mpress[i] = buf.mpress_pre[i];
// //if (i % 2000==0)
// // printf("particle %d's press is %f\n", i, buf.mpress[i]);
//}
__device__ float contributeCriterion(int i, int cell, bufList buf)
{
float sum = 0;
if (buf.mgridcnt[cell] == 0)return sum;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist;
float c, dsq2, dsq;
int j;
float3 delta_force;
float3 cmterm, vmterm;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
float q;
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
//if (buf.MFtype[i] != buf.MFtype[j] && (!buf.misbound[i] && !buf.misbound[j]))
// continue;
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale;
dsq2 = dot(dist, dist);
dsq = sqrt(dsq2);
if (dsq2 > r2 || dsq2 <= 0)
continue;
//if (buf.MFtype[i] == 1 && buf.MFtype[i] == buf.MFtype[j])
// continue;
c = simData.psmoothradius - dsq;
//delta_force = buf.mf_restmass[j] * (buf.mforce[i] - buf.mforce[j]);
//sum += dot(delta_force, dist)*c*c*simData.spikykern/dsq;
//compute Ap
//cmterm = buf.volume[j] * (buf.mforce[i] - buf.mforce[j]);
//pairwise Ap
if (buf.volume[i] * buf.volume[j] != 0)
cmterm = buf.volume[i] * buf.volume[j] /(buf.volume[j]+buf.volume[i])* (buf.pressForce[i] - buf.pressForce[j]);
else
continue;
sum += dot(cmterm, dist / dsq)*c*c*simData.spikykern;
}
return sum;
}
__global__ void ComputeCriterion(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
//if (buf.MFtype[i] == 3)return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
float sum = 0;
float omega;
omega = 0.5*buf.rest_volume[i] / pow(simData.psmoothradius / 2, 3);
for (int c = 0; c < simData.gridAdjCnt; c++)
{
sum += contributeCriterion(i, gc + simData.gridAdj[c], buf);
}
sum *= pow(simData.mf_dt, 2);
buf.delta_density[i] = buf.source[i] - sum;
float p = buf.mpress[i];
if (abs(buf.aii[i]) != 0)
buf.mpress[i] = buf.mpress[i] + omega*buf.delta_density[i] / buf.aii[i];
//float fluidsum = 0;
//for (int k = 0; k < simData.mf_catnum; ++k)
// fluidsum += buf.mf_alpha[i*MAX_FLUIDNUM + k];
//if(isnan(buf.delta_density[i]))
//if (buf.mpress[i]!=0)
//if(buf.mpress[i]>1000000||isnan(buf.mpress[i]))
//if(abs(buf.delta_density[i])>1)
//if(buf.mpos[i].y<-5)
//printf("particle %d's type is %d, Ap is %f,source is %f, aii is %.10f,press is %f,press pre is %.10f, vel is (%f,%f,%f),volume is %.10f,rest volume is %.10f,press force is (%f,%f,%f),alpha is (%f,%f,%f),beta is (%f,%f,%f)\n",
// i, buf.MFtype[i], sum, buf.source[i], buf.aii[i], buf.mpress[i], p,
// buf.vel_mid[i].x, buf.vel_mid[i].y,buf.vel_mid[i].z,
// buf.volume[i],buf.rest_volume[i], buf.pressForce[i].x, buf.pressForce[i].y, buf.pressForce[i].z,
// buf.mf_alpha[i*MAX_FLUIDNUM + 0], buf.mf_alpha[i*MAX_FLUIDNUM + 1], buf.mf_alpha[i*MAX_FLUIDNUM + 2],
// buf.mf_beta[i*MAX_FLUIDNUM + 0], buf.mf_beta[i*MAX_FLUIDNUM + 1], buf.mf_beta[i*MAX_FLUIDNUM + 2]);
if (buf.mpress[i] < 0)
buf.mpress[i] = 0;
//if (buf.misbound[i] == 0)
//{
// if (buf.mpress[i] > 10000)
// buf.mpress[i] = 10000;
//}
//else
//{
if (buf.mpress[i] > 1000000)
buf.mpress[i] = 1000000;
//}
}
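// ComputeCriterion is the relaxed-Jacobi update of the pressure solve:
//   delta_density_i = source_i - dt^2 * (A p)_i
//   p_i <- clamp(p_i + omega * delta_density_i / a_ii, 0, 1e6)
// with omega proportional to rest_volume_i / (h/2)^3.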
//************************************************************************
//pressure boundary for IISPH
__device__ float contributeBRestVolume(int i, int cell, bufList buf)
{
float sum = 0;
if (buf.mgridcnt[cell] == 0)return sum;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist;
float c, dsq2, dsq;
int j;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
if (buf.MFtype[i]!=buf.MFtype[j])
continue;
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale;
dsq2 = dot(dist, dist);
//dsq = sqrt(dsq2);
if (dsq2 > r2 || dsq2 <= 0)
continue;
c = r2 - dsq2;
sum += pow(c, 3)*simData.poly6kern;
}
return sum;
}
__global__ void ComputeBRestVolume(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
if (buf.MFtype[i]==0)
{
float sum = 0;
for (int k = 1; k < simData.mf_catnum; ++k)
sum += buf.mf_alpha[i*MAX_FLUIDNUM+k];
buf.rest_volume[i] = sum*pow(simData.psmoothradius / 2, 3);
if (isnan(sum))
printf("error:sum is nan! fluid percent is (%f,%f,%f)\n",
buf.mf_alpha[i*MAX_FLUIDNUM + 0],
buf.mf_alpha[i*MAX_FLUIDNUM + 1],
buf.mf_alpha[i*MAX_FLUIDNUM + 2]);
return;
}
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
float sum = 0;
for (int c = 0; c < simData.gridAdjCnt; c++)
{
sum += contributeBRestVolume(i, gc + simData.gridAdj[c], buf);
}
sum += pow(simData.r2, 3)*simData.poly6kern;
buf.rest_volume[i] = simData.solid_pfactor / sum;
}
__device__ float contributeVolume(int i, int cell, bufList buf)
{
float sum = 0;
if (buf.mgridcnt[cell] == 0)return sum;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist;
float c, dsq2, dsq;
int j;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
//if (buf.MFtype[i] != buf.MFtype[j] && (!buf.misbound[i] && !buf.misbound[j]))
// continue;
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale;
dsq2 = dot(dist, dist);
//dsq = sqrt(dsq2);
if (dsq2 > r2 || dsq2 <= 0)
continue;
c = r2 - dsq2;
sum += buf.rest_volume[j] * pow(c, 3)*simData.poly6kern;
}
return sum;
}
__global__ void ComputeVolume(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
float sum = 0;
for (int c = 0; c < simData.gridAdjCnt; c++)
{
sum += contributeVolume(i, gc + simData.gridAdj[c], buf);
}
sum += buf.rest_volume[i] * pow(simData.r2, 3)*simData.poly6kern;
//if (i % 30000 == 0)
// printf("volume sum is %.10f, 0.15*pow(simData.psmoothradius / 2, 3) is %.10f,rest_volume is %.10f\n",
// sum, 0.15 * pow(simData.psmoothradius / 2, 3), buf.rest_volume[i]);
//if (buf.MFtype[i] != 0)
if(buf.misbound[i])
sum += 0.15*pow(simData.psmoothradius / 2, 3);
if (sum == 0)
buf.volume[i] = 0;
else
buf.volume[i] = buf.rest_volume[i] / sum;
//if (buf.MFtype[i] == 0)
// buf.volume[i] *= buf.fluidPercent[i];
//if (i % 30000 == 0)
//if(buf.misbound[i]&&i%10000==0)
//if (isnan(buf.volume[i]))
//{
// float fluidsum = 0;
// for (int k = 0; k < simData.mf_catnum; ++k)
// fluidsum += buf.mf_fluidPercent[i*MAX_FLUIDNUM + k];
// printf("particle %d's type is %d, rest_volume is %.10f, volume is %.10f, h3 is %.10f, sum is %.10f, fluidpercent is %f\n",
// i, buf.MFtype[i], buf.rest_volume[i], buf.volume[i], 2500000 * pow(simData.psmoothradius / 2, 3), sum, fluidsum);
//}
}
__device__ float contributeSource(int i, int cell, bufList buf)
{
float sum = 0;
if (buf.mgridcnt[cell] == 0)return sum;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist;
float c, dsq2, dsq;
int j;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
float3 velocity,cmterm;
float q;
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
//if (buf.MFtype[i] != buf.MFtype[j] && (!buf.misbound[i] && !buf.misbound[j]))
// continue;
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale;
dsq2 = dot(dist, dist);
dsq = sqrt(dsq2);
if (dsq2 > r2 || dsq2 <= 0)
continue;
if (buf.MFtype[i] == 1 && buf.MFtype[i] == buf.MFtype[j])
continue;
//if(_example == 2 && (buf.MFtype[i] == buf.MFtype[j]) && buf.MFtype[i])
c = simData.psmoothradius - dsq;
//velocity = buf.vel_mid[i] - buf.vel_mid[j];
//velocity = buf.mveleval[i] - buf.mveleval[j];
//if(buf.MFtype[j]==0)
// velocity *= buf.fluidPercent[j]*buf.volume[j];
//else
// velocity *= buf.volume[j];
//pairwise divergence velocity
if (buf.volume[i] * buf.volume[j] != 0)
velocity = buf.volume[i] * buf.volume[j] / (buf.volume[i] + buf.volume[j]) * (buf.vel_mid[i] - buf.vel_mid[j]);
else
continue;
cmterm = c*c*dist / dsq*simData.spikykern;
sum += -dot(velocity, cmterm);
}
return sum;
}
__global__ void ComputeSource(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
float sum = 0;
for (int c = 0; c < simData.gridAdjCnt; c++)
{
sum += contributeSource(i, gc + simData.gridAdj[c], buf);
}
if (buf.MFtype[i] == 0)
{
if(buf.volume[i] == 0)
buf.source[i] = buf.mf_alpha_sum[i]*simData.mf_dt*sum;
else
buf.source[i] = (1
- buf.rest_volume[i] / buf.volume[i]
+ simData.mf_dt*sum)*buf.mf_alpha_sum[i];
}
else
buf.source[i] = 1 - buf.rest_volume[i] / buf.volume[i] + simData.mf_dt*sum;
//if(isnan(buf.source[i]))
/*if (i % 30000 == 0&&buf.MFtype[i]==0)
printf("particle %d's source is %f, fluidsum is %f,cat num is %d, rest_volume is %.10f, buf.volume is %.10f, velocity divergence is %.10f, mid vel is (%f,%f,%f)\n",
i, buf.source[i], fluidsum, simData.mf_catnum, buf.rest_volume[i], buf.volume[i], simData.mf_dt*sum,
buf.vel_mid[i].x, buf.vel_mid[i].y, buf.vel_mid[i].z);*/
}
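// ComputeSource builds the right-hand side of the pressure equation:
//   source_i = 1 - rest_volume_i / volume_i + dt * div(v_mid)_i,
// multiplied by the free-fluid fraction alpha_sum for fluid particles, and falling back to
// alpha_sum * dt * div(v_mid) when the sampled volume is zero.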
__device__ float contributeAIIfluid(int i, float3 pos, int cell, bufList buf, float3&sum1, int&count)
{
if (buf.mgridcnt[cell] == 0)return 0;
float sum2 = 0;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist;
float c, dsq2, dsq;
int j;
float3 dji;
float cmterm;
float3 vmterm;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
float q;
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
//if (buf.MFtype[i] != buf.MFtype[j] && (!buf.misbound[i] && !buf.misbound[j]))
// continue;
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale;
dsq2 = dot(dist, dist);
dsq = sqrt(dsq2);
//spiky kern
if (dsq2 > r2 || dsq2 <= 0)
continue;
c = (simData.psmoothradius - dsq);
//pressure boundary
count++;
//if(buf.MFtype[i]==0||buf.MFtype[i]!=buf.MFtype[j])
sum1 += buf.volume[j] * c*c*simData.spikykern*dist / dsq;
if (!buf.misbound[j]) {
if (buf.volume[j] == 0)
sum2 += 0;
else
sum2 += buf.volume[j] * buf.volume[j] / buf.mf_restmass[j]
* pow(c*c*simData.spikykern, 2);
}
//sum2 += buf.volume[j] * buf.volume[j] / (buf.mf_restmass[j]*(1-buf.absorbedPercent[i]))
// * pow(c*c*simData.spikykern, 2);
}
return sum2;
}
__device__ float contributeAIIsolid(int i, float3 pos, int cell, bufList buf)
{
if (buf.mgridcnt[cell] == 0)return 0;
float sum = 0;
register float d2 = simData.psimscale * simData.psimscale;
register float r2 = simData.r2;
float3 dist;
float c, dsq2, dsq;
int j;
float3 dji;
float cmterm;
float3 vmterm;
int cfirst = buf.mgridoff[cell];
int clast = cfirst + buf.mgridcnt[cell];
float q;
for (int cndx = cfirst; cndx < clast; cndx++)
{
j = buf.mgrid[cndx];
dist = (buf.mpos[i] - buf.mpos[j])*simData.psimscale;
dsq2 = dot(dist, dist);
dsq = sqrt(dsq2);
//spiky kern
if (dsq2 > r2 || dsq2 <= 0)
continue;
c = (simData.psmoothradius - dsq);
//iisph
/*c = (simData.psmoothradius - dsq);
cmterm = dot(buf.dii[i], dist)*buf.mf_restmass[j] * c*c*simData.spikykern / dsq;
buf.aii[i] += cmterm;
vmterm = pow(simData.mf_dt, 2)*buf.mf_restmass[i]
* pow(buf.mdensity[i], 2) *c*c*simData.spikykern *dist /dsq;
vmterm *= c*c*simData.spikykern/dsq*buf.mf_restmass[j];
buf.aii[i] -= dot(vmterm, dist);*/
//pressure boundary
if (!buf.misbound[j]) {
sum += buf.volume[j] * buf.volume[j] / buf.mf_restmass[j] * pow(c*c*simData.spikykern, 2);
}
}
return sum;
}
__global__ void ComputeAII(bufList buf, int pnum)
{
uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index
if (i >= pnum) return;
// Get search cell
int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1;
uint gc = buf.mgcell[i];
if (gc == GRID_UNDEF) return; // particle out-of-range
gc -= nadj;
bool error = false;
// Sum Pressures
float3 pos = buf.mpos[i];
float dens = buf.mf_restdensity[i];
buf.aii[i] = 0;
int count = 0;
float3 sum1 = make_float3(0, 0, 0);
for (int c = 0; c < simData.gridAdjCnt; c++)
{
if (!buf.misbound[i])
//if(buf.MFtype[i]==0)
buf.aii[i] += contributeAIIfluid(i, pos, gc + simData.gridAdj[c], buf, sum1, count);
else
buf.aii[i] += contributeAIIsolid(i, pos, gc + simData.gridAdj[c], buf);
}
float mass = buf.mf_restmass[i];
buf.aii[i] += dot(sum1, sum1) / mass;
//pressure boundary
buf.aii[i] *= -simData.mf_dt*simData.mf_dt*buf.volume[i];
buf.mpress[i] = 0;
}
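// Illustrative sketch (not from the original source): a host-side pressure-solve loop that
// these kernels appear to be written for. Launch parameters, iteration count and the exact
// call order are assumptions made for illustration only.
/*
ComputeBRestVolume << < numBlocks, numThreads >> > (fbuf, pnum);
ComputeVolume      << < numBlocks, numThreads >> > (fbuf, pnum);
ComputeSource      << < numBlocks, numThreads >> > (fbuf, pnum);
ComputeAII         << < numBlocks, numThreads >> > (fbuf, pnum);
for (int iter = 0; iter < maxIter; ++iter) {
    ComputePressureForce << < numBlocks, numThreads >> > (fbuf, pnum);  // pressure force from current p
    ComputeCriterion     << < numBlocks, numThreads >> > (fbuf, pnum);  // relaxed-Jacobi pressure update
}
ApplyPressureForce << < numBlocks, numThreads >> > (fbuf, pnum);        // add final pressure acceleration
*/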
|
1505d3d02ecfa300c93d3c21d55918048f4ee1ef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hip/hip_complex.h>
#include "common.h"
#include "metrix.h"
/* 128x128 bins, 8x8-blocks 16x16 threads each.
BU - u-number of bin
BV - v-number of bin
*/
template <
int grid_size
, int bstep
, bool is_half_gcf
>
__device__ __inline__ static void grid_kernel_gather(
int BU
, int BV
, const Pregridded * uvo
, const Double4c * vis
, int num_of_vals
, const hipDoubleComplex * gcf[]
, Double4c grid[grid_size][grid_size]
){
int gu = blockIdx.x * blockDim.x + threadIdx.x + BU * bstep;
int gv = blockIdx.y * blockDim.y + threadIdx.y + BV * bstep;
for (int i = 0; i < num_of_vals; i++){
int du, dv, supp;
du = gu - uvo[i].u;
dv = gv - uvo[i].v;
supp = uvo[i].gcf_layer_supp;
// We have input u and v translated by -supp/2!
if (du >= 0 && dv >= 0 && du < supp && dv < supp) {
complexd supportPixel;
#define __layeroff du * supp + dv
if (is_half_gcf) {
int index = uvo[i].gcf_layer_index;
// Negative index indicates that original w was mirrored
// and we shall negate the index to obtain correct
// offset *and* conjugate the result.
if (index < 0) {
supportPixel = gcf[-index][__layeroff];
supportPixel.y = - supportPixel.y;
} else {
supportPixel = gcf[index][__layeroff];
}
} else {
supportPixel = gcf[uvo[i].gcf_layer_index][__layeroff];
}
#define __ADDPOL(pol) grid[gu][gv].pol = hipCfma(supportPixel, vis[i].pol, grid[gu][gv].pol);
// accumulate all four polarisation products of the visibility into the grid point
__ADDPOL(XX)
__ADDPOL(XY)
__ADDPOL(YX)
__ADDPOL(YY)
}
}
}
#define gridKernelGather(suff, ishalf) \
extern "C" __global__ void gridKernelGather##suff( \
const Pregridded * uvo \
, const Double4c * vis \
, const hipDoubleComplex * gcf[] \
, Double4c grid[GRID_SIZE][GRID_SIZE] \
, int BU \
, int BV \
, int num_of_vals \
) { \
grid_kernel_gather<GRID_SIZE, BSTEP, ishalf>( \
BU \
, BV \
, uvo \
, vis \
, num_of_vals \
, gcf \
, grid \
); \
}
gridKernelGather(HalfGCF, true)
gridKernelGather(FullGCF, false)
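// Illustrative launch sketch (not from the original source): matching the 8x8-block /
// 16x16-thread decomposition described in the comment at the top of this file; the
// variable names are placeholders.
//   dim3 threads(16, 16), blocks(8, 8);
//   gridKernelGatherHalfGCF<<<blocks, threads>>>(uvo, vis, gcf, grid, BU, BV, num_of_vals);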
| 1505d3d02ecfa300c93d3c21d55918048f4ee1ef.cu | #include <cuComplex.h>
#include "common.h"
#include "metrix.h"
/* 128x128 bins, 8x8-blocks 16x16 threads each.
BU - u-number of bin
BV - v-number of bin
*/
template <
int grid_size
, int bstep
, bool is_half_gcf
>
__device__ __inline__ static void grid_kernel_gather(
int BU
, int BV
, const Pregridded * uvo
, const Double4c * vis
, int num_of_vals
, const cuDoubleComplex * gcf[]
, Double4c grid[grid_size][grid_size]
){
int gu = blockIdx.x * blockDim.x + threadIdx.x + BU * bstep;
int gv = blockIdx.y * blockDim.y + threadIdx.y + BV * bstep;
for (int i = 0; i < num_of_vals; i++){
int du, dv, supp;
du = gu - uvo[i].u;
dv = gv - uvo[i].v;
supp = uvo[i].gcf_layer_supp;
// We have input u and v translated by -supp/2!
if (du >= 0 && dv >= 0 && du < supp && dv < supp) {
complexd supportPixel;
#define __layeroff du * supp + dv
if (is_half_gcf) {
int index = uvo[i].gcf_layer_index;
// Negative index indicates that original w was mirrored
// and we shall negate the index to obtain correct
// offset *and* conjugate the result.
if (index < 0) {
supportPixel = gcf[-index][__layeroff];
supportPixel.y = - supportPixel.y;
} else {
supportPixel = gcf[index][__layeroff];
}
} else {
supportPixel = gcf[uvo[i].gcf_layer_index][__layeroff];
}
#define __ADDPOL(pol) grid[gu][gv].pol = cuCfma(supportPixel, vis[i].pol, grid[gu][gv].pol);
// accumulate all four polarisation products of the visibility into the grid point
__ADDPOL(XX)
__ADDPOL(XY)
__ADDPOL(YX)
__ADDPOL(YY)
}
}
}
#define gridKernelGather(suff, ishalf) \
extern "C" __global__ void gridKernelGather##suff( \
const Pregridded * uvo \
, const Double4c * vis \
, const cuDoubleComplex * gcf[] \
, Double4c grid[GRID_SIZE][GRID_SIZE] \
, int BU \
, int BV \
, int num_of_vals \
) { \
grid_kernel_gather<GRID_SIZE, BSTEP, ishalf>( \
BU \
, BV \
, uvo \
, vis \
, num_of_vals \
, gcf \
, grid \
); \
}
gridKernelGather(HalfGCF, true)
gridKernelGather(FullGCF, false)
|
710c5b7bde0ab97252978dafef6896e99d3db80f.hip | // !!! This is a file automatically generated by hipify!!!
/* File mr3.cu containing the CUDA code.
Rendering via OpenGL-CUDA interoperability.
Core of the code that computes the force between particles.
Created by: Martinez Noriega Edgar Josafat
*/
#include "cras36def.h"
/////////////
#include <sys/time.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <GL/glew.h>
#include <GL/freeglut.h>
// ***** CUDA includes
#include <hip/hip_runtime.h>
#include <nvcuvid.h>
#include <cudaGL.h>
#include <cuda_gl_interop.h>
#include <hip/hip_runtime_api.h>
#include <helper_cuda.h>
#define NMAX 8192*2
#define NTHRE 512
#define ATYPE 8
#define ATYPE2 (ATYPE * ATYPE)
#define ThreadsPB 512
//////For NaCl Optimized if_kernel
#define NTHREOPT 512
#define NDIVBIT 4
#define NDIV (1<<NDIVBIT)
#define NTHREOPT2 (NTHREOPT/NDIV)
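// With these settings NDIV (= 16) threads cooperate on the force acting on a
// single particle i, so each NTHREOPT-thread block handles NTHREOPT2 (= 32)
// particles; the per-thread partial sums are combined by the tree reduction
// at the end of nacl_kernel_if2.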
#if 0
typedef struct {
float r[3];
int atype;
} VG_XVEC;
#endif
typedef struct {
float pol;
float sigm;
float ipotro;
float pc;
float pd;
float zz;
} VG_MATRIX;
/////////GLOBAL Variables/////////////////////////////////////////
//VG_XVEC *d_x=NULL;
//VG_XVEC *vec=NULL;
int *d_atypemat;
float *d_force=NULL;
float *d_side,*d_sideh;
float *d_amass,*d_vl;
float *d_ekin1;
float *d_ekin,*d_xs,*d_mtemp,*d_mpres;
float *d_glColr;
float *d_glCoord;
float *d_poss;
int *d_atype;
int mem_flg=0;
int mem_flg2=0;
int mem_sp=5;
int mem_cpu=0;
int flg1=0,flg2=0,flg3=0;
extern GLuint g_possVBO, g_colorVBO;
extern struct cudaGraphicsResource* g_strucPossVBOCUDA;
extern struct cudaGraphicsResource* g_strucColorVBOCUDA;
__constant__
VG_MATRIX c_matrix[4]={[0].pol=1.250000,[0].sigm=2.340000,[0].ipotro=3.154574,[0].pc=0.072868,[0].pd=0.034699,[0].zz=1.000000,
[1].pol=1.000000,[1].sigm=2.755000,[1].ipotro=3.154574,[1].pc=0.485784,[1].pd=0.602893,[1].zz=-1.000000,
[2].pol=1.000000,[2].sigm=2.755000,[2].ipotro=3.154574,[2].pc=0.485784,[2].pd=0.602893,[2].zz=-1.000000,
[3].pol=0.750000,[3].sigm=3.170000,[3].ipotro=3.154574,[3].pc=5.031334,[3].pd=10.106042,[3].zz=1.000000};
__constant__
float d_color_table[5][4]={ {0.35 ,0.19 ,0.19 ,1.0},
{0.19 ,0.275,0.19 ,1.0},
{1.0 ,0.4 ,1.0 ,1.0},
{0.0 ,0.8 ,1.0 ,1.0},
{1.0 ,1.0 ,1.0 ,1.0} };
//////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////
////////FORCE CALCULATION WITH GPU/////////////////////////////////////
//////////////////////////////////////////////////////////////////////
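// update_coor_kernel: applies the velocity-scaling (thermostat) factor xs
// while adding the force term to the velocity, advances the position by one
// step and reflects the velocity component when a coordinate leaves the box.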
__global__
void update_coor_kernel(int n3, float *vl,float *xs,
float *fc,float *side, float *poss,
int *atype){
#ifdef KER
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < n3){
vl[tid] = (vl[tid]*(1-(*xs))+fc[tid])/(1+(*xs));
poss[tid] += vl[tid];
//cd[tid/3].r[tid % 3] += vl[tid];
//if (cd[tid/3].r[tid % 3] < 0 || cd[tid/3].r[tid % 3] > side[tid % 3]) vl[tid] *= -1;
if (poss[tid] < 0 || poss[tid] > side[tid % 3]) vl[tid] *= -1;
}
#endif
}
//////////////////////////////////////////////////////////////////////////
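// inter_if: accumulates into fi the force on the particle at xi due to the
// particle at xj, using the minimum-image convention (rintf) for the periodic
// box. The pair force combines an exponential repulsion, r^-6 and r^-8
// dispersion terms and a Coulomb term, with coefficients read from the
// constant table c_matrix.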
__device__ __inline__
void inter_if(float xj[3], float xi[3], float fi[3], int t, float xmax,
float xmax1) {
#ifdef KER
int k;
float dn2, r, inr, inr2, inr4, inr8, d3, dr[3];
float pb = (float) (0.338e-19 / (14.39 * 1.60219e-19)), dphir;
dn2 = 0.0f;
for (k = 0; k < 3; k++) {
dr[k] = xi[k] - xj[k];
dr[k] -= rintf(dr[k] * xmax1) * xmax;
dn2 += dr[k] * dr[k];
}
r = sqrtf(dn2);
#if 1
inr = 1.0f / r;
#elif 0
if(dn2 != 0.0f) inr = 1.0f / r;
else inr = 0.0f;
#elif 0
if(dn2 == 0.0f) inr = 0.0f;
else inr = 1.0f / r;
#else
inr = 1.0f / r;
if(dn2 == 0.0f) inr = 0.0f;
#endif
inr2 = inr * inr;
inr4 = inr2 * inr2;
inr8 = inr4 * inr4;
d3 = pb * c_matrix[t].pol
* expf((c_matrix[t].sigm - r) * c_matrix[t].ipotro);
dphir =
(d3 * c_matrix[t].ipotro * inr - 6.0f * c_matrix[t].pc * inr8
- 8.0f * c_matrix[t].pd * inr8 * inr2
+ inr2 * inr * c_matrix[t].zz);
#if 1
if (dn2 == 0.0f)
dphir = 0.0f;
#endif
for (k = 0; k < 3; k++)
fi[k] += dphir * dr[k];
#endif
}
__global__
void nacl_kernel_if2(int n, int nat, float xmax, float *fvec, float *poss, int *atype) {
#ifdef KER
int tid = threadIdx.x;
int jdiv = tid / NTHREOPT2;
int i = blockIdx.x * NTHREOPT2 + (tid & (NTHREOPT2 - 1)); // Same + (tid %16)
int j, k;
float xmax1 = 1.0f / xmax;
int atypei;
float xi[3];
//__shared__ VG_XVEC s_xj[NTHREOPT];
__shared__ float s_xjj[NTHREOPT][3];
__shared__ int s_xa[NTHREOPT];
__shared__ float s_fi[NTHREOPT][3];
for (k = 0; k < 3; k++)
s_fi[tid][k] = 0.0f;
for (k = 0; k < 3; k++){
xi[k] = poss[i*3+k];
//xi[k] = x[i].r[k];
}
atypei = atype[i] * nat;
//atypei = x[i].atype * nat;
int na;
na = n / NTHREOPT;
na = na * NTHREOPT;
for (j = 0; j < na; j += NTHREOPT) {
__syncthreads();
//s_xj [tid] = x [j + tid];
s_xjj [tid][0] = poss [j*3 + tid*3];
s_xjj [tid][1] = poss [j*3 + tid*3+1];
s_xjj [tid][2] = poss [j*3 + tid*3+2];
s_xa [tid] = atype [j + tid];
__syncthreads();
#pragma unroll 16
for (int js = jdiv; js < NTHREOPT; js += NDIV)
//inter_if(s_xj[js].r, xi, s_fi[tid], atypei + s_xa[js], xmax, xmax1);
inter_if(s_xjj[js], xi, s_fi[tid], atypei + s_xa[js], xmax, xmax1);
}
for (j = na + jdiv; j < n; j += NDIV) {
//inter_if(x[j].r, xi, s_fi[tid], atypei + x[j].atype, xmax, xmax1);
inter_if(poss+j*3, xi, s_fi[tid], atypei + atype[j], xmax, xmax1);
}
#if NTHREOPT>=512 && NTHREOPT2<=256
__syncthreads();
if(tid<256) for(k=0;k<3;k++) s_fi[tid][k]+=s_fi[tid+256][k];
#endif
#if NTHREOPT>=256 && NTHREOPT2<=128
__syncthreads();
if (tid < 128)
for (k = 0; k < 3; k++)
s_fi[tid][k] += s_fi[tid + 128][k];
#endif
#if NTHREOPT>=128 && NTHREOPT2<=64
__syncthreads();
if (tid < 64)
for (k = 0; k < 3; k++)
s_fi[tid][k] += s_fi[tid + 64][k];
#endif
#if NTHREOPT>=64 && NTHREOPT2<=32
__syncthreads();
if (tid < 32)
for (k = 0; k < 3; k++)
s_fi[tid][k] += s_fi[tid + 32][k];
#endif
#if NTHREOPT2<=16
if (tid < 16)
for (k = 0; k < 3; k++)
s_fi[tid][k] += s_fi[tid + 16][k];
#endif
#if NTHREOPT2<=8
if(tid<8) for(k=0;k<3;k++) s_fi[tid][k]+=s_fi[tid+8][k];
#endif
#if NTHREOPT2<=4
if(tid<4) for(k=0;k<3;k++) s_fi[tid][k]+=s_fi[tid+4][k];
#endif
#if NTHREOPT2<=2
if(tid<2) for(k=0;k<3;k++) s_fi[tid][k]+=s_fi[tid+2][k];
#endif
#if NTHREOPT2<=1
if(tid<1) for(k=0;k<3;k++) s_fi[tid][k]+=s_fi[tid+1][k];
#endif
if (jdiv == 0)
for (k = 0; k < 3; k++)
fvec[i * 3 + k] = s_fi[tid][k];
#endif
}
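// velforce_kernel: converts the force into a velocity increment (scaled by
// hsq over the particle mass), optionally writes the box-centred coordinates
// into the OpenGL position VBO (INTEROP), and accumulates the per-block
// kinetic-energy contribution m*v^2 into ekin1[blockIdx.x] through a
// shared-memory tree reduction. The single-block "reduction" kernel below
// finishes that sum, derives the instantaneous temperature and pressure and
// advances the thermostat variable xs.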
__global__
void velforce_kernel(int n3, float *fc, float *a_mass, float *vl,
int *atype_mat, float hsq,float *ekin1,
int *atype, float *poss, float *sideh, float *coord){
#ifdef KER
__shared__ float cache [ThreadsPB];
int indx = threadIdx.x;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
cache [indx] = 0;
if (tid < n3 ){
#ifdef INTEROP
coord[tid] = poss[tid] - sideh[tid % 3]; //sideh[tid % 3]; // for graphics VBO -- Position
#endif
fc[tid]-= fc[tid]/(n3/3);
fc[tid] *= hsq/a_mass[atype_mat[atype[tid/3]]];
cache [indx] = vl[tid]*vl[tid]*a_mass[atype_mat[atype[tid/3]]];
}
__syncthreads();
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (indx < s)
{
cache[indx] += cache[indx + s];
}
__syncthreads();
}
if (indx == 0) ekin1[blockIdx.x] = cache [0];
#endif
}
__global__
void reduction (float *ekin,float *mtemp,float *mpres,float *xs,float tscale,
float nden, float vir,int s_num,int w_num,float rtemp,
float lq,float hsq,float *ekin1, int limi){
#ifdef KER
__shared__ float cache [NTHREOPT];
int indx = threadIdx.x;
cache [indx] = (indx < limi) ? ekin1[indx]:0.0f;
__syncthreads();
for (unsigned int s=NTHREOPT/2; s>0; s>>=1){
if (indx < s)
{
cache[indx] += cache[indx + s];
}
__syncthreads();
}
if (indx == 0){
*ekin = cache [0];
*ekin /= hsq;
*mtemp = tscale * (*ekin);
*mpres = nden / 3.f * ((*ekin) - (vir)) / (s_num + w_num);
*xs += (*mtemp - rtemp) / lq * hsq *.5f;
}
#endif
}
#ifdef INTEROP
__global__
void colorn4(int n4,float *vl,int *atype_mat, float *colorvbo, int *atype){
#ifdef KER
int tid = threadIdx.x + blockIdx.x * blockDim.x;
float d0;
float d0aux[4];
d0 = (vl[tid/4]*vl[tid/4]+vl[tid/4+1]*vl[tid/4+1]+vl[tid/4+2]*vl[tid/4+2])*500;
d0aux[0] = d0;
d0aux[1] = d0/3;
d0aux[2] = d0/3;
d0aux[3] = 0;
if (tid < n4){
colorvbo[tid] = d_color_table[atype_mat[atype[tid/4]]][tid%4] + d0aux[tid%4];
}
#endif
}
#endif
#ifdef DP
__global__
void md_loop_cuda ( int n3, float *vl,float *xs,float *fc,float *side,
int n, int nat, float xmax,
float *a_mass, int *atype_mat, float hsq,float *ekin1,
float *ekin,float *mtemp,float *mpres,float tscale,
float nden, float vir,int s_num,int w_num,float rtemp,
float lq,int limi,
int md_step, float *sideh, float *colorvbo, float *poss,
int *atype, float *coord)
{
#if 1
int blocksPGrid = (n3 + ThreadsPB - 1)/(ThreadsPB);
dim3 THREADS(NTHRE);
dim3 BLOCKS((n3 + ThreadsPB - 1)/(ThreadsPB));
dim3 threads(NTHREOPT);
dim3 grid((n * NDIV + NTHREOPT - 1) / NTHREOPT);
dim3 colorgridn4(((n*4) + ThreadsPB - 1)/(ThreadsPB));
for(int md_loop = 0; md_loop < md_step; md_loop++){
hipLaunchKernelGGL(( update_coor_kernel), dim3(BLOCKS),dim3(THREADS), 0, 0, n3,vl,xs,fc,side,poss,atype);
hipLaunchKernelGGL(( nacl_kernel_if2), dim3(grid), dim3(threads), 0, 0, n, nat, xmax,fc,poss,atype);
hipLaunchKernelGGL(( velforce_kernel), dim3(BLOCKS),dim3(THREADS), 0, 0, n3,fc,a_mass,vl,atype_mat,hsq,ekin1,atype,poss,sideh,coord);
hipLaunchKernelGGL(( reduction), dim3(1),dim3(NTHRE), 0, 0, ekin,mtemp,mpres,xs,tscale,nden,vir,s_num,w_num,rtemp,lq,hsq,ekin1,blocksPGrid);
}
#ifdef INTEROP
hipLaunchKernelGGL(( colorn4), dim3(colorgridn4),dim3(THREADS), 0, 0, n*4,vl,atype_mat,colorvbo,atype);
#endif
#endif
}
#endif
//////////////////NaCl Optimized
///////////////////////////////
extern "C"
void mdlop(int n3,int grape_flg,double phi [3],double *phir,double *iphi, double *vir,int s_num3,
timeval time_v,double *md_time0,double *md_time,int *m_clock,int md_step,double *mtemp,
double tscale,double *mpres,double nden,int s_num,int w_num,double rtemp,double lq,
double x[], int n, int atype[], int nat,
double pol[], double sigm[], double ipotro[],
double pc[], double pd[],double zz[],
int tblno, double xmax, int periodicflag,
double force[],
double hsq,double a_mass [], int atype_mat [], double *ekin,double *vl,
double *xs,double side [],int *firstmalloc, double sideh[]){
  //////////////VARIABLES FROM THE BEGINNING/////////////////
int i,j;
float xmaxf;
if((periodicflag & 1)==0) xmax*=2.0;
xmaxf=xmax;
int n4 = n*4;
/////////////////////////////////////////////////////////
int blocksPGrid = (n3 + ThreadsPB - 1)/(ThreadsPB);
dim3 THREADS(NTHRE);
dim3 BLOCKS((n3 + ThreadsPB - 1)/(ThreadsPB));
dim3 threads(NTHREOPT);
dim3 grid((n * NDIV + NTHREOPT - 1) / NTHREOPT);
dim3 colorgridn4((n4 + ThreadsPB - 1)/(ThreadsPB));
float fxs = *xs;
float fside[3],*ffc, fsideh[3];
float *vla;
//VG_XVEC *veca;
int p = 0;
float hsqf = hsq;
float *fvl,fa_mass[4];
float ftscale = tscale,fnden = nden,frtemp = rtemp,flq = lq,fvir = 0;
float fmtemp = *mtemp,fmpres = *mpres;
float *fposs, *cord;
int *auxatype;
vla = (float*) malloc(n3*sizeof(float));
cord = (float*) malloc(n3*sizeof(float));
//veca = (VG_XVEC*)malloc((n+NTHREOPT2)*sizeof(VG_XVEC));
if(*firstmalloc == 0){
printf("CUDA malloc time...\n");
// Allocating memory for float conversion.
ffc = (float*) malloc(n3*sizeof(float));
fvl = (float*) malloc(n3*sizeof(float));
//vec = (VG_XVEC*) malloc((NMAX+NTHREOPT2)*sizeof(VG_XVEC));
auxatype = (int*) malloc((NMAX+NTHREOPT2)*sizeof(int));
fposs = (float*) malloc((NMAX+NTHREOPT2)*3*sizeof(float));
// Conversion from Double to Float
for (p=0;p<4;p++) fa_mass[p] = (float) a_mass[p];
for (p=0;p<3;p++) fside[p] = (float) side[p];
for (p=0;p<3;p++) fsideh[p] = (float) sideh[p];
for (p=0;p<n3;p++){
fvl [p] = (float) *(vl +p);
ffc [p] = (float) *(force +p);
}
for (i = 0; i < (n + NTHREOPT2 - 1) / NTHREOPT2 * NTHREOPT2; i++) {
if (i < n) {
for (j = 0; j < 3; j++) {
fposs[i * 3 + j] = x[i * 3 + j];
}
auxatype[i] = atype[i];
}
else {
for (j = 0; j < 3; j++) {
fposs[i * 3 + j] = 0.0f;
}
auxatype[i] = 0;
}
}
#ifdef INTEROP
// As for CUDA-OpenGL Inter-operability
// Unregister and clean OpenGL and CUDA resources
hipGraphicsUnregisterResource(g_strucPossVBOCUDA);
hipGraphicsUnregisterResource(g_strucColorVBOCUDA);
glDeleteBuffers(1,&g_possVBO);
glDeleteBuffers(1,&g_colorVBO);
// Register CUDA and OpenGL Interop
    // Creation of a shared buffer between CUDA and OpenGL
int n = n3/3;
// For Position
glGenBuffers(1, &g_possVBO);
glBindBuffer(GL_ARRAY_BUFFER, g_possVBO);
//unsigned int size = (S_NUM_MAX+W_NUM_MAX*w_site+100) * 3 * sizeof(float);
unsigned int size = (NMAX+NTHREOPT2)*3*sizeof(float);
glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
// For Color
glGenBuffers(1, &g_colorVBO);
glBindBuffer(GL_ARRAY_BUFFER, g_colorVBO);
size = (NMAX+NTHREOPT2)*4*sizeof(float);
glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
checkCudaErrors(hipGraphicsGLRegisterBuffer(&g_strucPossVBOCUDA,g_possVBO,hipGraphicsMapFlagsNone));
checkCudaErrors(hipGraphicsGLRegisterBuffer(&g_strucColorVBOCUDA,g_colorVBO,hipGraphicsMapFlagsNone));
// Position
size_t vbosizepos;
checkCudaErrors(hipGraphicsMapResources(1,&g_strucPossVBOCUDA,0));
checkCudaErrors(hipGraphicsResourceGetMappedPointer((void**)&d_glCoord,&vbosizepos,g_strucPossVBOCUDA));
// Color
size_t vbosizecol;
checkCudaErrors(hipGraphicsMapResources(1,&g_strucColorVBOCUDA,0));
checkCudaErrors(hipGraphicsResourceGetMappedPointer((void**)&d_glColr,&vbosizecol,g_strucColorVBOCUDA));
#endif
    // Free CUDA memory, in case it was already allocated
// checkCudaErrors(hipFree(d_x));
checkCudaErrors(hipFree(d_poss));
checkCudaErrors(hipFree(d_force));
checkCudaErrors(hipFree(d_side));
checkCudaErrors(hipFree(d_sideh));
checkCudaErrors(hipFree(d_amass));
checkCudaErrors(hipFree(d_vl));
checkCudaErrors(hipFree(d_atypemat));
checkCudaErrors(hipFree(d_ekin));
checkCudaErrors(hipFree(d_xs));
checkCudaErrors(hipFree(d_mtemp));
checkCudaErrors(hipFree(d_mpres));
checkCudaErrors(hipFree(d_ekin1));
checkCudaErrors(hipFree(d_atype));
// Allocate global memory to GPU
//checkCudaErrors(hipMalloc((void**)&d_x,sizeof(VG_XVEC)* (NMAX + NTHREOPT2)));
checkCudaErrors(hipMalloc((void**)&d_poss,sizeof(float)*(NMAX + NTHREOPT2)*3));
checkCudaErrors(hipMalloc((void**)&d_force,sizeof(float)*(NMAX + NTHREOPT2)*3));
checkCudaErrors(hipMalloc((void**)&d_side,3*sizeof(float)));
checkCudaErrors(hipMalloc((void**)&d_sideh,3*sizeof(float)));
checkCudaErrors(hipMalloc((void**)&d_amass,4*sizeof(float)));
checkCudaErrors(hipMalloc((void**)&d_vl,n3*sizeof(float)));
checkCudaErrors(hipMalloc((void**)&d_atypemat,20*sizeof(int)));
checkCudaErrors(hipMalloc((void**)&d_ekin,sizeof(float)));
checkCudaErrors(hipMalloc((void**)&d_xs,sizeof(float)));
checkCudaErrors(hipMalloc((void**)&d_mtemp,sizeof(float)));
checkCudaErrors(hipMalloc((void**)&d_mpres,sizeof(float)));
checkCudaErrors(hipMalloc((void**)&d_ekin1,blocksPGrid*sizeof(float)));
checkCudaErrors(hipMalloc((void**)&d_atype,sizeof(int)*(NMAX + NTHREOPT2)));
// Copy memory from CPU to GPU
//checkCudaErrors(hipMemcpy(d_x,vec,sizeof(VG_XVEC)*((n + NTHREOPT2 - 1) / NTHREOPT2 * NTHREOPT2),hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_poss,fposs,sizeof(float)*3*((n + NTHREOPT2 - 1) / NTHREOPT2 * NTHREOPT2),hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_side,fside,sizeof(float)*3,hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_sideh,fsideh,sizeof(float)*3,hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_mtemp,&fmtemp,sizeof(float),hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_mpres,&fmpres,sizeof(float),hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_xs,&fxs,sizeof(float),hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_vl,fvl,sizeof(float)*n3,hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_amass,fa_mass,sizeof(float)*4,hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_atypemat,atype_mat,sizeof(int)*20,hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_force,ffc,sizeof(float)*n*3,hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_atype,auxatype,sizeof(int)*((n + NTHREOPT2 - 1) / NTHREOPT2 * NTHREOPT2),hipMemcpyHostToDevice));
// Free the memory used to convert from Double to Float
free(ffc);
free(fvl);
free(fposs);
free(auxatype);
}
else{
#ifdef INTEROP
// Position
size_t vbosizepos;
checkCudaErrors(hipGraphicsMapResources(1,&g_strucPossVBOCUDA,0));
checkCudaErrors(hipGraphicsResourceGetMappedPointer((void**)&d_glCoord,&vbosizepos,g_strucPossVBOCUDA));
// Color
size_t vbosizecol;
checkCudaErrors(hipGraphicsMapResources(1,&g_strucColorVBOCUDA,0));
checkCudaErrors(hipGraphicsResourceGetMappedPointer((void**)&d_glColr,&vbosizecol,g_strucColorVBOCUDA));
#endif
}
///////Md_loop///////////////////////////////////////////////
#ifdef DP
#ifndef TIME_MEMORY
gettimeofday(&time_v,NULL);
*md_time0 = (time_v.tv_sec + time_v.tv_usec / 1000000.0);
#endif
// for (int m=0;m<1000;m++){
hipLaunchKernelGGL(( md_loop_cuda), dim3(1),dim3(1), 0, 0, n3,d_vl,d_xs,d_force,d_side,
n,nat,xmaxf,
d_amass,d_atypemat,hsqf,d_ekin1,
d_ekin,d_mtemp,d_mpres,ftscale,fnden,fvir,s_num,w_num,frtemp,flq,blocksPGrid,
md_step,d_sideh,d_glColr,d_poss,d_atype,d_glCoord);
//}
*m_clock+=md_step;
hipDeviceSynchronize();
#ifndef TIME_MEMORY
gettimeofday(&time_v,NULL);
*md_time = (time_v.tv_sec + time_v.tv_usec / 1000000.0);
#endif
#else
gettimeofday(&time_v,NULL);
*md_time0 = (time_v.tv_sec + time_v.tv_usec / 1000000.0);
//for (int m=0;m<1000;m++){
for(int md_loop = 0; md_loop < md_step; md_loop++){
hipLaunchKernelGGL(( update_coor_kernel), dim3(BLOCKS),dim3(THREADS), 0, 0, n3,d_vl,d_xs,d_force,d_side,d_poss,d_atype);
hipLaunchKernelGGL(( nacl_kernel_if2), dim3(grid), dim3(threads), 0, 0, n, nat, xmaxf, d_force,d_poss,d_atype);
hipLaunchKernelGGL(( velforce_kernel), dim3(BLOCKS),dim3(THREADS), 0, 0, n3,d_force,d_amass,d_vl,d_atypemat,hsqf,d_ekin1,d_atype,d_poss,d_sideh,d_glCoord);
hipLaunchKernelGGL(( reduction), dim3(1),dim3(threads), 0, 0, d_ekin,d_mtemp,d_mpres,d_xs,ftscale,fnden,fvir,s_num,w_num,frtemp,flq,hsqf,d_ekin1,blocksPGrid);
}
#ifdef INTEROP
hipLaunchKernelGGL(( colorn4), dim3(colorgridn4),dim3(THREADS), 0, 0, n4,d_vl,d_atypemat,d_glColr,d_atype); // Just update after the cycle. For color output.
#endif
//}
*m_clock+=md_step;
hipDeviceSynchronize();
gettimeofday(&time_v,NULL);
*md_time = (time_v.tv_sec + time_v.tv_usec / 1000000.0);
#endif
/////////////////Copy back to the CPU
//CUDA_SAFE_CALL(hipMemcpy(forcef,d_force,sizeof(float)*n*3,hipMemcpyDeviceToHost));
//CUDA_SAFE_CALL(hipMemcpy(&fxs,d_xs,sizeof(float),hipMemcpyDeviceToHost));
#ifdef TIME_MEMORY
gettimeofday(&time_v,NULL);
*md_time0 = (time_v.tv_sec + time_v.tv_usec / 1000000.0);
#endif
#ifdef INTEROP
checkCudaErrors(hipGraphicsUnmapResources(1,&g_strucPossVBOCUDA,0));
checkCudaErrors(hipGraphicsUnmapResources(1,&g_strucColorVBOCUDA,0));
#endif
#ifndef INTEROP
checkCudaErrors(hipMemcpy(vla,d_vl,n3*sizeof(float),hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(cord,d_poss,n3*sizeof(float),hipMemcpyDeviceToHost));
for(p=0;p<n3;p++) *(vl+p) = (double) vla[p];
for(i=0;i<n;i++)for(j=0;j<3;j++) *(x+i*3+j) = (double)cord[j+i*3];
#endif
#ifdef TIME_MEMORY
gettimeofday(&time_v,NULL);
*md_time = (time_v.tv_sec + time_v.tv_usec / 1000000.0);
#endif
free(vla);
free(cord);
*firstmalloc = 1;
}
710c5b7bde0ab97252978dafef6896e99d3db80f.cu | /* File mr3.cu containing the CUDA code.
Rendering via OpenGL-CUDA interoperability
Core of the code that computes the force between particles
Created by: Martinez Noriega Edgar Josafat
*/
#include "cras36def.h"
/////////////
#include <sys/time.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <GL/glew.h>
#include <GL/freeglut.h>
// ***** CUDA includes
#include <cuda.h>
#include <nvcuvid.h>
#include <cudaGL.h>
#include <cuda_gl_interop.h>
#include <cuda_runtime_api.h>
#include <helper_cuda.h>
#define NMAX 8192*2
#define NTHRE 512
#define ATYPE 8
#define ATYPE2 (ATYPE * ATYPE)
#define ThreadsPB 512
//////For NaCl Optimized if_kernel
#define NTHREOPT 512
#define NDIVBIT 4
#define NDIV (1<<NDIVBIT)
#define NTHREOPT2 (NTHREOPT/NDIV)
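// With these settings NDIV (= 16) threads cooperate on the force acting on a
// single particle i, so each NTHREOPT-thread block handles NTHREOPT2 (= 32)
// particles; the per-thread partial sums are combined by the tree reduction
// at the end of nacl_kernel_if2.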
#if 0
typedef struct {
float r[3];
int atype;
} VG_XVEC;
#endif
typedef struct {
float pol;
float sigm;
float ipotro;
float pc;
float pd;
float zz;
} VG_MATRIX;
/////////GLOBAL Variables/////////////////////////////////////////
//VG_XVEC *d_x=NULL;
//VG_XVEC *vec=NULL;
int *d_atypemat;
float *d_force=NULL;
float *d_side,*d_sideh;
float *d_amass,*d_vl;
float *d_ekin1;
float *d_ekin,*d_xs,*d_mtemp,*d_mpres;
float *d_glColr;
float *d_glCoord;
float *d_poss;
int *d_atype;
int mem_flg=0;
int mem_flg2=0;
int mem_sp=5;
int mem_cpu=0;
int flg1=0,flg2=0,flg3=0;
extern GLuint g_possVBO, g_colorVBO;
extern struct cudaGraphicsResource* g_strucPossVBOCUDA;
extern struct cudaGraphicsResource* g_strucColorVBOCUDA;
__constant__
VG_MATRIX c_matrix[4]={[0].pol=1.250000,[0].sigm=2.340000,[0].ipotro=3.154574,[0].pc=0.072868,[0].pd=0.034699,[0].zz=1.000000,
[1].pol=1.000000,[1].sigm=2.755000,[1].ipotro=3.154574,[1].pc=0.485784,[1].pd=0.602893,[1].zz=-1.000000,
[2].pol=1.000000,[2].sigm=2.755000,[2].ipotro=3.154574,[2].pc=0.485784,[2].pd=0.602893,[2].zz=-1.000000,
[3].pol=0.750000,[3].sigm=3.170000,[3].ipotro=3.154574,[3].pc=5.031334,[3].pd=10.106042,[3].zz=1.000000};
__constant__
float d_color_table[5][4]={ {0.35 ,0.19 ,0.19 ,1.0},
{0.19 ,0.275,0.19 ,1.0},
{1.0 ,0.4 ,1.0 ,1.0},
{0.0 ,0.8 ,1.0 ,1.0},
{1.0 ,1.0 ,1.0 ,1.0} };
//////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////
////////FORCE CALCULATION WITH GPU/////////////////////////////////////
//////////////////////////////////////////////////////////////////////
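// update_coor_kernel: applies the velocity-scaling (thermostat) factor xs
// while adding the force term to the velocity, advances the position by one
// step and reflects the velocity component when a coordinate leaves the box.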
__global__
void update_coor_kernel(int n3, float *vl,float *xs,
float *fc,float *side, float *poss,
int *atype){
#ifdef KER
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < n3){
vl[tid] = (vl[tid]*(1-(*xs))+fc[tid])/(1+(*xs));
poss[tid] += vl[tid];
//cd[tid/3].r[tid % 3] += vl[tid];
//if (cd[tid/3].r[tid % 3] < 0 || cd[tid/3].r[tid % 3] > side[tid % 3]) vl[tid] *= -1;
if (poss[tid] < 0 || poss[tid] > side[tid % 3]) vl[tid] *= -1;
}
#endif
}
//////////////////////////////////////////////////////////////////////////
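// inter_if: accumulates into fi the force on the particle at xi due to the
// particle at xj, using the minimum-image convention (rintf) for the periodic
// box. The pair force combines an exponential repulsion, r^-6 and r^-8
// dispersion terms and a Coulomb term, with coefficients read from the
// constant table c_matrix.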
__device__ __inline__
void inter_if(float xj[3], float xi[3], float fi[3], int t, float xmax,
float xmax1) {
#ifdef KER
int k;
float dn2, r, inr, inr2, inr4, inr8, d3, dr[3];
float pb = (float) (0.338e-19 / (14.39 * 1.60219e-19)), dphir;
dn2 = 0.0f;
for (k = 0; k < 3; k++) {
dr[k] = xi[k] - xj[k];
dr[k] -= rintf(dr[k] * xmax1) * xmax;
dn2 += dr[k] * dr[k];
}
r = sqrtf(dn2);
#if 1
inr = 1.0f / r;
#elif 0
if(dn2 != 0.0f) inr = 1.0f / r;
else inr = 0.0f;
#elif 0
if(dn2 == 0.0f) inr = 0.0f;
else inr = 1.0f / r;
#else
inr = 1.0f / r;
if(dn2 == 0.0f) inr = 0.0f;
#endif
inr2 = inr * inr;
inr4 = inr2 * inr2;
inr8 = inr4 * inr4;
d3 = pb * c_matrix[t].pol
* expf((c_matrix[t].sigm - r) * c_matrix[t].ipotro);
dphir =
(d3 * c_matrix[t].ipotro * inr - 6.0f * c_matrix[t].pc * inr8
- 8.0f * c_matrix[t].pd * inr8 * inr2
+ inr2 * inr * c_matrix[t].zz);
#if 1
if (dn2 == 0.0f)
dphir = 0.0f;
#endif
for (k = 0; k < 3; k++)
fi[k] += dphir * dr[k];
#endif
}
__global__
void nacl_kernel_if2(int n, int nat, float xmax, float *fvec, float *poss, int *atype) {
#ifdef KER
int tid = threadIdx.x;
int jdiv = tid / NTHREOPT2;
int i = blockIdx.x * NTHREOPT2 + (tid & (NTHREOPT2 - 1)); // Same + (tid %16)
int j, k;
float xmax1 = 1.0f / xmax;
int atypei;
float xi[3];
//__shared__ VG_XVEC s_xj[NTHREOPT];
__shared__ float s_xjj[NTHREOPT][3];
__shared__ int s_xa[NTHREOPT];
__shared__ float s_fi[NTHREOPT][3];
for (k = 0; k < 3; k++)
s_fi[tid][k] = 0.0f;
for (k = 0; k < 3; k++){
xi[k] = poss[i*3+k];
//xi[k] = x[i].r[k];
}
atypei = atype[i] * nat;
//atypei = x[i].atype * nat;
int na;
na = n / NTHREOPT;
na = na * NTHREOPT;
for (j = 0; j < na; j += NTHREOPT) {
__syncthreads();
//s_xj [tid] = x [j + tid];
s_xjj [tid][0] = poss [j*3 + tid*3];
s_xjj [tid][1] = poss [j*3 + tid*3+1];
s_xjj [tid][2] = poss [j*3 + tid*3+2];
s_xa [tid] = atype [j + tid];
__syncthreads();
#pragma unroll 16
for (int js = jdiv; js < NTHREOPT; js += NDIV)
//inter_if(s_xj[js].r, xi, s_fi[tid], atypei + s_xa[js], xmax, xmax1);
inter_if(s_xjj[js], xi, s_fi[tid], atypei + s_xa[js], xmax, xmax1);
}
for (j = na + jdiv; j < n; j += NDIV) {
//inter_if(x[j].r, xi, s_fi[tid], atypei + x[j].atype, xmax, xmax1);
inter_if(poss+j*3, xi, s_fi[tid], atypei + atype[j], xmax, xmax1);
}
#if NTHREOPT>=512 && NTHREOPT2<=256
__syncthreads();
if(tid<256) for(k=0;k<3;k++) s_fi[tid][k]+=s_fi[tid+256][k];
#endif
#if NTHREOPT>=256 && NTHREOPT2<=128
__syncthreads();
if (tid < 128)
for (k = 0; k < 3; k++)
s_fi[tid][k] += s_fi[tid + 128][k];
#endif
#if NTHREOPT>=128 && NTHREOPT2<=64
__syncthreads();
if (tid < 64)
for (k = 0; k < 3; k++)
s_fi[tid][k] += s_fi[tid + 64][k];
#endif
#if NTHREOPT>=64 && NTHREOPT2<=32
__syncthreads();
if (tid < 32)
for (k = 0; k < 3; k++)
s_fi[tid][k] += s_fi[tid + 32][k];
#endif
#if NTHREOPT2<=16
if (tid < 16)
for (k = 0; k < 3; k++)
s_fi[tid][k] += s_fi[tid + 16][k];
#endif
#if NTHREOPT2<=8
if(tid<8) for(k=0;k<3;k++) s_fi[tid][k]+=s_fi[tid+8][k];
#endif
#if NTHREOPT2<=4
if(tid<4) for(k=0;k<3;k++) s_fi[tid][k]+=s_fi[tid+4][k];
#endif
#if NTHREOPT2<=2
if(tid<2) for(k=0;k<3;k++) s_fi[tid][k]+=s_fi[tid+2][k];
#endif
#if NTHREOPT2<=1
if(tid<1) for(k=0;k<3;k++) s_fi[tid][k]+=s_fi[tid+1][k];
#endif
if (jdiv == 0)
for (k = 0; k < 3; k++)
fvec[i * 3 + k] = s_fi[tid][k];
#endif
}
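// velforce_kernel: converts the force into a velocity increment (scaled by
// hsq over the particle mass), optionally writes the box-centred coordinates
// into the OpenGL position VBO (INTEROP), and accumulates the per-block
// kinetic-energy contribution m*v^2 into ekin1[blockIdx.x] through a
// shared-memory tree reduction. The single-block "reduction" kernel below
// finishes that sum, derives the instantaneous temperature and pressure and
// advances the thermostat variable xs.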
__global__
void velforce_kernel(int n3, float *fc, float *a_mass, float *vl,
int *atype_mat, float hsq,float *ekin1,
int *atype, float *poss, float *sideh, float *coord){
#ifdef KER
__shared__ float cache [ThreadsPB];
int indx = threadIdx.x;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
cache [indx] = 0;
if (tid < n3 ){
#ifdef INTEROP
coord[tid] = poss[tid] - sideh[tid % 3]; //sideh[tid % 3]; // for graphics VBO -- Position
#endif
fc[tid]-= fc[tid]/(n3/3);
fc[tid] *= hsq/a_mass[atype_mat[atype[tid/3]]];
cache [indx] = vl[tid]*vl[tid]*a_mass[atype_mat[atype[tid/3]]];
}
__syncthreads();
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (indx < s)
{
cache[indx] += cache[indx + s];
}
__syncthreads();
}
if (indx == 0) ekin1[blockIdx.x] = cache [0];
#endif
}
__global__
void reduction (float *ekin,float *mtemp,float *mpres,float *xs,float tscale,
float nden, float vir,int s_num,int w_num,float rtemp,
float lq,float hsq,float *ekin1, int limi){
#ifdef KER
__shared__ float cache [NTHREOPT];
int indx = threadIdx.x;
cache [indx] = (indx < limi) ? ekin1[indx]:0.0f;
__syncthreads();
for (unsigned int s=NTHREOPT/2; s>0; s>>=1){
if (indx < s)
{
cache[indx] += cache[indx + s];
}
__syncthreads();
}
if (indx == 0){
*ekin = cache [0];
*ekin /= hsq;
*mtemp = tscale * (*ekin);
*mpres = nden / 3.f * ((*ekin) - (vir)) / (s_num + w_num);
*xs += (*mtemp - rtemp) / lq * hsq *.5f;
}
#endif
}
#ifdef INTEROP
__global__
void colorn4(int n4,float *vl,int *atype_mat, float *colorvbo, int *atype){
#ifdef KER
int tid = threadIdx.x + blockIdx.x * blockDim.x;
float d0;
float d0aux[4];
d0 = (vl[tid/4]*vl[tid/4]+vl[tid/4+1]*vl[tid/4+1]+vl[tid/4+2]*vl[tid/4+2])*500;
d0aux[0] = d0;
d0aux[1] = d0/3;
d0aux[2] = d0/3;
d0aux[3] = 0;
if (tid < n4){
colorvbo[tid] = d_color_table[atype_mat[atype[tid/4]]][tid%4] + d0aux[tid%4];
}
#endif
}
#endif
#ifdef DP
__global__
void md_loop_cuda ( int n3, float *vl,float *xs,float *fc,float *side,
int n, int nat, float xmax,
float *a_mass, int *atype_mat, float hsq,float *ekin1,
float *ekin,float *mtemp,float *mpres,float tscale,
float nden, float vir,int s_num,int w_num,float rtemp,
float lq,int limi,
int md_step, float *sideh, float *colorvbo, float *poss,
int *atype, float *coord)
{
#if 1
int blocksPGrid = (n3 + ThreadsPB - 1)/(ThreadsPB);
dim3 THREADS(NTHRE);
dim3 BLOCKS((n3 + ThreadsPB - 1)/(ThreadsPB));
dim3 threads(NTHREOPT);
dim3 grid((n * NDIV + NTHREOPT - 1) / NTHREOPT);
dim3 colorgridn4(((n*4) + ThreadsPB - 1)/(ThreadsPB));
for(int md_loop = 0; md_loop < md_step; md_loop++){
update_coor_kernel<<<BLOCKS,THREADS>>>(n3,vl,xs,fc,side,poss,atype);
nacl_kernel_if2<<<grid, threads>>>(n, nat, xmax,fc,poss,atype);
velforce_kernel<<<BLOCKS,THREADS>>>(n3,fc,a_mass,vl,atype_mat,hsq,ekin1,atype,poss,sideh,coord);
reduction<<<1,NTHRE>>>(ekin,mtemp,mpres,xs,tscale,nden,vir,s_num,w_num,rtemp,lq,hsq,ekin1,blocksPGrid);
}
#ifdef INTEROP
colorn4<<<colorgridn4,THREADS>>>(n*4,vl,atype_mat,colorvbo,atype);
#endif
#endif
}
#endif
//////////////////NaCl Optimized
///////////////////////////////
extern "C"
void mdlop(int n3,int grape_flg,double phi [3],double *phir,double *iphi, double *vir,int s_num3,
timeval time_v,double *md_time0,double *md_time,int *m_clock,int md_step,double *mtemp,
double tscale,double *mpres,double nden,int s_num,int w_num,double rtemp,double lq,
double x[], int n, int atype[], int nat,
double pol[], double sigm[], double ipotro[],
double pc[], double pd[],double zz[],
int tblno, double xmax, int periodicflag,
double force[],
double hsq,double a_mass [], int atype_mat [], double *ekin,double *vl,
double *xs,double side [],int *firstmalloc, double sideh[]){
  //////////////VARIABLES FROM THE BEGINNING/////////////////
int i,j;
float xmaxf;
if((periodicflag & 1)==0) xmax*=2.0;
xmaxf=xmax;
int n4 = n*4;
/////////////////////////////////////////////////////////
int blocksPGrid = (n3 + ThreadsPB - 1)/(ThreadsPB);
dim3 THREADS(NTHRE);
dim3 BLOCKS((n3 + ThreadsPB - 1)/(ThreadsPB));
dim3 threads(NTHREOPT);
dim3 grid((n * NDIV + NTHREOPT - 1) / NTHREOPT);
dim3 colorgridn4((n4 + ThreadsPB - 1)/(ThreadsPB));
float fxs = *xs;
float fside[3],*ffc, fsideh[3];
float *vla;
//VG_XVEC *veca;
int p = 0;
float hsqf = hsq;
float *fvl,fa_mass[4];
float ftscale = tscale,fnden = nden,frtemp = rtemp,flq = lq,fvir = 0;
float fmtemp = *mtemp,fmpres = *mpres;
float *fposs, *cord;
int *auxatype;
vla = (float*) malloc(n3*sizeof(float));
cord = (float*) malloc(n3*sizeof(float));
//veca = (VG_XVEC*)malloc((n+NTHREOPT2)*sizeof(VG_XVEC));
if(*firstmalloc == 0){
printf("CUDA malloc time...\n");
// Allocating memory for float conversion.
ffc = (float*) malloc(n3*sizeof(float));
fvl = (float*) malloc(n3*sizeof(float));
//vec = (VG_XVEC*) malloc((NMAX+NTHREOPT2)*sizeof(VG_XVEC));
auxatype = (int*) malloc((NMAX+NTHREOPT2)*sizeof(int));
fposs = (float*) malloc((NMAX+NTHREOPT2)*3*sizeof(float));
// Conversion from Double to Float
for (p=0;p<4;p++) fa_mass[p] = (float) a_mass[p];
for (p=0;p<3;p++) fside[p] = (float) side[p];
for (p=0;p<3;p++) fsideh[p] = (float) sideh[p];
for (p=0;p<n3;p++){
fvl [p] = (float) *(vl +p);
ffc [p] = (float) *(force +p);
}
for (i = 0; i < (n + NTHREOPT2 - 1) / NTHREOPT2 * NTHREOPT2; i++) {
if (i < n) {
for (j = 0; j < 3; j++) {
fposs[i * 3 + j] = x[i * 3 + j];
}
auxatype[i] = atype[i];
}
else {
for (j = 0; j < 3; j++) {
fposs[i * 3 + j] = 0.0f;
}
auxatype[i] = 0;
}
}
#ifdef INTEROP
// As for CUDA-OpenGL Inter-operability
// Unregister and clean OpenGL and CUDA resources
cudaGraphicsUnregisterResource(g_strucPossVBOCUDA);
cudaGraphicsUnregisterResource(g_strucColorVBOCUDA);
glDeleteBuffers(1,&g_possVBO);
glDeleteBuffers(1,&g_colorVBO);
// Register CUDA and OpenGL Interop
    // Creation of a shared buffer between CUDA and OpenGL
int n = n3/3;
// For Position
glGenBuffers(1, &g_possVBO);
glBindBuffer(GL_ARRAY_BUFFER, g_possVBO);
//unsigned int size = (S_NUM_MAX+W_NUM_MAX*w_site+100) * 3 * sizeof(float);
unsigned int size = (NMAX+NTHREOPT2)*3*sizeof(float);
glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
// For Color
glGenBuffers(1, &g_colorVBO);
glBindBuffer(GL_ARRAY_BUFFER, g_colorVBO);
size = (NMAX+NTHREOPT2)*4*sizeof(float);
glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
checkCudaErrors(cudaGraphicsGLRegisterBuffer(&g_strucPossVBOCUDA,g_possVBO,cudaGraphicsMapFlagsNone));
checkCudaErrors(cudaGraphicsGLRegisterBuffer(&g_strucColorVBOCUDA,g_colorVBO,cudaGraphicsMapFlagsNone));
// Position
size_t vbosizepos;
checkCudaErrors(cudaGraphicsMapResources(1,&g_strucPossVBOCUDA,0));
checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void**)&d_glCoord,&vbosizepos,g_strucPossVBOCUDA));
// Color
size_t vbosizecol;
checkCudaErrors(cudaGraphicsMapResources(1,&g_strucColorVBOCUDA,0));
checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void**)&d_glColr,&vbosizecol,g_strucColorVBOCUDA));
#endif
    // Free CUDA memory, in case it was already allocated
// checkCudaErrors(cudaFree(d_x));
checkCudaErrors(cudaFree(d_poss));
checkCudaErrors(cudaFree(d_force));
checkCudaErrors(cudaFree(d_side));
checkCudaErrors(cudaFree(d_sideh));
checkCudaErrors(cudaFree(d_amass));
checkCudaErrors(cudaFree(d_vl));
checkCudaErrors(cudaFree(d_atypemat));
checkCudaErrors(cudaFree(d_ekin));
checkCudaErrors(cudaFree(d_xs));
checkCudaErrors(cudaFree(d_mtemp));
checkCudaErrors(cudaFree(d_mpres));
checkCudaErrors(cudaFree(d_ekin1));
checkCudaErrors(cudaFree(d_atype));
// Allocate global memory to GPU
//checkCudaErrors(cudaMalloc((void**)&d_x,sizeof(VG_XVEC)* (NMAX + NTHREOPT2)));
checkCudaErrors(cudaMalloc((void**)&d_poss,sizeof(float)*(NMAX + NTHREOPT2)*3));
checkCudaErrors(cudaMalloc((void**)&d_force,sizeof(float)*(NMAX + NTHREOPT2)*3));
checkCudaErrors(cudaMalloc((void**)&d_side,3*sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&d_sideh,3*sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&d_amass,4*sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&d_vl,n3*sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&d_atypemat,20*sizeof(int)));
checkCudaErrors(cudaMalloc((void**)&d_ekin,sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&d_xs,sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&d_mtemp,sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&d_mpres,sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&d_ekin1,blocksPGrid*sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&d_atype,sizeof(int)*(NMAX + NTHREOPT2)));
// Copy memory from CPU to GPU
//checkCudaErrors(cudaMemcpy(d_x,vec,sizeof(VG_XVEC)*((n + NTHREOPT2 - 1) / NTHREOPT2 * NTHREOPT2),cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_poss,fposs,sizeof(float)*3*((n + NTHREOPT2 - 1) / NTHREOPT2 * NTHREOPT2),cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_side,fside,sizeof(float)*3,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_sideh,fsideh,sizeof(float)*3,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_mtemp,&fmtemp,sizeof(float),cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_mpres,&fmpres,sizeof(float),cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_xs,&fxs,sizeof(float),cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_vl,fvl,sizeof(float)*n3,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_amass,fa_mass,sizeof(float)*4,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_atypemat,atype_mat,sizeof(int)*20,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_force,ffc,sizeof(float)*n*3,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_atype,auxatype,sizeof(int)*((n + NTHREOPT2 - 1) / NTHREOPT2 * NTHREOPT2),cudaMemcpyHostToDevice));
// Free the memory used to convert from Double to Float
free(ffc);
free(fvl);
free(fposs);
free(auxatype);
}
else{
#ifdef INTEROP
// Position
size_t vbosizepos;
checkCudaErrors(cudaGraphicsMapResources(1,&g_strucPossVBOCUDA,0));
checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void**)&d_glCoord,&vbosizepos,g_strucPossVBOCUDA));
// Color
size_t vbosizecol;
checkCudaErrors(cudaGraphicsMapResources(1,&g_strucColorVBOCUDA,0));
checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void**)&d_glColr,&vbosizecol,g_strucColorVBOCUDA));
#endif
}
///////Md_loop///////////////////////////////////////////////
#ifdef DP
#ifndef TIME_MEMORY
gettimeofday(&time_v,NULL);
*md_time0 = (time_v.tv_sec + time_v.tv_usec / 1000000.0);
#endif
// for (int m=0;m<1000;m++){
md_loop_cuda<<<1,1>>>(n3,d_vl,d_xs,d_force,d_side,
n,nat,xmaxf,
d_amass,d_atypemat,hsqf,d_ekin1,
d_ekin,d_mtemp,d_mpres,ftscale,fnden,fvir,s_num,w_num,frtemp,flq,blocksPGrid,
md_step,d_sideh,d_glColr,d_poss,d_atype,d_glCoord);
//}
*m_clock+=md_step;
cudaDeviceSynchronize();
#ifndef TIME_MEMORY
gettimeofday(&time_v,NULL);
*md_time = (time_v.tv_sec + time_v.tv_usec / 1000000.0);
#endif
#else
gettimeofday(&time_v,NULL);
*md_time0 = (time_v.tv_sec + time_v.tv_usec / 1000000.0);
//for (int m=0;m<1000;m++){
for(int md_loop = 0; md_loop < md_step; md_loop++){
update_coor_kernel<<<BLOCKS,THREADS>>>(n3,d_vl,d_xs,d_force,d_side,d_poss,d_atype);
nacl_kernel_if2<<<grid, threads>>>(n, nat, xmaxf, d_force,d_poss,d_atype);
velforce_kernel<<<BLOCKS,THREADS>>>(n3,d_force,d_amass,d_vl,d_atypemat,hsqf,d_ekin1,d_atype,d_poss,d_sideh,d_glCoord);
reduction<<<1,threads>>>(d_ekin,d_mtemp,d_mpres,d_xs,ftscale,fnden,fvir,s_num,w_num,frtemp,flq,hsqf,d_ekin1,blocksPGrid);
}
#ifdef INTEROP
colorn4<<<colorgridn4,THREADS>>>(n4,d_vl,d_atypemat,d_glColr,d_atype); // Just update after the cycle. For color output.
#endif
//}
*m_clock+=md_step;
cudaDeviceSynchronize();
gettimeofday(&time_v,NULL);
*md_time = (time_v.tv_sec + time_v.tv_usec / 1000000.0);
#endif
/////////////////Copy back to the CPU
//CUDA_SAFE_CALL(cudaMemcpy(forcef,d_force,sizeof(float)*n*3,cudaMemcpyDeviceToHost));
//CUDA_SAFE_CALL(cudaMemcpy(&fxs,d_xs,sizeof(float),cudaMemcpyDeviceToHost));
#ifdef TIME_MEMORY
gettimeofday(&time_v,NULL);
*md_time0 = (time_v.tv_sec + time_v.tv_usec / 1000000.0);
#endif
#ifdef INTEROP
checkCudaErrors(cudaGraphicsUnmapResources(1,&g_strucPossVBOCUDA,0));
checkCudaErrors(cudaGraphicsUnmapResources(1,&g_strucColorVBOCUDA,0));
#endif
#ifndef INTEROP
checkCudaErrors(cudaMemcpy(vla,d_vl,n3*sizeof(float),cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(cord,d_poss,n3*sizeof(float),cudaMemcpyDeviceToHost));
for(p=0;p<n3;p++) *(vl+p) = (double) vla[p];
for(i=0;i<n;i++)for(j=0;j<3;j++) *(x+i*3+j) = (double)cord[j+i*3];
#endif
#ifdef TIME_MEMORY
gettimeofday(&time_v,NULL);
*md_time = (time_v.tv_sec + time_v.tv_usec / 1000000.0);
#endif
free(vla);
free(cord);
*firstmalloc = 1;
}
|
88c1c9f0a6756e1599303df61436eab848b400e2.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <cuml/common/cuml_allocator.hpp>
#include <functions/linearReg.cuh>
#include <raft/random/rng.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace Functions {
template <typename T>
struct LinRegLossInputs {
T tolerance;
T n_rows;
T n_cols;
int len;
};
template <typename T>
class LinRegLossTest : public ::testing::TestWithParam<LinRegLossInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<LinRegLossInputs<T>>::GetParam();
int len = params.len;
int n_rows = params.n_rows;
int n_cols = params.n_cols;
T *labels, *coef;
raft::handle_t handle;
hipStream_t stream = handle.get_stream();
raft::allocate(in, len);
raft::allocate(out, 1);
raft::allocate(out_lasso, 1);
raft::allocate(out_ridge, 1);
raft::allocate(out_elasticnet, 1);
raft::allocate(out_grad, n_cols);
raft::allocate(out_lasso_grad, n_cols);
raft::allocate(out_ridge_grad, n_cols);
raft::allocate(out_elasticnet_grad, n_cols);
raft::allocate(out_ref, 1);
raft::allocate(out_lasso_ref, 1);
raft::allocate(out_ridge_ref, 1);
raft::allocate(out_elasticnet_ref, 1);
raft::allocate(out_grad_ref, n_cols);
raft::allocate(out_lasso_grad_ref, n_cols);
raft::allocate(out_ridge_grad_ref, n_cols);
raft::allocate(out_elasticnet_grad_ref, n_cols);
raft::allocate(labels, params.n_rows);
raft::allocate(coef, params.n_cols);
T h_in[len] = {0.1, 0.35, -0.9, -1.4, 2.0, 3.1};
raft::update_device(in, h_in, len, stream);
T h_labels[n_rows] = {0.3, 2.0, -1.1};
raft::update_device(labels, h_labels, n_rows, stream);
T h_coef[n_cols] = {0.35, -0.24};
raft::update_device(coef, h_coef, n_cols, stream);
T h_out_ref[1] = {1.854842};
raft::update_device(out_ref, h_out_ref, 1, stream);
T h_out_lasso_ref[1] = {2.2088};
raft::update_device(out_lasso_ref, h_out_lasso_ref, 1, stream);
T h_out_ridge_ref[1] = {1.9629};
raft::update_device(out_ridge_ref, h_out_ridge_ref, 1, stream);
T h_out_elasticnet_ref[1] = {2.0858};
raft::update_device(out_elasticnet_ref, h_out_elasticnet_ref, 1, stream);
T h_out_grad_ref[n_cols] = {-0.56995, -3.12486};
raft::update_device(out_grad_ref, h_out_grad_ref, n_cols, stream);
T h_out_lasso_grad_ref[n_cols] = {0.03005, -3.724866};
raft::update_device(out_lasso_grad_ref, h_out_lasso_grad_ref, n_cols,
stream);
T h_out_ridge_grad_ref[n_cols] = {-0.14995, -3.412866};
raft::update_device(out_ridge_grad_ref, h_out_ridge_grad_ref, n_cols,
stream);
T h_out_elasticnet_grad_ref[n_cols] = {-0.05995, -3.568866};
raft::update_device(out_elasticnet_grad_ref, h_out_elasticnet_grad_ref,
n_cols, stream);
T alpha = 0.6;
T l1_ratio = 0.5;
linearRegLoss(handle, in, params.n_rows, params.n_cols, labels, coef, out,
penalty::NONE, alpha, l1_ratio, stream);
raft::update_device(in, h_in, len, stream);
linearRegLossGrads(handle, in, params.n_rows, params.n_cols, labels, coef,
out_grad, penalty::NONE, alpha, l1_ratio, stream);
raft::update_device(in, h_in, len, stream);
linearRegLoss(handle, in, params.n_rows, params.n_cols, labels, coef,
out_lasso, penalty::L1, alpha, l1_ratio, stream);
raft::update_device(in, h_in, len, stream);
linearRegLossGrads(handle, in, params.n_rows, params.n_cols, labels, coef,
out_lasso_grad, penalty::L1, alpha, l1_ratio, stream);
raft::update_device(in, h_in, len, stream);
linearRegLoss(handle, in, params.n_rows, params.n_cols, labels, coef,
out_ridge, penalty::L2, alpha, l1_ratio, stream);
linearRegLossGrads(handle, in, params.n_rows, params.n_cols, labels, coef,
out_ridge_grad, penalty::L2, alpha, l1_ratio, stream);
raft::update_device(in, h_in, len, stream);
linearRegLoss(handle, in, params.n_rows, params.n_cols, labels, coef,
out_elasticnet, penalty::ELASTICNET, alpha, l1_ratio, stream);
linearRegLossGrads(handle, in, params.n_rows, params.n_cols, labels, coef,
out_elasticnet_grad, penalty::ELASTICNET, alpha,
l1_ratio, stream);
raft::update_device(in, h_in, len, stream);
CUDA_CHECK(hipFree(labels));
CUDA_CHECK(hipFree(coef));
}
void TearDown() override {
CUDA_CHECK(hipFree(in));
CUDA_CHECK(hipFree(out));
CUDA_CHECK(hipFree(out_lasso));
CUDA_CHECK(hipFree(out_ridge));
CUDA_CHECK(hipFree(out_elasticnet));
CUDA_CHECK(hipFree(out_grad));
CUDA_CHECK(hipFree(out_lasso_grad));
CUDA_CHECK(hipFree(out_ridge_grad));
CUDA_CHECK(hipFree(out_elasticnet_grad));
CUDA_CHECK(hipFree(out_ref));
CUDA_CHECK(hipFree(out_lasso_ref));
CUDA_CHECK(hipFree(out_ridge_ref));
CUDA_CHECK(hipFree(out_elasticnet_ref));
CUDA_CHECK(hipFree(out_grad_ref));
CUDA_CHECK(hipFree(out_lasso_grad_ref));
CUDA_CHECK(hipFree(out_ridge_grad_ref));
CUDA_CHECK(hipFree(out_elasticnet_grad_ref));
}
protected:
LinRegLossInputs<T> params;
T *in;
T *out, *out_lasso, *out_ridge, *out_elasticnet;
T *out_ref, *out_lasso_ref, *out_ridge_ref, *out_elasticnet_ref;
T *out_grad, *out_lasso_grad, *out_ridge_grad, *out_elasticnet_grad;
T *out_grad_ref, *out_lasso_grad_ref, *out_ridge_grad_ref,
*out_elasticnet_grad_ref;
std::shared_ptr<deviceAllocator> allocator;
};
const std::vector<LinRegLossInputs<float>> inputsf = {{0.01f, 3, 2, 6}};
const std::vector<LinRegLossInputs<double>> inputsd = {{0.01, 3, 2, 6}};
typedef LinRegLossTest<float> LinRegLossTestF;
TEST_P(LinRegLossTestF, Result) {
ASSERT_TRUE(
devArrMatch(out_ref, out, 1, raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_ref, out_lasso, 1,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_ref, out_ridge, 1,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_ref, out_elasticnet, 1,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_grad_ref, out_grad, params.n_cols,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_grad_ref, out_lasso_grad, params.n_cols,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_grad_ref, out_ridge_grad, params.n_cols,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_grad_ref, out_elasticnet_grad,
params.n_cols,
raft::CompareApprox<float>(params.tolerance)));
}
typedef LinRegLossTest<double> LinRegLossTestD;
TEST_P(LinRegLossTestD, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, 1,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_ref, out_lasso, 1,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_ref, out_ridge, 1,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_ref, out_elasticnet, 1,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_grad_ref, out_grad, params.n_cols,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_grad_ref, out_lasso_grad, params.n_cols,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_grad_ref, out_ridge_grad, params.n_cols,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_grad_ref, out_elasticnet_grad,
params.n_cols,
raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(LinRegLossTests, LinRegLossTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(LinRegLossTests, LinRegLossTestD,
::testing::ValuesIn(inputsd));
} // end namespace Functions
} // end namespace MLCommon
| 88c1c9f0a6756e1599303df61436eab848b400e2.cu | /*
* Copyright (c) 2020-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <cuml/common/cuml_allocator.hpp>
#include <functions/linearReg.cuh>
#include <raft/random/rng.cuh>
#include "test_utils.h"
namespace MLCommon {
namespace Functions {
template <typename T>
struct LinRegLossInputs {
T tolerance;
T n_rows;
T n_cols;
int len;
};
template <typename T>
class LinRegLossTest : public ::testing::TestWithParam<LinRegLossInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<LinRegLossInputs<T>>::GetParam();
int len = params.len;
int n_rows = params.n_rows;
int n_cols = params.n_cols;
T *labels, *coef;
raft::handle_t handle;
cudaStream_t stream = handle.get_stream();
raft::allocate(in, len);
raft::allocate(out, 1);
raft::allocate(out_lasso, 1);
raft::allocate(out_ridge, 1);
raft::allocate(out_elasticnet, 1);
raft::allocate(out_grad, n_cols);
raft::allocate(out_lasso_grad, n_cols);
raft::allocate(out_ridge_grad, n_cols);
raft::allocate(out_elasticnet_grad, n_cols);
raft::allocate(out_ref, 1);
raft::allocate(out_lasso_ref, 1);
raft::allocate(out_ridge_ref, 1);
raft::allocate(out_elasticnet_ref, 1);
raft::allocate(out_grad_ref, n_cols);
raft::allocate(out_lasso_grad_ref, n_cols);
raft::allocate(out_ridge_grad_ref, n_cols);
raft::allocate(out_elasticnet_grad_ref, n_cols);
raft::allocate(labels, params.n_rows);
raft::allocate(coef, params.n_cols);
T h_in[len] = {0.1, 0.35, -0.9, -1.4, 2.0, 3.1};
raft::update_device(in, h_in, len, stream);
T h_labels[n_rows] = {0.3, 2.0, -1.1};
raft::update_device(labels, h_labels, n_rows, stream);
T h_coef[n_cols] = {0.35, -0.24};
raft::update_device(coef, h_coef, n_cols, stream);
T h_out_ref[1] = {1.854842};
raft::update_device(out_ref, h_out_ref, 1, stream);
T h_out_lasso_ref[1] = {2.2088};
raft::update_device(out_lasso_ref, h_out_lasso_ref, 1, stream);
T h_out_ridge_ref[1] = {1.9629};
raft::update_device(out_ridge_ref, h_out_ridge_ref, 1, stream);
T h_out_elasticnet_ref[1] = {2.0858};
raft::update_device(out_elasticnet_ref, h_out_elasticnet_ref, 1, stream);
T h_out_grad_ref[n_cols] = {-0.56995, -3.12486};
raft::update_device(out_grad_ref, h_out_grad_ref, n_cols, stream);
T h_out_lasso_grad_ref[n_cols] = {0.03005, -3.724866};
raft::update_device(out_lasso_grad_ref, h_out_lasso_grad_ref, n_cols,
stream);
T h_out_ridge_grad_ref[n_cols] = {-0.14995, -3.412866};
raft::update_device(out_ridge_grad_ref, h_out_ridge_grad_ref, n_cols,
stream);
T h_out_elasticnet_grad_ref[n_cols] = {-0.05995, -3.568866};
raft::update_device(out_elasticnet_grad_ref, h_out_elasticnet_grad_ref,
n_cols, stream);
T alpha = 0.6;
T l1_ratio = 0.5;
linearRegLoss(handle, in, params.n_rows, params.n_cols, labels, coef, out,
penalty::NONE, alpha, l1_ratio, stream);
raft::update_device(in, h_in, len, stream);
linearRegLossGrads(handle, in, params.n_rows, params.n_cols, labels, coef,
out_grad, penalty::NONE, alpha, l1_ratio, stream);
raft::update_device(in, h_in, len, stream);
linearRegLoss(handle, in, params.n_rows, params.n_cols, labels, coef,
out_lasso, penalty::L1, alpha, l1_ratio, stream);
raft::update_device(in, h_in, len, stream);
linearRegLossGrads(handle, in, params.n_rows, params.n_cols, labels, coef,
out_lasso_grad, penalty::L1, alpha, l1_ratio, stream);
raft::update_device(in, h_in, len, stream);
linearRegLoss(handle, in, params.n_rows, params.n_cols, labels, coef,
out_ridge, penalty::L2, alpha, l1_ratio, stream);
linearRegLossGrads(handle, in, params.n_rows, params.n_cols, labels, coef,
out_ridge_grad, penalty::L2, alpha, l1_ratio, stream);
raft::update_device(in, h_in, len, stream);
linearRegLoss(handle, in, params.n_rows, params.n_cols, labels, coef,
out_elasticnet, penalty::ELASTICNET, alpha, l1_ratio, stream);
linearRegLossGrads(handle, in, params.n_rows, params.n_cols, labels, coef,
out_elasticnet_grad, penalty::ELASTICNET, alpha,
l1_ratio, stream);
raft::update_device(in, h_in, len, stream);
CUDA_CHECK(cudaFree(labels));
CUDA_CHECK(cudaFree(coef));
}
void TearDown() override {
CUDA_CHECK(cudaFree(in));
CUDA_CHECK(cudaFree(out));
CUDA_CHECK(cudaFree(out_lasso));
CUDA_CHECK(cudaFree(out_ridge));
CUDA_CHECK(cudaFree(out_elasticnet));
CUDA_CHECK(cudaFree(out_grad));
CUDA_CHECK(cudaFree(out_lasso_grad));
CUDA_CHECK(cudaFree(out_ridge_grad));
CUDA_CHECK(cudaFree(out_elasticnet_grad));
CUDA_CHECK(cudaFree(out_ref));
CUDA_CHECK(cudaFree(out_lasso_ref));
CUDA_CHECK(cudaFree(out_ridge_ref));
CUDA_CHECK(cudaFree(out_elasticnet_ref));
CUDA_CHECK(cudaFree(out_grad_ref));
CUDA_CHECK(cudaFree(out_lasso_grad_ref));
CUDA_CHECK(cudaFree(out_ridge_grad_ref));
CUDA_CHECK(cudaFree(out_elasticnet_grad_ref));
}
protected:
LinRegLossInputs<T> params;
T *in;
T *out, *out_lasso, *out_ridge, *out_elasticnet;
T *out_ref, *out_lasso_ref, *out_ridge_ref, *out_elasticnet_ref;
T *out_grad, *out_lasso_grad, *out_ridge_grad, *out_elasticnet_grad;
T *out_grad_ref, *out_lasso_grad_ref, *out_ridge_grad_ref,
*out_elasticnet_grad_ref;
std::shared_ptr<deviceAllocator> allocator;
};
const std::vector<LinRegLossInputs<float>> inputsf = {{0.01f, 3, 2, 6}};
const std::vector<LinRegLossInputs<double>> inputsd = {{0.01, 3, 2, 6}};
typedef LinRegLossTest<float> LinRegLossTestF;
TEST_P(LinRegLossTestF, Result) {
ASSERT_TRUE(
devArrMatch(out_ref, out, 1, raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_ref, out_lasso, 1,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_ref, out_ridge, 1,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_ref, out_elasticnet, 1,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_grad_ref, out_grad, params.n_cols,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_grad_ref, out_lasso_grad, params.n_cols,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_grad_ref, out_ridge_grad, params.n_cols,
raft::CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_grad_ref, out_elasticnet_grad,
params.n_cols,
raft::CompareApprox<float>(params.tolerance)));
}
typedef LinRegLossTest<double> LinRegLossTestD;
TEST_P(LinRegLossTestD, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, 1,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_ref, out_lasso, 1,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_ref, out_ridge, 1,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_ref, out_elasticnet, 1,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_grad_ref, out_grad, params.n_cols,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_lasso_grad_ref, out_lasso_grad, params.n_cols,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ridge_grad_ref, out_ridge_grad, params.n_cols,
raft::CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_elasticnet_grad_ref, out_elasticnet_grad,
params.n_cols,
raft::CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(LinRegLossTests, LinRegLossTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(LinRegLossTests, LinRegLossTestD,
::testing::ValuesIn(inputsd));
} // end namespace Functions
} // end namespace MLCommon
|
938eb80fbd188873d45600de6bc00b3500832e84.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
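// Typical use: wrap CUDA runtime calls, e.g. gpuErrchk(hipMalloc(&d0_m, bytes));
// ("bytes" here is only illustrative).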
#define PRECISION 0.00001
#define TAM_BLOCO 8
#define uN 5.0
#define uS 5.0
#define uW 0.0
#define uE 10.0
#define GPU_ZERO 0
#define GPU_ONE 1
//CPU variables
double h_h1, h_h2;
double h_denominador1, h_denominador2;
double *h0_m, *d0_m, *h1_m, *d1_m;
double h_parcial1, h_parcial2;
int h_dimensaoX, h_dimensaoY, laps = 0, i, j;
int h_dimensaoX_mat0, h_dimensaoY_mat0, h_dimensaoX_mat1, h_dimensaoY_mat1;
//GPU variables
__constant__ double omega = 1.5;
__constant__ double d_h1, d_h2;
__constant__ double d_denominador1, d_denominador2;
__constant__ int d_dimensaoX, d_dimensaoY;
__constant__ double d_parcial1, d_parcial2;
__constant__ int d_dimensaoX_mat0, d_dimensaoY_mat0;
__constant__ int d_dimensaoX_mat1, d_dimensaoY_mat1;
FILE *arquivo;
clock_t start, end;
double tempo;
//CPU functions
//Prints the matrix to the output file
void printMat(){
int i, j;
for(i = 0; i < h_dimensaoX_mat1; i++){
for(j = 0; j < h_dimensaoY_mat1; j++){
fprintf(arquivo, "%lf", h1_m[i * h_dimensaoY_mat1 + j]);
fprintf(arquivo, " ");
}
for(j = 0; j < h_dimensaoY_mat0; j++){
fprintf(arquivo, "%lf", h0_m[i * h_dimensaoY_mat0 + j]);
if(j != h_dimensaoY_mat0 - 1) fprintf(arquivo, " ");
}
if(i != h_dimensaoX_mat1 - 1)
fprintf(arquivo, "\n");
}
}
//Initializes the matrix with the boundary values specified by the problem
void setupM(){
int i,j;
for(i = 0; i < h_dimensaoX_mat0; i++){
for(j = 0; j < h_dimensaoY_mat0; j++){
if(i == 0){
h0_m[i * h_dimensaoY_mat0 + j] = uN;
}else if(i == (h_dimensaoX_mat0 - 1)){
h0_m[i * h_dimensaoY_mat0 + j] = uS;
}else if(j == 0){
h0_m[i * h_dimensaoY_mat0 + j] = uW;
}else if(j == h_dimensaoY_mat0 - 1){
h0_m[i * h_dimensaoY_mat0 + j] = uE;
}
}
}
for(i = 0; i < h_dimensaoX_mat1; i++){
for(j = 0; j < h_dimensaoY_mat1; j++){
if(i == 0){
h1_m[i * h_dimensaoY_mat1 + j] = uN;
}else if(i == (h_dimensaoX_mat1 - 1)){
h1_m[i * h_dimensaoY_mat1 + j] = uS;
}else if(j == 0){
h1_m[i * h_dimensaoY_mat1 + j] = uW;
}
}
}
}
//GPU functions
//Functions "a" and "b" specified by the problem
__device__ double a(int i, int j){
double x = i * d_h1;
double y = j * d_h2;
return 500 * x * (1 - x) * (0.5 - y);
}
__device__ double b(int i, int j){
double x = i * d_h1;
double y = j * d_h2;
return 500 * y * (1 - y) * (x - 0.5);
}
//Functions "n", "s", "w", "e" specified by the problem
__device__ double n(int i, int j){
return (d_parcial2 - (d_h2 * b(i,j))/d_denominador2);
}
__device__ double s(int i, int j){
return (d_parcial2 + (d_h2 * b(i,j))/d_denominador2);
}
__device__ double e(int i, int j){
return (d_parcial1 - (d_h1 * a(i,j))/d_denominador1);
}
__device__ double w(int i, int j){
return (d_parcial1 + (d_h1 * a(i,j))/d_denominador1);
}
//Computes the weighted combination of the points neighboring the point being updated
__device__ double somaDosPontosVizinhos(int i, int j, int dimensaoY, double *m){
double temp = 0;
temp += w(i,j) * m[(i - 1) * dimensaoY + j];
temp += e(i,j) * m[(i + 1) * dimensaoY + j];
temp += s(i,j) * m[i * dimensaoY + (j - 1)];
temp += n(i,j) * m[i * dimensaoY + (j + 1)];
return temp;
}
//Main kernels of the program. Each one works on a set of matrix points,
//taking a weighted average between the current value of the point being
//updated and its four adjacent points. How much each value weighs is
//determined by the omega factor of the function, which in this case is fixed.
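// In formula form, the update applied to every interior point (i,j) is
//   u[i][j] = (1 - omega) * u[i][j]
//           + omega * ( w(i,j)*u[i-1][j] + e(i,j)*u[i+1][j]
//                     + s(i,j)*u[i][j-1] + n(i,j)*u[i][j+1] )
// "Red" points (i + j even) and "blue" points (i + j odd) are updated in
// separate kernel launches, so each sweep reads only values of the other
// colour: a red-black successive over-relaxation scheme with omega = 1.5.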
__global__ void vermelhos(double *m, int device){
int tidx = blockIdx.x * blockDim.x + threadIdx.x;
int tidy = blockIdx.y * blockDim.y + threadIdx.y;
if(device == 1){
if(tidx != 0 && tidx < (d_dimensaoX_mat1 - 1) && tidy != 0 && tidy < (d_dimensaoY_mat1 - 1)){
if((tidx + tidy) % 2 == 0){
m[tidx * d_dimensaoY_mat1 + tidy] *= (1 - omega);
m[tidx * d_dimensaoY_mat1 + tidy] += omega * somaDosPontosVizinhos(tidx, tidy, d_dimensaoY_mat1, m);
}
}
}else{
if(tidx != 0 && tidx < (d_dimensaoX_mat0 - 1) && tidy !=0 && tidy < (d_dimensaoY_mat0 - 1)){
if((tidx + tidy) % 2 == 0){
m[tidx * d_dimensaoY_mat0 + tidy] *= (1 - omega);
m[tidx * d_dimensaoY_mat0 + tidy] += omega * somaDosPontosVizinhos(tidx, tidy, d_dimensaoY_mat0, m);
}
}
}
}
__global__ void azuis(double *m, int device){
int tidx = blockIdx.x * blockDim.x + threadIdx.x;
int tidy = blockIdx.y * blockDim.y + threadIdx.y;
if(device == 1){
if(tidx != 0 && tidx < (d_dimensaoX_mat1 - 1) && tidy != 0 && tidy < (d_dimensaoY_mat1 - 1)){
if((tidx + tidy) % 2 == 1){
m[tidx * d_dimensaoY_mat1 + tidy] *= (1 - omega);
m[tidx * d_dimensaoY_mat1 + tidy] += omega * somaDosPontosVizinhos(tidx, tidy, d_dimensaoY_mat1, m);
}
}
}else{
if(tidx != 0 && tidx < (d_dimensaoX_mat0 - 1) && tidy !=0 && tidy < (d_dimensaoY_mat0 - 1)){
if((tidx + tidy) % 2 == 1){
m[tidx * d_dimensaoY_mat0 + tidy] *= (1 - omega);
m[tidx * d_dimensaoY_mat0 + tidy] += omega * somaDosPontosVizinhos(tidx, tidy, d_dimensaoY_mat0, m);
}
}
}
}
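//--------------------------------------------------------------------------
//Illustrative sketch (editor's addition, not part of the original solver):
//the red-black SOR update that vermelhos/azuis apply to a single interior
//point, written out as a plain host function. The four neighbor weights play
//the role of the device functions n/s/e/w above, and relax corresponds to
//the fixed relaxation factor omega. All names here are hypothetical.
//--------------------------------------------------------------------------
inline double sorPointUpdate(double oldValue, double relax,
double weightN, double northVal, double weightS, double southVal,
double weightE, double eastVal, double weightW, double westVal){
//weighted average of the four adjacent points
double weighted = weightN * northVal + weightS * southVal
+ weightE * eastVal + weightW * westVal;
//successive over-relaxation: blend the old value with the neighbor average
return (1.0 - relax) * oldValue + relax * weighted;
}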
int main(int argc, char** argv){
//Initial checks to guarantee that the program is run with the
//correct initial conditions
if(argc != 4){
printf("Nmero incorreto de parmetros:\n");
printf("Insira as dimensoes e a quantidade de iteraes\n");
printf("\tUtilize o formato: %s <Dimensao X> <Dimensao Y> <Iteraes>\n", argv[0]);
exit(-1);
}
//Initializing all the values needed to transfer to the GPU and to perform
//the program's computations
h_dimensaoX = atoi(argv[1]);
h_dimensaoY = atoi(argv[2]);
laps = atoi(argv[3]);
h_h1 = 1.0/(h_dimensaoX + 1);
h_h2 = 1.0/(h_dimensaoY + 1);
h_dimensaoX += 2;
h_dimensaoY += 2;
h_denominador1 = 4*(1 + (pow(h_h1,2)/pow(h_h2,2)));
h_denominador2 = 4*(1 + (pow(h_h2,2)/pow(h_h1,2)));
h_parcial1 = 2/h_denominador1;
h_parcial2 = 2/h_denominador2;
h_dimensaoX_mat0 = h_dimensaoX;
h_dimensaoY_mat0 = (75 * h_dimensaoY)/100;
h_dimensaoX_mat1 = h_dimensaoX;
h_dimensaoY_mat1 = h_dimensaoY - h_dimensaoY_mat0;
//Allocating the matrix on the CPU and initializing it
h0_m = (double *) calloc(h_dimensaoX_mat0 * h_dimensaoY_mat0, sizeof(double));
h1_m = (double *) calloc(h_dimensaoX_mat1 * h_dimensaoY_mat1, sizeof(double));
setupM();
//Selecting GPU 0 to transfer data
hipSetDevice(GPU_ZERO);
//Allocating the matrix on GPU 0
hipMalloc(&d0_m, h_dimensaoX_mat0 * h_dimensaoY_mat0 * sizeof(double));
//Transferring the required data to GPU 0
hipMemcpy(d0_m, h0_m, h_dimensaoX_mat0 * h_dimensaoY_mat0 * sizeof(double), hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_denominador1, &h_denominador1, sizeof(double), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_denominador2, &h_denominador2, sizeof(double), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_dimensaoX, &h_dimensaoX, sizeof(int), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_dimensaoY, &h_dimensaoY, sizeof(int), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_h1, &h_h1, sizeof(double), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_h2, &h_h2, sizeof(double), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_parcial1, &h_parcial1, sizeof(double), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_parcial2, &h_parcial2, sizeof(double), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_dimensaoX_mat0, &h_dimensaoX_mat0, sizeof(int), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_dimensaoY_mat0, &h_dimensaoY_mat0, sizeof(int), 0, hipMemcpyHostToDevice);
//Selecting GPU 1 to transfer data
hipSetDevice(GPU_ONE);
//Allocating the matrix on GPU 1
hipMalloc(&d1_m, h_dimensaoX_mat1 * h_dimensaoY_mat1 * sizeof(double));
//Transferring the required data to GPU 1
hipMemcpy(d1_m, h1_m, h_dimensaoX_mat1 * h_dimensaoY_mat1 * sizeof(double), hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_denominador1, &h_denominador1, sizeof(double), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_denominador2, &h_denominador2, sizeof(double), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_dimensaoX, &h_dimensaoX, sizeof(int), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_dimensaoY, &h_dimensaoY, sizeof(int), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_h1, &h_h1, sizeof(double), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_h2, &h_h2, sizeof(double), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_parcial1, &h_parcial1, sizeof(double), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_parcial2, &h_parcial2, sizeof(double), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_dimensaoX_mat1, &h_dimensaoX_mat1, sizeof(int), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(d_dimensaoY_mat1, &h_dimensaoY_mat1, sizeof(int), 0, hipMemcpyHostToDevice);
//Starting the timer
start = clock();
//Computing the number of blocks and threads to be launched
dim3 nthreads(TAM_BLOCO,TAM_BLOCO);
dim3 nblocos0((h_dimensaoX_mat0 + nthreads.x - 1)/nthreads.x, (h_dimensaoY_mat0 + nthreads.y - 1)/nthreads.y);
dim3 nblocos1((h_dimensaoX_mat1 + nthreads.x - 1)/nthreads.x, (h_dimensaoY_mat1 + nthreads.y - 1)/nthreads.y);
//Running the computation
for(i = 0; i < laps; i++){
hipSetDevice(GPU_ZERO);
hipLaunchKernelGGL(( vermelhos), dim3(nblocos0), dim3(nthreads), 0, 0, d0_m, GPU_ZERO);
hipLaunchKernelGGL(( azuis), dim3(nblocos0), dim3(nthreads), 0, 0, d0_m, GPU_ZERO);
//gpuErrchk( hipPeekAtLastError() );
hipSetDevice(GPU_ONE);
hipLaunchKernelGGL(( vermelhos), dim3(nblocos1), dim3(nthreads), 0, 0, d1_m, GPU_ONE);
hipLaunchKernelGGL(( azuis), dim3(nblocos1), dim3(nthreads), 0, 0, d1_m, GPU_ONE);
}
hipSetDevice(GPU_ZERO);
//Copying the matrix back to the CPU
hipMemcpy(h0_m, d0_m, h_dimensaoX_mat0 * h_dimensaoY_mat0 * sizeof(double), hipMemcpyDeviceToHost);
//Resets the GPU to free all resources
hipDeviceReset();
hipSetDevice(GPU_ONE);
//Copying the matrix back to the CPU
hipMemcpy(h1_m, d1_m, h_dimensaoX_mat1 * h_dimensaoY_mat1 * sizeof(double), hipMemcpyDeviceToHost);
//Resets the GPU to free all resources
hipDeviceReset();
//Printing the matrix to the file and closing it
arquivo = fopen("sample.txt", "w");
printMat();
fclose(arquivo);
//Finish measuring how long the program took
end = clock();
tempo = ((double) (end - start))/CLOCKS_PER_SEC;
printf("%lf;", tempo);
return 0;
}
| 938eb80fbd188873d45600de6bc00b3500832e84.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#define PRECISION 0.00001
#define TAM_BLOCO 8
#define uN 5.0
#define uS 5.0
#define uW 0.0
#define uE 10.0
#define GPU_ZERO 0
#define GPU_ONE 1
//CPU variables
double h_h1, h_h2;
double h_denominador1, h_denominador2;
double *h0_m, *d0_m, *h1_m, *d1_m;
double h_parcial1, h_parcial2;
int h_dimensaoX, h_dimensaoY, laps = 0, i, j;
int h_dimensaoX_mat0, h_dimensaoY_mat0, h_dimensaoX_mat1, h_dimensaoY_mat1;
//GPU variables
__constant__ double omega = 1.5;
__constant__ double d_h1, d_h2;
__constant__ double d_denominador1, d_denominador2;
__constant__ int d_dimensaoX, d_dimensaoY;
__constant__ double d_parcial1, d_parcial2;
__constant__ int d_dimensaoX_mat0, d_dimensaoY_mat0;
__constant__ int d_dimensaoX_mat1, d_dimensaoY_mat1;
FILE *arquivo;
clock_t start, end;
double tempo;
//CPU functions
//Function that prints the matrix to the output file
void printMat(){
int i, j;
for(i = 0; i < h_dimensaoX_mat1; i++){
for(j = 0; j < h_dimensaoY_mat1; j++){
fprintf(arquivo, "%lf", h1_m[i * h_dimensaoY_mat1 + j]);
fprintf(arquivo, " ");
}
for(j = 0; j < h_dimensaoY_mat0; j++){
fprintf(arquivo, "%lf", h0_m[i * h_dimensaoY_mat0 + j]);
if(j != h_dimensaoY_mat0 - 1) fprintf(arquivo, " ");
}
if(i != h_dimensaoX_mat1 - 1)
fprintf(arquivo, "\n");
}
}
//Function that initializes the matrix with the boundary values specified by the problem
void setupM(){
int i,j;
for(i = 0; i < h_dimensaoX_mat0; i++){
for(j = 0; j < h_dimensaoY_mat0; j++){
if(i == 0){
h0_m[i * h_dimensaoY_mat0 + j] = uN;
}else if(i == (h_dimensaoX_mat0 - 1)){
h0_m[i * h_dimensaoY_mat0 + j] = uS;
}else if(j == 0){
h0_m[i * h_dimensaoY_mat0 + j] = uW;
}else if(j == h_dimensaoY_mat0 - 1){
h0_m[i * h_dimensaoY_mat0 + j] = uE;
}
}
}
for(i = 0; i < h_dimensaoX_mat1; i++){
for(j = 0; j < h_dimensaoY_mat1; j++){
if(i == 0){
h1_m[i * h_dimensaoY_mat1 + j] = uN;
}else if(i == (h_dimensaoX_mat1 - 1)){
h1_m[i * h_dimensaoY_mat1 + j] = uS;
}else if(j == 0){
h1_m[i * h_dimensaoY_mat1 + j] = uW;
}
}
}
}
//GPU functions
//Functions "a" and "b" specified by the problem
__device__ double a(int i, int j){
double x = i * d_h1;
double y = j * d_h2;
return 500 * x * (1 - x) * (0.5 - y);
}
__device__ double b(int i, int j){
double x = i * d_h1;
double y = j * d_h2;
return 500 * y * (1 - y) * (x - 0.5);
}
//Funcoes "n", "s", "w", "e" especificadas pelo problema
__device__ double n(int i, int j){
return (d_parcial2 - (d_h2 * b(i,j))/d_denominador2);
}
__device__ double s(int i, int j){
return (d_parcial2 + (d_h2 * b(i,j))/d_denominador2);
}
__device__ double e(int i, int j){
return (d_parcial1 - (d_h1 * a(i,j))/d_denominador1);
}
__device__ double w(int i, int j){
return (d_parcial1 + (d_h1 * a(i,j))/d_denominador1);
}
//Function that computes the weighted average of the neighbors of the point being updated
__device__ double somaDosPontosVizinhos(int i, int j, int dimensaoY, double *m){
double temp = 0;
temp += w(i,j) * m[(i - 1) * dimensaoY + j];
temp += e(i,j) * m[(i + 1) * dimensaoY + j];
temp += s(i,j) * m[i * dimensaoY + (j - 1)];
temp += n(i,j) * m[i * dimensaoY + (j + 1)];
return temp;
}
//Main kernels of the program. Each one works on one set of matrix points,
//taking a weighted average between the current value of the point being
//updated and its four adjacent points. How much each value weighs is
//determined by the relaxation factor omega, which here is fixed.
__global__ void vermelhos(double *m, int device){
int tidx = blockIdx.x * blockDim.x + threadIdx.x;
int tidy = blockIdx.y * blockDim.y + threadIdx.y;
if(device == 1){
if(tidx != 0 && tidx < (d_dimensaoX_mat1 - 1) && tidy != 0 && tidy < (d_dimensaoY_mat1 - 1)){
if((tidx + tidy) % 2 == 0){
m[tidx * d_dimensaoY_mat1 + tidy] *= (1 - omega);
m[tidx * d_dimensaoY_mat1 + tidy] += omega * somaDosPontosVizinhos(tidx, tidy, d_dimensaoY_mat1, m);
}
}
}else{
if(tidx != 0 && tidx < (d_dimensaoX_mat0 - 1) && tidy !=0 && tidy < (d_dimensaoY_mat0 - 1)){
if((tidx + tidy) % 2 == 0){
m[tidx * d_dimensaoY_mat0 + tidy] *= (1 - omega);
m[tidx * d_dimensaoY_mat0 + tidy] += omega * somaDosPontosVizinhos(tidx, tidy, d_dimensaoY_mat0, m);
}
}
}
}
__global__ void azuis(double *m, int device){
int tidx = blockIdx.x * blockDim.x + threadIdx.x;
int tidy = blockIdx.y * blockDim.y + threadIdx.y;
if(device == 1){
if(tidx != 0 && tidx < (d_dimensaoX_mat1 - 1) && tidy != 0 && tidy < (d_dimensaoY_mat1 - 1)){
if((tidx + tidy) % 2 == 1){
m[tidx * d_dimensaoY_mat1 + tidy] *= (1 - omega);
m[tidx * d_dimensaoY_mat1 + tidy] += omega * somaDosPontosVizinhos(tidx, tidy, d_dimensaoY_mat1, m);
}
}
}else{
if(tidx != 0 && tidx < (d_dimensaoX_mat0 - 1) && tidy !=0 && tidy < (d_dimensaoY_mat0 - 1)){
if((tidx + tidy) % 2 == 1){
m[tidx * d_dimensaoY_mat0 + tidy] *= (1 - omega);
m[tidx * d_dimensaoY_mat0 + tidy] += omega * somaDosPontosVizinhos(tidx, tidy, d_dimensaoY_mat0, m);
}
}
}
}
int main(int argc, char** argv){
//Initial checks to guarantee that the program is run with the
//correct initial conditions
if(argc != 4){
printf("Número incorreto de parâmetros:\n");
printf("Insira as dimensoes e a quantidade de iterações\n");
printf("\tUtilize o formato: %s <Dimensao X> <Dimensao Y> <Iterações>\n", argv[0]);
exit(-1);
}
//Initializing all the values needed to transfer to the GPU and to perform
//the program's computations
h_dimensaoX = atoi(argv[1]);
h_dimensaoY = atoi(argv[2]);
laps = atoi(argv[3]);
h_h1 = 1.0/(h_dimensaoX + 1);
h_h2 = 1.0/(h_dimensaoY + 1);
h_dimensaoX += 2;
h_dimensaoY += 2;
h_denominador1 = 4*(1 + (pow(h_h1,2)/pow(h_h2,2)));
h_denominador2 = 4*(1 + (pow(h_h2,2)/pow(h_h1,2)));
h_parcial1 = 2/h_denominador1;
h_parcial2 = 2/h_denominador2;
h_dimensaoX_mat0 = h_dimensaoX;
h_dimensaoY_mat0 = (75 * h_dimensaoY)/100;
h_dimensaoX_mat1 = h_dimensaoX;
h_dimensaoY_mat1 = h_dimensaoY - h_dimensaoY_mat0;
//Allocating the matrix on the CPU and initializing it
h0_m = (double *) calloc(h_dimensaoX_mat0 * h_dimensaoY_mat0, sizeof(double));
h1_m = (double *) calloc(h_dimensaoX_mat1 * h_dimensaoY_mat1, sizeof(double));
setupM();
//Selecting GPU 0 to transfer data
cudaSetDevice(GPU_ZERO);
//Allocating the matrix on GPU 0
cudaMalloc(&d0_m, h_dimensaoX_mat0 * h_dimensaoY_mat0 * sizeof(double));
//Transferring the required data to GPU 0
cudaMemcpy(d0_m, h0_m, h_dimensaoX_mat0 * h_dimensaoY_mat0 * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_denominador1, &h_denominador1, sizeof(double), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_denominador2, &h_denominador2, sizeof(double), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_dimensaoX, &h_dimensaoX, sizeof(int), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_dimensaoY, &h_dimensaoY, sizeof(int), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_h1, &h_h1, sizeof(double), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_h2, &h_h2, sizeof(double), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_parcial1, &h_parcial1, sizeof(double), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_parcial2, &h_parcial2, sizeof(double), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_dimensaoX_mat0, &h_dimensaoX_mat0, sizeof(int), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_dimensaoY_mat0, &h_dimensaoY_mat0, sizeof(int), 0, cudaMemcpyHostToDevice);
//Selecting GPU 1 to transfer data
cudaSetDevice(GPU_ONE);
//Allocating the matrix on GPU 1
cudaMalloc(&d1_m, h_dimensaoX_mat1 * h_dimensaoY_mat1 * sizeof(double));
//Transferring the required data to GPU 1
cudaMemcpy(d1_m, h1_m, h_dimensaoX_mat1 * h_dimensaoY_mat1 * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_denominador1, &h_denominador1, sizeof(double), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_denominador2, &h_denominador2, sizeof(double), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_dimensaoX, &h_dimensaoX, sizeof(int), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_dimensaoY, &h_dimensaoY, sizeof(int), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_h1, &h_h1, sizeof(double), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_h2, &h_h2, sizeof(double), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_parcial1, &h_parcial1, sizeof(double), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_parcial2, &h_parcial2, sizeof(double), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_dimensaoX_mat1, &h_dimensaoX_mat1, sizeof(int), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_dimensaoY_mat1, &h_dimensaoY_mat1, sizeof(int), 0, cudaMemcpyHostToDevice);
//Starting the timer
start = clock();
//Computing the number of blocks and threads to be launched
dim3 nthreads(TAM_BLOCO,TAM_BLOCO);
dim3 nblocos0((h_dimensaoX_mat0 + nthreads.x - 1)/nthreads.x, (h_dimensaoY_mat0 + nthreads.y - 1)/nthreads.y);
dim3 nblocos1((h_dimensaoX_mat1 + nthreads.x - 1)/nthreads.x, (h_dimensaoY_mat1 + nthreads.y - 1)/nthreads.y);
//Running the computation
for(i = 0; i < laps; i++){
cudaSetDevice(GPU_ZERO);
vermelhos<<<nblocos0, nthreads>>>(d0_m, GPU_ZERO);
azuis<<<nblocos0, nthreads>>>(d0_m, GPU_ZERO);
//gpuErrchk( cudaPeekAtLastError() );
cudaSetDevice(GPU_ONE);
vermelhos<<<nblocos1, nthreads>>>(d1_m, GPU_ONE);
azuis<<<nblocos1, nthreads>>>(d1_m, GPU_ONE);
}
cudaSetDevice(GPU_ZERO);
//Copying the matrix back to the CPU
cudaMemcpy(h0_m, d0_m, h_dimensaoX_mat0 * h_dimensaoY_mat0 * sizeof(double), cudaMemcpyDeviceToHost);
//Resets the GPU to free all resources
cudaDeviceReset();
cudaSetDevice(GPU_ONE);
//Copying the matrix back to the CPU
cudaMemcpy(h1_m, d1_m, h_dimensaoX_mat1 * h_dimensaoY_mat1 * sizeof(double), cudaMemcpyDeviceToHost);
//Resets the GPU to free all resources
cudaDeviceReset();
//Printing the matrix to the file and closing it
arquivo = fopen("sample.txt", "w");
printMat();
fclose(arquivo);
//Finish measuring how long the program took
end = clock();
tempo = ((double) (end - start))/CLOCKS_PER_SEC;
printf("%lf;", tempo);
return 0;
}
|
c233924067cdf3ba36e12aa3f42db70d0df2ad20.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Author: Anatoly Baskeheev, Itseez Ltd, ([email protected])
*/
#include "internal.hpp"
#include "pcl/gpu/utils/device/warp.hpp"
#include "utils/copygen.hpp"
#include "utils/boxutils.hpp"
#include "utils/scan_block.hpp"
#include "octree_iterator.hpp"
namespace pcl
{
namespace device
{
using PointType = OctreeImpl::PointType;
template<typename RadiusStrategy, typename FetchStrategy>
struct Batch : public RadiusStrategy, public FetchStrategy
{
const int *indices;
PtrStep<float> points;
OctreeGlobalWithBox octree;
int max_results;
mutable int* output;
mutable int* output_sizes;
};
struct DirectQuery
{
PtrSz<PointType> queries;
__device__ __forceinline__ float3 fetch(int query_index) const
{
PointType q = queries.data[query_index];
return make_float3(q.x, q.y, q.z);
}
};
struct IndicesQuery : public DirectQuery
{
const int* queries_indices;
__device__ __forceinline__ float3 fetch(int query_index) const
{
PointType q = queries[queries_indices[query_index]];
return make_float3(q.x, q.y, q.z);
}
};
struct SharedRadius
{
float radius;
__device__ __forceinline__ float getRadius(int /*index*/) const { return radius; }
__device__ __forceinline__ float bradcastRadius2(float* /*ptr*/, bool /*active*/, float& /*radius_reg*/) const
{
return radius * radius;
}
};
struct IndividualRadius
{
const float* radiuses;
__device__ __forceinline__ float getRadius(int index) const { return radiuses[index]; }
__device__ __forceinline__ float bradcastRadius2(float* ptr, bool active, float& radius_reg) const
{
if (active)
*ptr = radius_reg * radius_reg;
return *ptr;
}
};
struct KernelPolicy
{
enum
{
CTA_SIZE = 512,
WARP_SIZE = 32,
WARPS_COUNT = CTA_SIZE/WARP_SIZE,
MAX_LEVELS_PLUS_ROOT = 11,
CHECK_FLAG = 1 << 31
};
struct SmemStorage
{
volatile int per_warp_buffer[WARPS_COUNT];
volatile int cta_buffer[CTA_SIZE];
};
};
__shared__ KernelPolicy::SmemStorage storage;
template<typename BatchType>
struct Warp_radiusSearch
{
public:
using OctreeIterator = OctreeIteratorDeviceNS;
const BatchType& batch;
OctreeIterator iterator;
int found_count;
int query_index;
float3 query;
float radius;
__device__ __forceinline__ Warp_radiusSearch(const BatchType& batch_arg, int query_index_arg)
: batch(batch_arg), iterator(/**/batch.octree/*storage.paths*/), found_count(0), query_index(query_index_arg){}
__device__ __forceinline__ void launch(bool active)
{
if (active)
{
query = batch.fetch(query_index);
radius = batch.getRadius(query_index);
}
else
query_index = -1;
while(__any_sync(0xFFFFFFFF, active))
{
int leaf = -1;
if (active)
leaf = examineNode(iterator);
processLeaf(leaf);
active = active && iterator.level >= 0 && found_count < batch.max_results;
}
if (query_index != -1)
batch.output_sizes[query_index] = found_count;
}
private:
__device__ __forceinline__ int examineNode(OctreeIterator& iterator)
{
using namespace pcl::gpu;
int node_idx = *iterator;
int code = batch.octree.codes[node_idx];
float3 node_minp = batch.octree.minp;
float3 node_maxp = batch.octree.maxp;
calcBoundingBox(iterator.level, code, node_minp, node_maxp);
//if true, take nothing, and go to next
if (checkIfNodeOutsideSphere(node_minp, node_maxp, query, radius))
{
++iterator;
return -1;
}
if (checkIfNodeInsideSphere(node_minp, node_maxp, query, radius))
{
++iterator;
return node_idx; //return node to copy
}
//need to go to next level
int node = batch.octree.nodes[node_idx];
int children_mask = node & 0xFF;
bool isLeaf = children_mask == 0;
if (isLeaf)
{
++iterator;
return (node_idx | KernelPolicy::CHECK_FLAG); // return node to check
}
//goto next level
int first = node >> 8;
int len = __popc(children_mask);
iterator.gotoNextLevel(first, len);
return -1;
};
__device__ __forceinline__ void processLeaf(int leaf)
{
int mask = __ballot_sync(0xFFFFFFFF, leaf != -1);
while(mask)
{
unsigned int laneId = Warp::laneId();
unsigned int warpId = Warp::id();
int active_lane = __ffs(mask) - 1; //[0..31]
mask &= ~(1 << active_lane);
//broadcast active_found_count
if (active_lane == laneId)
storage.per_warp_buffer[warpId] = found_count;
int active_found_count = storage.per_warp_buffer[warpId];
int node_idx = leaf & ~KernelPolicy::CHECK_FLAG;
//broadcast beg
if (active_lane == laneId)
storage.per_warp_buffer[warpId] = batch.octree.begs[node_idx];
int beg = storage.per_warp_buffer[warpId];
//broadcast end
if (active_lane == laneId)
storage.per_warp_buffer[warpId] = batch.octree.ends[node_idx];
int end = storage.per_warp_buffer[warpId];
//broadcast active_query_index
if (active_lane == laneId)
storage.per_warp_buffer[warpId] = query_index;
int active_query_index = storage.per_warp_buffer[warpId];
int length = end - beg;
int *out = batch.output + active_query_index * batch.max_results + active_found_count;
int length_left = batch.max_results - active_found_count;
int test = __any_sync(0xFFFFFFFF, active_lane == laneId && (leaf & KernelPolicy::CHECK_FLAG));
if (test)
{
float3 active_query;
//broadcast warp_query
if (active_lane == laneId)
storage.per_warp_buffer[warpId] = __float_as_int(query.x);
active_query.x = __int_as_float(storage.per_warp_buffer[warpId]);
if (active_lane == laneId)
storage.per_warp_buffer[warpId] = __float_as_int(query.y);
active_query.y = __int_as_float(storage.per_warp_buffer[warpId]);
if (active_lane == laneId)
storage.per_warp_buffer[warpId] = __float_as_int(query.z);
active_query.z = __int_as_float(storage.per_warp_buffer[warpId]);
float radius2 = batch.bradcastRadius2((float*)&storage.per_warp_buffer[warpId], (active_lane == laneId), radius);
length = TestWarpKernel(beg, active_query, radius2, length, out, length_left);
}
else
{
length = min(length, length_left);
Warp::copy(batch.indices + beg, batch.indices + beg + length, out);
}
if (active_lane == laneId)
found_count += length;
}
}
__device__ __forceinline__ int TestWarpKernel(int beg, const float3& active_query, float radius2, int length, int* out, int length_left)
{
unsigned int idx = Warp::laneId();
int last_threadIdx = threadIdx.x - idx + 31;
int total_new = 0;
for(;;)
{
int take = 0;
if (idx < length)
{
float dx = batch.points.ptr(0)[beg + idx] - active_query.x;
float dy = batch.points.ptr(1)[beg + idx] - active_query.y;
float dz = batch.points.ptr(2)[beg + idx] - active_query.z;
float d2 = dx * dx + dy * dy + dz * dz;
if (d2 < radius2)
take = 1;
}
storage.cta_buffer[threadIdx.x] = take;
int offset = scan_warp<exclusive>(storage.cta_buffer);
//ensure that we copy
bool out_of_bounds = (offset + total_new) >= length_left;
if (take && !out_of_bounds)
out[offset] = batch.indices[beg + idx];
int new_nodes = storage.cta_buffer[last_threadIdx];
idx += Warp::STRIDE;
total_new += new_nodes;
out += new_nodes;
if (__all_sync(0xFFFFFFFF, idx >= length) || __any_sync(0xFFFFFFFF, out_of_bounds) || total_new == length_left)
break;
}
return min(total_new, length_left);
}
};
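//Illustrative sketch (editor's addition, not used by the search): the
//warp-level compaction idea behind TestWarpKernel, expressed with
//__ballot_sync/__popc instead of the shared-memory exclusive scan. Each lane
//decides whether to keep its candidate, derives its output slot from the
//keepers in the lanes below it, and the whole warp learns how many values
//were written. Assumes all 32 lanes are active; the name is hypothetical.
__device__ __forceinline__ int warpCompactSketch(bool take, int value, int* out)
{
  unsigned int ballot = __ballot_sync(0xFFFFFFFF, take);
  unsigned int lane = Warp::laneId();
  int offset = __popc(ballot & ((1u << lane) - 1u)); //keepers below this lane
  if (take)
    out[offset] = value;
  return __popc(ballot); //total values written by the warp this round
}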
template<typename BatchType>
__global__ void KernelRS(const BatchType batch)
{
int query_index = blockIdx.x * blockDim.x + threadIdx.x;
bool active = query_index < batch.queries.size;
if (__all_sync(0xFFFFFFFF, active == false))
return;
Warp_radiusSearch<BatchType> search(batch, query_index);
search.launch(active);
}
}
}
template<typename BatchType>
void pcl::device::OctreeImpl::radiusSearchEx(BatchType& batch, const Queries& queries, NeighborIndices& results)
{
batch.indices = indices;
batch.octree = octreeGlobal;
batch.max_results = results.max_elems;
batch.output = results.data;
batch.output_sizes = results.sizes;
batch.points = points_sorted;
cudaSafeCall( hipFuncSetCacheConfig(KernelRS<BatchType>, hipFuncCachePreferL1) );
int block = KernelPolicy::CTA_SIZE;
int grid = divUp((int)batch.queries.size, block);
hipLaunchKernelGGL(( KernelRS), dim3(grid), dim3(block), 0, 0, batch);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
}
void pcl::device::OctreeImpl::radiusSearch(const Queries& queries, float radius, NeighborIndices& results)
{
using BatchType = Batch<SharedRadius, DirectQuery>;
BatchType batch;
batch.radius = radius;
batch.queries = queries;
radiusSearchEx(batch, queries, results);
}
void pcl::device::OctreeImpl::radiusSearch(const Queries& queries, const Radiuses& radiuses, NeighborIndices& results)
{
using BatchType = Batch<IndividualRadius, DirectQuery>;
BatchType batch;
batch.radiuses = radiuses;
batch.queries = queries;
radiusSearchEx(batch, queries, results);
}
void pcl::device::OctreeImpl::radiusSearch(const Queries& queries, const Indices& indices, float radius, NeighborIndices& results)
{
using BatchType = Batch<SharedRadius, IndicesQuery>;
BatchType batch;
batch.radius = radius;
batch.queries = queries;
batch.queries_indices = indices;
batch.queries.size = indices.size();
radiusSearchEx(batch, queries, results);
}
| c233924067cdf3ba36e12aa3f42db70d0df2ad20.cu | /*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Author: Anatoly Baskeheev, Itseez Ltd, ([email protected])
*/
#include "internal.hpp"
#include "pcl/gpu/utils/device/warp.hpp"
#include "utils/copygen.hpp"
#include "utils/boxutils.hpp"
#include "utils/scan_block.hpp"
#include "octree_iterator.hpp"
namespace pcl
{
namespace device
{
using PointType = OctreeImpl::PointType;
template<typename RadiusStrategy, typename FetchStrategy>
struct Batch : public RadiusStrategy, public FetchStrategy
{
const int *indices;
PtrStep<float> points;
OctreeGlobalWithBox octree;
int max_results;
mutable int* output;
mutable int* output_sizes;
};
struct DirectQuery
{
PtrSz<PointType> queries;
__device__ __forceinline__ float3 fetch(int query_index) const
{
PointType q = queries.data[query_index];
return make_float3(q.x, q.y, q.z);
}
};
struct IndicesQuery : public DirectQuery
{
const int* queries_indices;
__device__ __forceinline__ float3 fetch(int query_index) const
{
PointType q = queries[queries_indices[query_index]];
return make_float3(q.x, q.y, q.z);
}
};
struct SharedRadius
{
float radius;
__device__ __forceinline__ float getRadius(int /*index*/) const { return radius; }
__device__ __forceinline__ float bradcastRadius2(float* /*ptr*/, bool /*active*/, float& /*radius_reg*/) const
{
return radius * radius;
}
};
struct IndividualRadius
{
const float* radiuses;
__device__ __forceinline__ float getRadius(int index) const { return radiuses[index]; }
__device__ __forceinline__ float bradcastRadius2(float* ptr, bool active, float& radius_reg) const
{
if (active)
*ptr = radius_reg * radius_reg;
return *ptr;
}
};
struct KernelPolicy
{
enum
{
CTA_SIZE = 512,
WARP_SIZE = 32,
WARPS_COUNT = CTA_SIZE/WARP_SIZE,
MAX_LEVELS_PLUS_ROOT = 11,
CHECK_FLAG = 1 << 31
};
struct SmemStorage
{
volatile int per_warp_buffer[WARPS_COUNT];
volatile int cta_buffer[CTA_SIZE];
};
};
__shared__ KernelPolicy::SmemStorage storage;
template<typename BatchType>
struct Warp_radiusSearch
{
public:
using OctreeIterator = OctreeIteratorDeviceNS;
const BatchType& batch;
OctreeIterator iterator;
int found_count;
int query_index;
float3 query;
float radius;
__device__ __forceinline__ Warp_radiusSearch(const BatchType& batch_arg, int query_index_arg)
: batch(batch_arg), iterator(/**/batch.octree/*storage.paths*/), found_count(0), query_index(query_index_arg){}
__device__ __forceinline__ void launch(bool active)
{
if (active)
{
query = batch.fetch(query_index);
radius = batch.getRadius(query_index);
}
else
query_index = -1;
while(__any_sync(0xFFFFFFFF, active))
{
int leaf = -1;
if (active)
leaf = examineNode(iterator);
processLeaf(leaf);
active = active && iterator.level >= 0 && found_count < batch.max_results;
}
if (query_index != -1)
batch.output_sizes[query_index] = found_count;
}
private:
__device__ __forceinline__ int examineNode(OctreeIterator& iterator)
{
using namespace pcl::gpu;
int node_idx = *iterator;
int code = batch.octree.codes[node_idx];
float3 node_minp = batch.octree.minp;
float3 node_maxp = batch.octree.maxp;
calcBoundingBox(iterator.level, code, node_minp, node_maxp);
//if true, take nothing, and go to next
if (checkIfNodeOutsideSphere(node_minp, node_maxp, query, radius))
{
++iterator;
return -1;
}
if (checkIfNodeInsideSphere(node_minp, node_maxp, query, radius))
{
++iterator;
return node_idx; //return node to copy
}
//need to go to next level
int node = batch.octree.nodes[node_idx];
int children_mask = node & 0xFF;
bool isLeaf = children_mask == 0;
if (isLeaf)
{
++iterator;
return (node_idx | KernelPolicy::CHECK_FLAG); // return node to check
}
//goto next level
int first = node >> 8;
int len = __popc(children_mask);
iterator.gotoNextLevel(first, len);
return -1;
};
__device__ __forceinline__ void processLeaf(int leaf)
{
int mask = __ballot_sync(0xFFFFFFFF, leaf != -1);
while(mask)
{
unsigned int laneId = Warp::laneId();
unsigned int warpId = Warp::id();
int active_lane = __ffs(mask) - 1; //[0..31]
mask &= ~(1 << active_lane);
//broadcast active_found_count
if (active_lane == laneId)
storage.per_warp_buffer[warpId] = found_count;
int active_found_count = storage.per_warp_buffer[warpId];
int node_idx = leaf & ~KernelPolicy::CHECK_FLAG;
//broadcast beg
if (active_lane == laneId)
storage.per_warp_buffer[warpId] = batch.octree.begs[node_idx];
int beg = storage.per_warp_buffer[warpId];
//broadcast end
if (active_lane == laneId)
storage.per_warp_buffer[warpId] = batch.octree.ends[node_idx];
int end = storage.per_warp_buffer[warpId];
//broadcast active_query_index
if (active_lane == laneId)
storage.per_warp_buffer[warpId] = query_index;
int active_query_index = storage.per_warp_buffer[warpId];
int length = end - beg;
int *out = batch.output + active_query_index * batch.max_results + active_found_count;
int length_left = batch.max_results - active_found_count;
int test = __any_sync(0xFFFFFFFF, active_lane == laneId && (leaf & KernelPolicy::CHECK_FLAG));
if (test)
{
float3 active_query;
//broadcast warp_query
if (active_lane == laneId)
storage.per_warp_buffer[warpId] = __float_as_int(query.x);
active_query.x = __int_as_float(storage.per_warp_buffer[warpId]);
if (active_lane == laneId)
storage.per_warp_buffer[warpId] = __float_as_int(query.y);
active_query.y = __int_as_float(storage.per_warp_buffer[warpId]);
if (active_lane == laneId)
storage.per_warp_buffer[warpId] = __float_as_int(query.z);
active_query.z = __int_as_float(storage.per_warp_buffer[warpId]);
float radius2 = batch.bradcastRadius2((float*)&storage.per_warp_buffer[warpId], (active_lane == laneId), radius);
length = TestWarpKernel(beg, active_query, radius2, length, out, length_left);
}
else
{
length = min(length, length_left);
Warp::copy(batch.indices + beg, batch.indices + beg + length, out);
}
if (active_lane == laneId)
found_count += length;
}
}
__device__ __forceinline__ int TestWarpKernel(int beg, const float3& active_query, float radius2, int length, int* out, int length_left)
{
unsigned int idx = Warp::laneId();
int last_threadIdx = threadIdx.x - idx + 31;
int total_new = 0;
for(;;)
{
int take = 0;
if (idx < length)
{
float dx = batch.points.ptr(0)[beg + idx] - active_query.x;
float dy = batch.points.ptr(1)[beg + idx] - active_query.y;
float dz = batch.points.ptr(2)[beg + idx] - active_query.z;
float d2 = dx * dx + dy * dy + dz * dz;
if (d2 < radius2)
take = 1;
}
storage.cta_buffer[threadIdx.x] = take;
int offset = scan_warp<exclusive>(storage.cta_buffer);
//ensure that we copy
bool out_of_bounds = (offset + total_new) >= length_left;
if (take && !out_of_bounds)
out[offset] = batch.indices[beg + idx];
int new_nodes = storage.cta_buffer[last_threadIdx];
idx += Warp::STRIDE;
total_new += new_nodes;
out += new_nodes;
if (__all_sync(0xFFFFFFFF, idx >= length) || __any_sync(0xFFFFFFFF, out_of_bounds) || total_new == length_left)
break;
}
return min(total_new, length_left);
}
};
template<typename BatchType>
__global__ void KernelRS(const BatchType batch)
{
int query_index = blockIdx.x * blockDim.x + threadIdx.x;
bool active = query_index < batch.queries.size;
if (__all_sync(0xFFFFFFFF, active == false))
return;
Warp_radiusSearch<BatchType> search(batch, query_index);
search.launch(active);
}
}
}
template<typename BatchType>
void pcl::device::OctreeImpl::radiusSearchEx(BatchType& batch, const Queries& queries, NeighborIndices& results)
{
batch.indices = indices;
batch.octree = octreeGlobal;
batch.max_results = results.max_elems;
batch.output = results.data;
batch.output_sizes = results.sizes;
batch.points = points_sorted;
cudaSafeCall( cudaFuncSetCacheConfig(KernelRS<BatchType>, cudaFuncCachePreferL1) );
int block = KernelPolicy::CTA_SIZE;
int grid = divUp((int)batch.queries.size, block);
KernelRS<<<grid, block>>>(batch);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
void pcl::device::OctreeImpl::radiusSearch(const Queries& queries, float radius, NeighborIndices& results)
{
using BatchType = Batch<SharedRadius, DirectQuery>;
BatchType batch;
batch.radius = radius;
batch.queries = queries;
radiusSearchEx(batch, queries, results);
}
void pcl::device::OctreeImpl::radiusSearch(const Queries& queries, const Radiuses& radiuses, NeighborIndices& results)
{
using BatchType = Batch<IndividualRadius, DirectQuery>;
BatchType batch;
batch.radiuses = radiuses;
batch.queries = queries;
radiusSearchEx(batch, queries, results);
}
void pcl::device::OctreeImpl::radiusSearch(const Queries& queries, const Indices& indices, float radius, NeighborIndices& results)
{
using BatchType = Batch<SharedRadius, IndicesQuery>;
BatchType batch;
batch.radius = radius;
batch.queries = queries;
batch.queries_indices = indices;
batch.queries.size = indices.size();
radiusSearchEx(batch, queries, results);
}
|
500d775375f5a1ff2ff090965ccf25e9e5aafcb7.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "reader_impl.hpp"
#include <io/comp/nvcomp_adapter.hpp>
#include <io/utilities/config_utils.hpp>
#include <io/utilities/time_utils.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/utilities/integer_utils.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/binary_search.h>
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/iterator_categories.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/logical.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/unique.h>
#include <numeric>
namespace cudf::io::detail::parquet {
namespace {
/**
* @brief Generate depth remappings for repetition and definition levels.
*
* When dealing with columns that contain lists, we must examine incoming
* repetition and definition level pairs to determine what range of output nesting
* is indicated when adding new values. This function generates the mappings of
* the R/D levels to those start/end bounds
*
* @param remap Maps column schema index to the R/D remapping vectors for that column
* @param src_col_schema The column schema to generate the new mapping for
* @param md File metadata information
*/
void generate_depth_remappings(std::map<int, std::pair<std::vector<int>, std::vector<int>>>& remap,
int src_col_schema,
aggregate_reader_metadata const& md)
{
// already generated for this level
if (remap.find(src_col_schema) != remap.end()) { return; }
auto schema = md.get_schema(src_col_schema);
int max_depth = md.get_output_nesting_depth(src_col_schema);
CUDF_EXPECTS(remap.find(src_col_schema) == remap.end(),
"Attempting to remap a schema more than once");
auto inserted =
remap.insert(std::pair<int, std::pair<std::vector<int>, std::vector<int>>>{src_col_schema, {}});
auto& depth_remap = inserted.first->second;
std::vector<int>& rep_depth_remap = (depth_remap.first);
rep_depth_remap.resize(schema.max_repetition_level + 1);
std::vector<int>& def_depth_remap = (depth_remap.second);
def_depth_remap.resize(schema.max_definition_level + 1);
// the key:
// for incoming level values R/D
// add values starting at the shallowest nesting level X that has repetition level R
// until you reach the deepest nesting level Y that corresponds to the repetition level R1
// held by the nesting level that has definition level D
//
// Example: a 3 level struct with a list at the bottom
//
// R / D Depth
// level0 0 / 1 0
// level1 0 / 2 1
// level2 0 / 3 2
// list 0 / 3 3
// element 1 / 4 4
//
// incoming R/D : 0, 0 -> add values from depth 0 to 3 (def level 0 always maps to depth 0)
// incoming R/D : 0, 1 -> add values from depth 0 to 3
// incoming R/D : 0, 2 -> add values from depth 0 to 3
// incoming R/D : 1, 4 -> add values from depth 4 to 4
//
// Note : the -validity- of values is simply checked by comparing the incoming D value against the
// D value of the given nesting level (incoming D >= the D for the nesting level == valid,
// otherwise NULL). The tricky part is determining what nesting levels to add values at.
//
// For schemas with no repetition level (no lists), X is always 0 and Y is always max nesting
// depth.
//
// compute "X" from above
for (int s_idx = schema.max_repetition_level; s_idx >= 0; s_idx--) {
auto find_shallowest = [&](int r) {
int shallowest = -1;
int cur_depth = max_depth - 1;
int schema_idx = src_col_schema;
while (schema_idx > 0) {
auto cur_schema = md.get_schema(schema_idx);
if (cur_schema.max_repetition_level == r) {
// if this is a repeated field, map it one level deeper
shallowest = cur_schema.is_stub() ? cur_depth + 1 : cur_depth;
}
// if it's one-level encoding list
else if (cur_schema.is_one_level_list()) {
shallowest = cur_depth - 1;
}
if (!cur_schema.is_stub()) { cur_depth--; }
schema_idx = cur_schema.parent_idx;
}
return shallowest;
};
rep_depth_remap[s_idx] = find_shallowest(s_idx);
}
// compute "Y" from above
for (int s_idx = schema.max_definition_level; s_idx >= 0; s_idx--) {
auto find_deepest = [&](int d) {
SchemaElement prev_schema;
int schema_idx = src_col_schema;
int r1 = 0;
while (schema_idx > 0) {
SchemaElement cur_schema = md.get_schema(schema_idx);
if (cur_schema.max_definition_level == d) {
// if this is a repeated field, map it one level deeper
r1 = cur_schema.is_stub() ? prev_schema.max_repetition_level
: cur_schema.max_repetition_level;
break;
}
prev_schema = cur_schema;
schema_idx = cur_schema.parent_idx;
}
// we now know R1 from above. return the deepest nesting level that has the
// same repetition level
schema_idx = src_col_schema;
int depth = max_depth - 1;
while (schema_idx > 0) {
SchemaElement cur_schema = md.get_schema(schema_idx);
if (cur_schema.max_repetition_level == r1) {
// if this is a repeated field, map it one level deeper
depth = cur_schema.is_stub() ? depth + 1 : depth;
break;
}
if (!cur_schema.is_stub()) { depth--; }
prev_schema = cur_schema;
schema_idx = cur_schema.parent_idx;
}
return depth;
};
def_depth_remap[s_idx] = find_deepest(s_idx);
}
}
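/**
* @brief Illustrative helper (editor's addition, not used by the reader).
*
* Shows how the remapping built above is meant to be read: for one column, an
* incoming repetition/definition level pair selects the range of output
* nesting depths [start, end] that receives new values. The name is
* hypothetical.
*/
[[maybe_unused]] std::pair<int, int> remapped_depth_range(
  std::pair<std::vector<int>, std::vector<int>> const& depth_remap, int rep_level, int def_level)
{
  return {depth_remap.first[rep_level], depth_remap.second[def_level]};
}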
/**
* @brief Return the required number of bits to store a value.
*/
template <typename T = uint8_t>
[[nodiscard]] T required_bits(uint32_t max_level)
{
return static_cast<T>(CompactProtocolReader::NumRequiredBits(max_level));
}
/**
* @brief Converts cuDF units to Parquet units.
*
* @return A tuple of Parquet type width, Parquet clock rate and Parquet decimal type.
*/
[[nodiscard]] std::tuple<int32_t, int32_t, int8_t> conversion_info(type_id column_type_id,
type_id timestamp_type_id,
parquet::Type physical,
int8_t converted,
int32_t length)
{
int32_t type_width = (physical == parquet::FIXED_LEN_BYTE_ARRAY) ? length : 0;
int32_t clock_rate = 0;
if (column_type_id == type_id::INT8 or column_type_id == type_id::UINT8) {
type_width = 1; // I32 -> I8
} else if (column_type_id == type_id::INT16 or column_type_id == type_id::UINT16) {
type_width = 2; // I32 -> I16
} else if (column_type_id == type_id::INT32) {
type_width = 4; // str -> hash32
} else if (is_chrono(data_type{column_type_id})) {
clock_rate = to_clockrate(timestamp_type_id);
}
int8_t converted_type = converted;
if (converted_type == parquet::DECIMAL && column_type_id != type_id::FLOAT64 &&
not cudf::is_fixed_point(data_type{column_type_id})) {
converted_type = parquet::UNKNOWN; // Not converting to float64 or decimal
}
return std::make_tuple(type_width, clock_rate, converted_type);
}
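// Illustrative example (editor's addition): reading a Parquet INT32 physical
// column carrying converted type INT_8 into a cudf INT8 column narrows the
// type width to one byte and needs no clock rate. The enumerator spellings in
// the commented call below are assumptions about this translation unit's
// headers.
// auto const [type_width, clock_rate, converted] =
//   conversion_info(type_id::INT8, type_id::EMPTY, parquet::INT32, parquet::INT_8, 0);
// // type_width == 1, clock_rate == 0, converted == parquet::INT_8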
/**
* @brief Reads compressed page data to device memory.
*
* @param sources Dataset sources
* @param page_data Buffers to hold compressed page data for each chunk
* @param chunks List of column chunk descriptors
* @param begin_chunk Index of first column chunk to read
* @param end_chunk Index after the last column chunk to read
* @param column_chunk_offsets File offset for all chunks
* @param chunk_source_map Association between each column chunk and its source
* @param stream CUDA stream used for device memory operations and kernel launches
*
* @return A future object for reading synchronization
*/
[[nodiscard]] std::future<void> read_column_chunks_async(
std::vector<std::unique_ptr<datasource>> const& sources,
std::vector<std::unique_ptr<datasource::buffer>>& page_data,
hostdevice_vector<gpu::ColumnChunkDesc>& chunks,
size_t begin_chunk,
size_t end_chunk,
const std::vector<size_t>& column_chunk_offsets,
std::vector<size_type> const& chunk_source_map,
rmm::cuda_stream_view stream)
{
// Transfer chunk data, coalescing adjacent chunks
std::vector<std::future<size_t>> read_tasks;
for (size_t chunk = begin_chunk; chunk < end_chunk;) {
const size_t io_offset = column_chunk_offsets[chunk];
size_t io_size = chunks[chunk].compressed_size;
size_t next_chunk = chunk + 1;
const bool is_compressed = (chunks[chunk].codec != parquet::Compression::UNCOMPRESSED);
while (next_chunk < end_chunk) {
const size_t next_offset = column_chunk_offsets[next_chunk];
const bool is_next_compressed =
(chunks[next_chunk].codec != parquet::Compression::UNCOMPRESSED);
if (next_offset != io_offset + io_size || is_next_compressed != is_compressed) {
// Can't merge if not contiguous or mixing compressed and uncompressed
// Not coalescing uncompressed with compressed chunks is so that compressed buffers can be
// freed earlier (immediately after decompression stage) to limit peak memory requirements
break;
}
io_size += chunks[next_chunk].compressed_size;
next_chunk++;
}
if (io_size != 0) {
auto& source = sources[chunk_source_map[chunk]];
if (source->is_device_read_preferred(io_size)) {
auto buffer = rmm::device_buffer(io_size, stream);
auto fut_read_size = source->device_read_async(
io_offset, io_size, static_cast<uint8_t*>(buffer.data()), stream);
read_tasks.emplace_back(std::move(fut_read_size));
page_data[chunk] = datasource::buffer::create(std::move(buffer));
} else {
auto const buffer = source->host_read(io_offset, io_size);
page_data[chunk] =
datasource::buffer::create(rmm::device_buffer(buffer->data(), buffer->size(), stream));
}
auto d_compdata = page_data[chunk]->data();
do {
chunks[chunk].compressed_data = d_compdata;
d_compdata += chunks[chunk].compressed_size;
} while (++chunk != next_chunk);
} else {
chunk = next_chunk;
}
}
auto sync_fn = [](decltype(read_tasks) read_tasks) {
for (auto& task : read_tasks) {
task.wait();
}
};
return std::async(std::launch::deferred, sync_fn, std::move(read_tasks));
}
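/**
* @brief Illustrative predicate (editor's addition, not used above).
*
* Captures the condition under which read_column_chunks_async folds the next
* column chunk into the current read: the chunks must be byte-contiguous in
* the file and must not mix compressed with uncompressed data. Parameter
* names are hypothetical.
*/
[[maybe_unused]] bool can_coalesce_chunk_reads(
  size_t cur_offset, size_t cur_size, bool cur_compressed, size_t next_offset, bool next_compressed)
{
  return next_offset == cur_offset + cur_size && next_compressed == cur_compressed;
}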
/**
* @brief Return the number of total pages from the given column chunks.
*
* @param chunks List of column chunk descriptors
* @param stream CUDA stream used for device memory operations and kernel launches
*
* @return The total number of pages
*/
[[nodiscard]] size_t count_page_headers(hostdevice_vector<gpu::ColumnChunkDesc>& chunks,
rmm::cuda_stream_view stream)
{
size_t total_pages = 0;
chunks.host_to_device(stream);
gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size(), stream);
chunks.device_to_host(stream, true);
for (size_t c = 0; c < chunks.size(); c++) {
total_pages += chunks[c].num_data_pages + chunks[c].num_dict_pages;
}
return total_pages;
}
/**
* @brief Decode the page information from the given column chunks.
*
* @param chunks List of column chunk descriptors
* @param pages List of page information
* @param stream CUDA stream used for device memory operations and kernel launches
*/
void decode_page_headers(hostdevice_vector<gpu::ColumnChunkDesc>& chunks,
hostdevice_vector<gpu::PageInfo>& pages,
rmm::cuda_stream_view stream)
{
// IMPORTANT : if you change how pages are stored within a chunk (dict pages, then data pages),
// please update preprocess_nested_columns to reflect this.
for (size_t c = 0, page_count = 0; c < chunks.size(); c++) {
chunks[c].max_num_pages = chunks[c].num_data_pages + chunks[c].num_dict_pages;
chunks[c].page_info = pages.device_ptr(page_count);
page_count += chunks[c].max_num_pages;
}
chunks.host_to_device(stream);
gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size(), stream);
pages.device_to_host(stream, true);
}
/**
* @brief Decompresses the page data, at page granularity.
*
* @param chunks List of column chunk descriptors
* @param pages List of page information
* @param stream CUDA stream used for device memory operations and kernel launches
*
* @return Device buffer to decompressed page data
*/
[[nodiscard]] rmm::device_buffer decompress_page_data(
hostdevice_vector<gpu::ColumnChunkDesc>& chunks,
hostdevice_vector<gpu::PageInfo>& pages,
rmm::cuda_stream_view stream)
{
auto for_each_codec_page = [&](parquet::Compression codec, const std::function<void(size_t)>& f) {
for (size_t c = 0, page_count = 0; c < chunks.size(); c++) {
const auto page_stride = chunks[c].max_num_pages;
if (chunks[c].codec == codec) {
for (int k = 0; k < page_stride; k++) {
f(page_count + k);
}
}
page_count += page_stride;
}
};
// Brotli scratch memory for decompressing
rmm::device_buffer debrotli_scratch;
// Count the exact number of compressed pages
size_t num_comp_pages = 0;
size_t total_decomp_size = 0;
struct codec_stats {
parquet::Compression compression_type = UNCOMPRESSED;
size_t num_pages = 0;
int32_t max_decompressed_size = 0;
size_t total_decomp_size = 0;
};
std::array codecs{codec_stats{parquet::GZIP},
codec_stats{parquet::SNAPPY},
codec_stats{parquet::BROTLI},
codec_stats{parquet::ZSTD}};
auto is_codec_supported = [&codecs](int8_t codec) {
if (codec == parquet::UNCOMPRESSED) return true;
return std::find_if(codecs.begin(), codecs.end(), [codec](auto& cstats) {
return codec == cstats.compression_type;
}) != codecs.end();
};
CUDF_EXPECTS(std::all_of(chunks.begin(),
chunks.end(),
[&is_codec_supported](auto const& chunk) {
return is_codec_supported(chunk.codec);
}),
"Unsupported compression type");
for (auto& codec : codecs) {
for_each_codec_page(codec.compression_type, [&](size_t page) {
auto page_uncomp_size = pages[page].uncompressed_page_size;
total_decomp_size += page_uncomp_size;
codec.total_decomp_size += page_uncomp_size;
codec.max_decompressed_size = ::max(codec.max_decompressed_size, page_uncomp_size);
codec.num_pages++;
num_comp_pages++;
});
if (codec.compression_type == parquet::BROTLI && codec.num_pages > 0) {
debrotli_scratch.resize(get_gpu_debrotli_scratch_size(codec.num_pages), stream);
}
}
// Dispatch batches of pages to decompress for each codec
rmm::device_buffer decomp_pages(total_decomp_size, stream);
std::vector<device_span<uint8_t const>> comp_in;
comp_in.reserve(num_comp_pages);
std::vector<device_span<uint8_t>> comp_out;
comp_out.reserve(num_comp_pages);
// vectors to save v2 def and rep level data, if any
std::vector<device_span<uint8_t const>> copy_in;
copy_in.reserve(num_comp_pages);
std::vector<device_span<uint8_t>> copy_out;
copy_out.reserve(num_comp_pages);
rmm::device_uvector<compression_result> comp_res(num_comp_pages, stream);
thrust::fill(rmm::exec_policy(stream),
comp_res.begin(),
comp_res.end(),
compression_result{0, compression_status::FAILURE});
size_t decomp_offset = 0;
int32_t start_pos = 0;
for (const auto& codec : codecs) {
if (codec.num_pages == 0) { continue; }
for_each_codec_page(codec.compression_type, [&](size_t page_idx) {
auto const dst_base = static_cast<uint8_t*>(decomp_pages.data()) + decomp_offset;
auto& page = pages[page_idx];
// offset will only be non-zero for V2 pages
auto const offset = page.def_lvl_bytes + page.rep_lvl_bytes;
// for V2 need to copy def and rep level info into place, and then offset the
// input and output buffers. otherwise we'd have to keep both the compressed
// and decompressed data.
if (offset != 0) {
copy_in.emplace_back(page.page_data, offset);
copy_out.emplace_back(dst_base, offset);
}
comp_in.emplace_back(page.page_data + offset,
static_cast<size_t>(page.compressed_page_size - offset));
comp_out.emplace_back(dst_base + offset,
static_cast<size_t>(page.uncompressed_page_size - offset));
page.page_data = dst_base;
decomp_offset += page.uncompressed_page_size;
});
host_span<device_span<uint8_t const> const> comp_in_view{comp_in.data() + start_pos,
codec.num_pages};
auto const d_comp_in = cudf::detail::make_device_uvector_async(comp_in_view, stream);
host_span<device_span<uint8_t> const> comp_out_view(comp_out.data() + start_pos,
codec.num_pages);
auto const d_comp_out = cudf::detail::make_device_uvector_async(comp_out_view, stream);
device_span<compression_result> d_comp_res_view(comp_res.data() + start_pos, codec.num_pages);
switch (codec.compression_type) {
case parquet::GZIP:
gpuinflate(d_comp_in, d_comp_out, d_comp_res_view, gzip_header_included::YES, stream);
break;
case parquet::SNAPPY:
if (nvcomp_integration::is_stable_enabled()) {
nvcomp::batched_decompress(nvcomp::compression_type::SNAPPY,
d_comp_in,
d_comp_out,
d_comp_res_view,
codec.max_decompressed_size,
codec.total_decomp_size,
stream);
} else {
gpu_unsnap(d_comp_in, d_comp_out, d_comp_res_view, stream);
}
break;
case parquet::ZSTD:
nvcomp::batched_decompress(nvcomp::compression_type::ZSTD,
d_comp_in,
d_comp_out,
d_comp_res_view,
codec.max_decompressed_size,
codec.total_decomp_size,
stream);
break;
case parquet::BROTLI:
gpu_debrotli(d_comp_in,
d_comp_out,
d_comp_res_view,
debrotli_scratch.data(),
debrotli_scratch.size(),
stream);
break;
default: CUDF_FAIL("Unexpected decompression dispatch"); break;
}
start_pos += codec.num_pages;
}
CUDF_EXPECTS(thrust::all_of(rmm::exec_policy(stream),
comp_res.begin(),
comp_res.end(),
[] __device__(auto const& res) {
return res.status == compression_status::SUCCESS;
}),
"Error during decompression");
// now copy the uncompressed V2 def and rep level data
if (not copy_in.empty()) {
auto const d_copy_in = cudf::detail::make_device_uvector_async(copy_in, stream);
auto const d_copy_out = cudf::detail::make_device_uvector_async(copy_out, stream);
gpu_copy_uncompressed_blocks(d_copy_in, d_copy_out, stream);
stream.synchronize();
}
// Update the page information in device memory with the updated value of
// page_data; it now points to the uncompressed data buffer
pages.host_to_device(stream);
return decomp_pages;
}
} // namespace
void reader::impl::allocate_nesting_info()
{
auto const& chunks = _file_itm_data.chunks;
auto& pages = _file_itm_data.pages_info;
auto& page_nesting_info = _file_itm_data.page_nesting_info;
// compute total # of page_nesting infos needed and allocate space. doing this in one
// buffer to keep it to a single gpu allocation
size_t const total_page_nesting_infos = std::accumulate(
chunks.host_ptr(), chunks.host_ptr() + chunks.size(), 0, [&](int total, auto& chunk) {
// the schema of the input column
auto const& schema = _metadata->get_schema(chunk.src_col_schema);
auto const per_page_nesting_info_size = max(
schema.max_definition_level + 1, _metadata->get_output_nesting_depth(chunk.src_col_schema));
return total + (per_page_nesting_info_size * chunk.num_data_pages);
});
page_nesting_info = hostdevice_vector<gpu::PageNestingInfo>{total_page_nesting_infos, _stream};
// retrieve from the gpu so we can update
pages.device_to_host(_stream, true);
// update pointers in the PageInfos
int target_page_index = 0;
int src_info_index = 0;
for (size_t idx = 0; idx < chunks.size(); idx++) {
int src_col_schema = chunks[idx].src_col_schema;
auto& schema = _metadata->get_schema(src_col_schema);
auto const per_page_nesting_info_size = ::max(
schema.max_definition_level + 1, _metadata->get_output_nesting_depth(src_col_schema));
// skip my dict pages
target_page_index += chunks[idx].num_dict_pages;
for (int p_idx = 0; p_idx < chunks[idx].num_data_pages; p_idx++) {
pages[target_page_index + p_idx].nesting = page_nesting_info.device_ptr() + src_info_index;
pages[target_page_index + p_idx].num_nesting_levels = per_page_nesting_info_size;
src_info_index += per_page_nesting_info_size;
}
target_page_index += chunks[idx].num_data_pages;
}
// copy back to the gpu
pages.host_to_device(_stream);
// fill in
int nesting_info_index = 0;
std::map<int, std::pair<std::vector<int>, std::vector<int>>> depth_remapping;
for (size_t idx = 0; idx < chunks.size(); idx++) {
int src_col_schema = chunks[idx].src_col_schema;
// schema of the input column
auto& schema = _metadata->get_schema(src_col_schema);
// real depth of the output cudf column hierarchy (1 == no nesting, 2 == 1 level, etc)
int max_depth = _metadata->get_output_nesting_depth(src_col_schema);
// # of nesting infos stored per page for this column
    auto const per_page_nesting_info_size = std::max(schema.max_definition_level + 1, max_depth);
// if this column has lists, generate depth remapping
std::map<int, std::pair<std::vector<int>, std::vector<int>>> depth_remapping;
if (schema.max_repetition_level > 0) {
generate_depth_remappings(depth_remapping, src_col_schema, *_metadata);
}
// fill in host-side nesting info
int schema_idx = src_col_schema;
auto cur_schema = _metadata->get_schema(schema_idx);
int cur_depth = max_depth - 1;
while (schema_idx > 0) {
      // stub columns (basically the inner field of a list schema element) are not real columns.
// we can ignore them for the purposes of output nesting info
if (!cur_schema.is_stub()) {
// initialize each page within the chunk
for (int p_idx = 0; p_idx < chunks[idx].num_data_pages; p_idx++) {
gpu::PageNestingInfo* pni =
&page_nesting_info[nesting_info_index + (p_idx * per_page_nesting_info_size)];
// if we have lists, set our start and end depth remappings
if (schema.max_repetition_level > 0) {
auto remap = depth_remapping.find(src_col_schema);
CUDF_EXPECTS(remap != depth_remapping.end(),
"Could not find depth remapping for schema");
std::vector<int> const& rep_depth_remap = (remap->second.first);
std::vector<int> const& def_depth_remap = (remap->second.second);
for (size_t m = 0; m < rep_depth_remap.size(); m++) {
pni[m].start_depth = rep_depth_remap[m];
}
for (size_t m = 0; m < def_depth_remap.size(); m++) {
pni[m].end_depth = def_depth_remap[m];
}
}
// values indexed by output column index
pni[cur_depth].max_def_level = cur_schema.max_definition_level;
pni[cur_depth].max_rep_level = cur_schema.max_repetition_level;
pni[cur_depth].size = 0;
pni[cur_depth].type =
to_type_id(cur_schema, _strings_to_categorical, _timestamp_type.id());
pni[cur_depth].nullable = cur_schema.repetition_type == OPTIONAL;
}
// move up the hierarchy
cur_depth--;
}
// next schema
schema_idx = cur_schema.parent_idx;
cur_schema = _metadata->get_schema(schema_idx);
}
nesting_info_index += (per_page_nesting_info_size * chunks[idx].num_data_pages);
}
// copy nesting info to the device
page_nesting_info.host_to_device(_stream);
}
void reader::impl::load_and_decompress_data(std::vector<row_group_info> const& row_groups_info,
size_type num_rows)
{
// This function should never be called if `num_rows == 0`.
CUDF_EXPECTS(num_rows > 0, "Number of reading rows must not be zero.");
auto& raw_page_data = _file_itm_data.raw_page_data;
auto& decomp_page_data = _file_itm_data.decomp_page_data;
auto& chunks = _file_itm_data.chunks;
auto& pages_info = _file_itm_data.pages_info;
// Descriptors for all the chunks that make up the selected columns
const auto num_input_columns = _input_columns.size();
const auto num_chunks = row_groups_info.size() * num_input_columns;
chunks = hostdevice_vector<gpu::ColumnChunkDesc>(0, num_chunks, _stream);
// Association between each column chunk and its source
std::vector<size_type> chunk_source_map(num_chunks);
// Tracker for eventually deallocating compressed and uncompressed data
raw_page_data = std::vector<std::unique_ptr<datasource::buffer>>(num_chunks);
// Keep track of column chunk file offsets
std::vector<size_t> column_chunk_offsets(num_chunks);
// Initialize column chunk information
size_t total_decompressed_size = 0;
auto remaining_rows = num_rows;
std::vector<std::future<void>> read_rowgroup_tasks;
for (const auto& rg : row_groups_info) {
const auto& row_group = _metadata->get_row_group(rg.index, rg.source_index);
auto const row_group_start = rg.start_row;
auto const row_group_source = rg.source_index;
auto const row_group_rows = std::min<int>(remaining_rows, row_group.num_rows);
auto const io_chunk_idx = chunks.size();
// generate ColumnChunkDesc objects for everything to be decoded (all input columns)
for (size_t i = 0; i < num_input_columns; ++i) {
auto col = _input_columns[i];
// look up metadata
auto& col_meta = _metadata->get_column_metadata(rg.index, rg.source_index, col.schema_idx);
auto& schema = _metadata->get_schema(col.schema_idx);
auto [type_width, clock_rate, converted_type] =
conversion_info(to_type_id(schema, _strings_to_categorical, _timestamp_type.id()),
_timestamp_type.id(),
schema.type,
schema.converted_type,
schema.type_length);
column_chunk_offsets[chunks.size()] =
(col_meta.dictionary_page_offset != 0)
          ? std::min(col_meta.data_page_offset, col_meta.dictionary_page_offset)
: col_meta.data_page_offset;
chunks.push_back(gpu::ColumnChunkDesc(col_meta.total_compressed_size,
nullptr,
col_meta.num_values,
schema.type,
type_width,
row_group_start,
row_group_rows,
schema.max_definition_level,
schema.max_repetition_level,
_metadata->get_output_nesting_depth(col.schema_idx),
required_bits(schema.max_definition_level),
required_bits(schema.max_repetition_level),
col_meta.codec,
converted_type,
schema.logical_type,
schema.decimal_precision,
clock_rate,
i,
col.schema_idx));
// Map each column chunk to its column index and its source index
chunk_source_map[chunks.size() - 1] = row_group_source;
if (col_meta.codec != Compression::UNCOMPRESSED) {
total_decompressed_size += col_meta.total_uncompressed_size;
}
}
// Read compressed chunk data to device memory
read_rowgroup_tasks.push_back(read_column_chunks_async(_sources,
raw_page_data,
chunks,
io_chunk_idx,
chunks.size(),
column_chunk_offsets,
chunk_source_map,
_stream));
remaining_rows -= row_group.num_rows;
}
for (auto& task : read_rowgroup_tasks) {
task.wait();
}
CUDF_EXPECTS(remaining_rows <= 0, "All rows data must be read.");
// Process dataset chunk pages into output columns
auto const total_pages = count_page_headers(chunks, _stream);
pages_info = hostdevice_vector<gpu::PageInfo>(total_pages, total_pages, _stream);
if (total_pages > 0) {
// decoding of column/page information
decode_page_headers(chunks, pages_info, _stream);
if (total_decompressed_size > 0) {
decomp_page_data = decompress_page_data(chunks, pages_info, _stream);
// Free compressed data
for (size_t c = 0; c < chunks.size(); c++) {
if (chunks[c].codec != parquet::Compression::UNCOMPRESSED) {
raw_page_data[c].reset();
// TODO: Check if this is called
}
}
}
// build output column info
// walk the schema, building out_buffers that mirror what our final cudf columns will look
// like. important : there is not necessarily a 1:1 mapping between input columns and output
// columns. For example, parquet does not explicitly store a ColumnChunkDesc for struct
// columns. The "structiness" is simply implied by the schema. For example, this schema:
// required group field_id=1 name {
// required binary field_id=2 firstname (String);
// required binary field_id=3 middlename (String);
// required binary field_id=4 lastname (String);
// }
// will only contain 3 columns of data (firstname, middlename, lastname). But of course
// "name" is a struct column that we want to return, so we have to make sure that we
// create it ourselves.
// std::vector<output_column_info> output_info = build_output_column_info();
// nesting information (sizes, etc) stored -per page-
// note : even for flat schemas, we allocate 1 level of "nesting" info
allocate_nesting_info();
}
}
namespace {
struct cumulative_row_info {
size_t row_count; // cumulative row count
size_t size_bytes; // cumulative size in bytes
int key; // schema index
};
#if defined(PREPROCESS_DEBUG)
void print_pages(hostdevice_vector<gpu::PageInfo>& pages, rmm::cuda_stream_view _stream)
{
pages.device_to_host(_stream, true);
for (size_t idx = 0; idx < pages.size(); idx++) {
auto const& p = pages[idx];
// skip dictionary pages
if (p.flags & gpu::PAGEINFO_FLAGS_DICTIONARY) { continue; }
printf(
"P(%lu, s:%d): chunk_row(%d), num_rows(%d), skipped_values(%d), skipped_leaf_values(%d)\n",
idx,
p.src_col_schema,
p.chunk_row,
p.num_rows,
p.skipped_values,
p.skipped_leaf_values);
}
}
void print_cumulative_page_info(hostdevice_vector<gpu::PageInfo>& pages,
rmm::device_uvector<int32_t> const& page_index,
rmm::device_uvector<cumulative_row_info> const& c_info,
rmm::cuda_stream_view stream)
{
pages.device_to_host(stream, true);
printf("------------\nCumulative sizes by page\n");
std::vector<int> schemas(pages.size());
std::vector<int> h_page_index(pages.size());
hipMemcpy(
h_page_index.data(), page_index.data(), sizeof(int) * pages.size(), hipMemcpyDeviceToHost);
std::vector<cumulative_row_info> h_cinfo(pages.size());
hipMemcpy(h_cinfo.data(),
c_info.data(),
sizeof(cumulative_row_info) * pages.size(),
hipMemcpyDeviceToHost);
auto schema_iter = cudf::detail::make_counting_transform_iterator(
0, [&](size_type i) { return pages[h_page_index[i]].src_col_schema; });
thrust::copy(thrust::seq, schema_iter, schema_iter + pages.size(), schemas.begin());
auto last = thrust::unique(thrust::seq, schemas.begin(), schemas.end());
schemas.resize(last - schemas.begin());
printf("Num schemas: %lu\n", schemas.size());
for (size_t idx = 0; idx < schemas.size(); idx++) {
printf("Schema %d\n", schemas[idx]);
for (size_t pidx = 0; pidx < pages.size(); pidx++) {
auto const& page = pages[h_page_index[pidx]];
if (page.flags & gpu::PAGEINFO_FLAGS_DICTIONARY || page.src_col_schema != schemas[idx]) {
continue;
}
printf("\tP: {%lu, %lu}\n", h_cinfo[pidx].row_count, h_cinfo[pidx].size_bytes);
}
}
}
void print_cumulative_row_info(
host_span<cumulative_row_info const> sizes,
std::string const& label,
std::optional<std::vector<gpu::chunk_read_info>> splits = std::nullopt)
{
if (splits.has_value()) {
printf("------------\nSplits\n");
for (size_t idx = 0; idx < splits->size(); idx++) {
printf("{%lu, %lu}\n", splits.value()[idx].skip_rows, splits.value()[idx].num_rows);
}
}
printf("------------\nCumulative sizes %s\n", label.c_str());
for (size_t idx = 0; idx < sizes.size(); idx++) {
printf("{%lu, %lu, %d}", sizes[idx].row_count, sizes[idx].size_bytes, sizes[idx].key);
if (splits.has_value()) {
// if we have a split at this row count and this is the last instance of this row count
auto start = thrust::make_transform_iterator(
splits->begin(), [](gpu::chunk_read_info const& i) { return i.skip_rows; });
auto end = start + splits->size();
auto split = std::find(start, end, sizes[idx].row_count);
auto const split_index = [&]() -> int {
if (split != end &&
((idx == sizes.size() - 1) || (sizes[idx + 1].row_count > sizes[idx].row_count))) {
return static_cast<int>(std::distance(start, split));
}
return idx == 0 ? 0 : -1;
}();
if (split_index >= 0) {
printf(" <-- split {%lu, %lu}",
splits.value()[split_index].skip_rows,
splits.value()[split_index].num_rows);
}
}
printf("\n");
}
}
#endif // PREPROCESS_DEBUG
/**
* @brief Functor which reduces two cumulative_row_info structs of the same key.
*/
struct cumulative_row_sum {
cumulative_row_info operator()
__device__(cumulative_row_info const& a, cumulative_row_info const& b) const
{
return cumulative_row_info{a.row_count + b.row_count, a.size_bytes + b.size_bytes, a.key};
}
};
/**
* @brief Functor which computes the total data size for a given type of cudf column.
*
* In the case of strings, the return size does not include the chars themselves. That
* information is tracked separately (see PageInfo::str_bytes).
*/
struct row_size_functor {
__device__ size_t validity_size(size_t num_rows, bool nullable)
{
return nullable ? (cudf::util::div_rounding_up_safe(num_rows, size_t{32}) * 4) : 0;
}
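  // Example: for 100 nullable rows, div_rounding_up_safe(100, 32) = 4 bitmask words, so the
  // validity cost is 4 * 4 = 16 bytes; a non-nullable column contributes 0 bytes of validity.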
template <typename T>
__device__ size_t operator()(size_t num_rows, bool nullable)
{
auto const element_size = sizeof(device_storage_type_t<T>);
return (element_size * num_rows) + validity_size(num_rows, nullable);
}
};
template <>
__device__ size_t row_size_functor::operator()<list_view>(size_t num_rows, bool nullable)
{
auto const offset_size = sizeof(offset_type);
// NOTE: Adding the + 1 offset here isn't strictly correct. There will only be 1 extra offset
// for the entire column, whereas this is adding an extra offset per page. So we will get a
  // small over-estimate of the real size, on the order of (# of pages * 4 bytes). It seems better
// to overestimate size somewhat than to underestimate it and potentially generate chunks
// that are too large.
return (offset_size * (num_rows + 1)) + validity_size(num_rows, nullable);
}
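// Example of the over-estimate: a list column whose values land in 10 pages is charged 10 extra
// offsets (10 * 4 bytes) here, even though the materialized column only needs a single
// terminating offset.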
template <>
__device__ size_t row_size_functor::operator()<struct_view>(size_t num_rows, bool nullable)
{
return validity_size(num_rows, nullable);
}
template <>
__device__ size_t row_size_functor::operator()<string_view>(size_t num_rows, bool nullable)
{
// only returns the size of offsets and validity. the size of the actual string chars
// is tracked separately.
auto const offset_size = sizeof(offset_type);
// see note about offsets in the list_view template.
return (offset_size * (num_rows + 1)) + validity_size(num_rows, nullable);
}
/**
* @brief Functor which computes the total output cudf data size for all of
* the data in this page.
*
* Sums across all nesting levels.
*/
struct get_cumulative_row_info {
gpu::PageInfo const* const pages;
__device__ cumulative_row_info operator()(size_type index)
{
auto const& page = pages[index];
if (page.flags & gpu::PAGEINFO_FLAGS_DICTIONARY) {
return cumulative_row_info{0, 0, page.src_col_schema};
}
// total nested size, not counting string data
auto iter =
cudf::detail::make_counting_transform_iterator(0, [page, index] __device__(size_type i) {
auto const& pni = page.nesting[i];
return cudf::type_dispatcher(
data_type{pni.type}, row_size_functor{}, pni.size, pni.nullable);
});
size_t const row_count = static_cast<size_t>(page.nesting[0].size);
return {row_count,
thrust::reduce(thrust::seq, iter, iter + page.num_nesting_levels) + page.str_bytes,
page.src_col_schema};
}
};
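// Example output: a dictionary page always yields {0, 0, schema}; a data page with 100 top-level
// rows yields {100, <fixed-width + validity + offset bytes summed across its nesting levels> +
// str_bytes, schema}.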
/**
* @brief Functor which computes the effective size of all input columns by page.
*
* For a given row, we want to find the cost of all pages for all columns involved
* in loading up to that row. The complication here is that not all pages are the
* same size between columns. Example:
*
* page row counts
* Column A: 0 <----> 100 <----> 200
* Column B: 0 <---------------> 200 <--------> 400
 *                                        |
* if we decide to split at row 100, we don't really know the actual amount of bytes in column B
* at that point. So we have to proceed as if we are taking the bytes from all 200 rows of that
* page. Essentially, a conservative over-estimate of the real size.
*/
struct row_total_size {
cumulative_row_info const* c_info;
size_type const* key_offsets;
size_t num_keys;
__device__ cumulative_row_info operator()(cumulative_row_info const& i)
{
// sum sizes for each input column at this row
size_t sum = 0;
for (int idx = 0; idx < num_keys; idx++) {
auto const start = key_offsets[idx];
auto const end = key_offsets[idx + 1];
auto iter = cudf::detail::make_counting_transform_iterator(
0, [&] __device__(size_type i) { return c_info[i].row_count; });
auto const page_index =
thrust::lower_bound(thrust::seq, iter + start, iter + end, i.row_count) - iter;
sum += c_info[page_index].size_bytes;
}
return {i.row_count, sum, i.key};
}
};
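// Example (using the page layout in the comment above): for the entry at row_count 100 produced
// by column A, the lower_bound into column B's cumulative counts lands on B's single
// {200, B_bytes} entry, so all of B_bytes is added, i.e. the conservative over-estimate
// described above.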
/**
* @brief Given a vector of cumulative {row_count, byte_size} pairs and a chunk read
* limit, determine the set of splits.
*
* @param sizes Vector of cumulative {row_count, byte_size} pairs
* @param num_rows Total number of rows to read
* @param chunk_read_limit Limit on total number of bytes to be returned per read, for all columns
*/
std::vector<gpu::chunk_read_info> find_splits(std::vector<cumulative_row_info> const& sizes,
size_t num_rows,
size_t chunk_read_limit)
{
// now we have an array of {row_count, real output bytes}. just walk through it and generate
// splits.
// TODO: come up with a clever way to do this entirely in parallel. For now, as long as batch
// sizes are reasonably large, this shouldn't iterate too many times
std::vector<gpu::chunk_read_info> splits;
{
size_t cur_pos = 0;
size_t cur_cumulative_size = 0;
size_t cur_row_count = 0;
auto start = thrust::make_transform_iterator(sizes.begin(), [&](cumulative_row_info const& i) {
return i.size_bytes - cur_cumulative_size;
});
auto end = start + sizes.size();
while (cur_row_count < num_rows) {
int64_t split_pos =
thrust::lower_bound(thrust::seq, start + cur_pos, end, chunk_read_limit) - start;
// if we're past the end, or if the returned bucket is > than the chunk_read_limit, move back
// one.
if (static_cast<size_t>(split_pos) >= sizes.size() ||
(sizes[split_pos].size_bytes - cur_cumulative_size > chunk_read_limit)) {
split_pos--;
}
// best-try. if we can't find something that'll fit, we have to go bigger. we're doing this in
// a loop because all of the cumulative sizes for all the pages are sorted into one big list.
// so if we had two columns, both of which had an entry {1000, 10000}, that entry would be in
// the list twice. so we have to iterate until we skip past all of them. The idea is that we
// either do this, or we have to call unique() on the input first.
while (split_pos < (static_cast<int64_t>(sizes.size()) - 1) &&
(split_pos < 0 || sizes[split_pos].row_count == cur_row_count)) {
split_pos++;
}
auto const start_row = cur_row_count;
cur_row_count = sizes[split_pos].row_count;
splits.push_back(gpu::chunk_read_info{start_row, cur_row_count - start_row});
cur_pos = split_pos;
cur_cumulative_size = sizes[split_pos].size_bytes;
}
}
// print_cumulative_row_info(sizes, "adjusted", splits);
return splits;
}
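// Worked example with hypothetical numbers: given aggregated sizes {100, 600}, {200, 1200},
// {300, 1800} (rows, cumulative bytes) and chunk_read_limit = 1000, the loop produces the splits
// {0, 100}, {100, 100}, {200, 100}; each chunk covers roughly 600 bytes of output, the largest
// step that stays under the limit.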
/**
* @brief Given a set of pages that have had their sizes computed by nesting level and
* a limit on total read size, generate a set of {skip_rows, num_rows} pairs representing
* a set of reads that will generate output columns of total size <= `chunk_read_limit` bytes.
*
* @param pages All pages in the file
* @param id Additional intermediate information required to process the pages
* @param num_rows Total number of rows to read
* @param chunk_read_limit Limit on total number of bytes to be returned per read, for all columns
* @param stream CUDA stream to use, default 0
*/
std::vector<gpu::chunk_read_info> compute_splits(hostdevice_vector<gpu::PageInfo>& pages,
gpu::chunk_intermediate_data const& id,
size_t num_rows,
size_t chunk_read_limit,
rmm::cuda_stream_view stream)
{
auto const& page_keys = id.page_keys;
auto const& page_index = id.page_index;
// generate cumulative row counts and sizes
rmm::device_uvector<cumulative_row_info> c_info(page_keys.size(), stream);
// convert PageInfo to cumulative_row_info
auto page_input = thrust::make_transform_iterator(page_index.begin(),
get_cumulative_row_info{pages.device_ptr()});
thrust::inclusive_scan_by_key(rmm::exec_policy(stream),
page_keys.begin(),
page_keys.end(),
page_input,
c_info.begin(),
thrust::equal_to{},
cumulative_row_sum{});
// print_cumulative_page_info(pages, page_index, c_info, stream);
// sort by row count
rmm::device_uvector<cumulative_row_info> c_info_sorted{c_info, stream};
thrust::sort(rmm::exec_policy(stream),
c_info_sorted.begin(),
c_info_sorted.end(),
[] __device__(cumulative_row_info const& a, cumulative_row_info const& b) {
return a.row_count < b.row_count;
});
std::vector<cumulative_row_info> h_c_info_sorted(c_info_sorted.size());
hipMemcpy(h_c_info_sorted.data(),
c_info_sorted.data(),
sizeof(cumulative_row_info) * c_info_sorted.size(),
hipMemcpyDeviceToHost);
// print_cumulative_row_info(h_c_info_sorted, "raw");
// generate key offsets (offsets to the start of each partition of keys). worst case is 1 page per
// key
rmm::device_uvector<size_type> key_offsets(page_keys.size() + 1, stream);
auto const key_offsets_end = thrust::reduce_by_key(rmm::exec_policy(stream),
page_keys.begin(),
page_keys.end(),
thrust::make_constant_iterator(1),
thrust::make_discard_iterator(),
key_offsets.begin())
.second;
size_t const num_unique_keys = key_offsets_end - key_offsets.begin();
thrust::exclusive_scan(
rmm::exec_policy(stream), key_offsets.begin(), key_offsets.end(), key_offsets.begin());
// adjust the cumulative info such that for each row count, the size includes any pages that span
// that row count. this is so that if we have this case:
// page row counts
// Column A: 0 <----> 100 <----> 200
// Column B: 0 <---------------> 200 <--------> 400
// |
// if we decide to split at row 100, we don't really know the actual amount of bytes in column B
// at that point. So we have to proceed as if we are taking the bytes from all 200 rows of that
// page.
//
rmm::device_uvector<cumulative_row_info> aggregated_info(c_info.size(), stream);
thrust::transform(rmm::exec_policy(stream),
c_info_sorted.begin(),
c_info_sorted.end(),
aggregated_info.begin(),
row_total_size{c_info.data(), key_offsets.data(), num_unique_keys});
// bring back to the cpu
std::vector<cumulative_row_info> h_aggregated_info(aggregated_info.size());
hipMemcpyAsync(h_aggregated_info.data(),
aggregated_info.data(),
sizeof(cumulative_row_info) * c_info.size(),
hipMemcpyDeviceToHost,
stream);
stream.synchronize();
return find_splits(h_aggregated_info, num_rows, chunk_read_limit);
}
struct get_page_chunk_idx {
__device__ size_type operator()(gpu::PageInfo const& page) { return page.chunk_idx; }
};
struct get_page_num_rows {
__device__ size_type operator()(gpu::PageInfo const& page) { return page.num_rows; }
};
struct get_page_schema {
__device__ size_type operator()(gpu::PageInfo const& page) { return page.src_col_schema; }
};
/**
* @brief Returns the size field of a PageInfo struct for a given depth, keyed by schema.
*/
struct get_page_nesting_size {
size_type const src_col_schema;
size_type const depth;
gpu::PageInfo const* const pages;
__device__ size_type operator()(int index) const
{
auto const& page = pages[index];
if (page.src_col_schema != src_col_schema || page.flags & gpu::PAGEINFO_FLAGS_DICTIONARY) {
return 0;
}
return page.nesting[depth].batch_size;
}
};
/**
* @brief Writes to the chunk_row field of the PageInfo struct.
*/
struct chunk_row_output_iter {
gpu::PageInfo* p;
using value_type = size_type;
using difference_type = size_type;
using pointer = size_type*;
using reference = size_type&;
using iterator_category = thrust::output_device_iterator_tag;
__host__ __device__ chunk_row_output_iter operator+(int i)
{
return chunk_row_output_iter{p + i};
}
__host__ __device__ void operator++() { p++; }
__device__ reference operator[](int i) { return p[i].chunk_row; }
__device__ reference operator*() { return p->chunk_row; }
};
/**
* @brief Writes to the page_start_value field of the PageNestingInfo struct, keyed by schema.
*/
struct start_offset_output_iterator {
gpu::PageInfo* pages;
int const* page_indices;
int cur_index;
int src_col_schema;
int nesting_depth;
int empty = 0;
using value_type = size_type;
using difference_type = size_type;
using pointer = size_type*;
using reference = size_type&;
using iterator_category = thrust::output_device_iterator_tag;
constexpr void operator=(start_offset_output_iterator const& other)
{
pages = other.pages;
page_indices = other.page_indices;
cur_index = other.cur_index;
src_col_schema = other.src_col_schema;
nesting_depth = other.nesting_depth;
}
constexpr start_offset_output_iterator operator+(int i)
{
return start_offset_output_iterator{
pages, page_indices, cur_index + i, src_col_schema, nesting_depth};
}
constexpr void operator++() { cur_index++; }
__device__ reference operator[](int i) { return dereference(cur_index + i); }
__device__ reference operator*() { return dereference(cur_index); }
private:
__device__ reference dereference(int index)
{
gpu::PageInfo const& p = pages[page_indices[index]];
if (p.src_col_schema != src_col_schema || p.flags & gpu::PAGEINFO_FLAGS_DICTIONARY) {
return empty;
}
return p.nesting[nesting_depth].page_start_value;
}
};
} // anonymous namespace
void reader::impl::preprocess_pages(size_t skip_rows,
size_t num_rows,
bool uses_custom_row_bounds,
size_t chunk_read_limit)
{
auto& chunks = _file_itm_data.chunks;
auto& pages = _file_itm_data.pages_info;
// iterate over all input columns and determine if they contain lists so we can further
// preprocess them.
bool has_lists = false;
for (size_t idx = 0; idx < _input_columns.size(); idx++) {
auto const& input_col = _input_columns[idx];
size_t const max_depth = input_col.nesting_depth();
auto* cols = &_output_buffers;
for (size_t l_idx = 0; l_idx < max_depth; l_idx++) {
auto& out_buf = (*cols)[input_col.nesting[l_idx]];
cols = &out_buf.children;
// if this has a list parent, we have to get column sizes from the
// data computed during gpu::ComputePageSizes
if (out_buf.user_data & PARQUET_COLUMN_BUFFER_FLAG_HAS_LIST_PARENT) {
has_lists = true;
break;
}
}
if (has_lists) { break; }
}
// generate string dict indices if necessary
{
auto is_dict_chunk = [](const gpu::ColumnChunkDesc& chunk) {
return (chunk.data_type & 0x7) == BYTE_ARRAY && chunk.num_dict_pages > 0;
};
// Count the number of string dictionary entries
// NOTE: Assumes first page in the chunk is always the dictionary page
size_t total_str_dict_indexes = 0;
for (size_t c = 0, page_count = 0; c < chunks.size(); c++) {
if (is_dict_chunk(chunks[c])) {
total_str_dict_indexes += pages[page_count].num_input_values;
}
page_count += chunks[c].max_num_pages;
}
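    // For example, a BYTE_ARRAY chunk whose dictionary page holds 1000 values reserves 1000
    // string_index_pair entries here; chunks without a dictionary page reserve nothing.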
// Build index for string dictionaries since they can't be indexed
// directly due to variable-sized elements
_chunk_itm_data.str_dict_index =
cudf::detail::make_zeroed_device_uvector_async<string_index_pair>(total_str_dict_indexes,
_stream);
// Update chunks with pointers to string dict indices
for (size_t c = 0, page_count = 0, str_ofs = 0; c < chunks.size(); c++) {
input_column_info const& input_col = _input_columns[chunks[c].src_col_index];
CUDF_EXPECTS(input_col.schema_idx == chunks[c].src_col_schema,
"Column/page schema index mismatch");
if (is_dict_chunk(chunks[c])) {
chunks[c].str_dict_index = _chunk_itm_data.str_dict_index.data() + str_ofs;
str_ofs += pages[page_count].num_input_values;
}
// column_data_base will always point to leaf data, even for nested types.
page_count += chunks[c].max_num_pages;
}
if (total_str_dict_indexes > 0) {
chunks.host_to_device(_stream);
gpu::BuildStringDictionaryIndex(chunks.device_ptr(), chunks.size(), _stream);
}
}
// intermediate data we will need for further chunked reads
if (has_lists || chunk_read_limit > 0) {
// computes:
// PageNestingInfo::num_rows for each page. the true number of rows (taking repetition into
// account), not just the number of values. PageNestingInfo::size for each level of nesting, for
// each page.
//
// we will be applying a later "trim" pass if skip_rows/num_rows is being used, which can happen
// if:
// - user has passed custom row bounds
// - we will be doing a chunked read
gpu::ComputePageSizes(pages,
chunks,
0, // 0-max size_t. process all possible rows
std::numeric_limits<size_t>::max(),
true, // compute num_rows
chunk_read_limit > 0, // compute string sizes
_stream);
// computes:
// PageInfo::chunk_row (the absolute start row index) for all pages
// Note: this is doing some redundant work for pages in flat hierarchies. chunk_row has already
// been computed during header decoding. the overall amount of work here is very small though.
auto key_input = thrust::make_transform_iterator(pages.device_ptr(), get_page_chunk_idx{});
auto page_input = thrust::make_transform_iterator(pages.device_ptr(), get_page_num_rows{});
thrust::exclusive_scan_by_key(rmm::exec_policy(_stream),
key_input,
key_input + pages.size(),
page_input,
chunk_row_output_iter{pages.device_ptr()});
// compute page ordering.
//
// ordering of pages is by input column schema, repeated across row groups. so
// if we had 3 columns, each with 2 pages, and 1 row group, our schema values might look like
//
// 1, 1, 2, 2, 3, 3
//
// However, if we had more than one row group, the pattern would be
//
// 1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3
// ^ row group 0 |
// ^ row group 1
//
// To use exclusive_scan_by_key, the ordering we actually want is
//
// 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3
//
// We also need to preserve key-relative page ordering, so we need to use a stable sort.
_chunk_itm_data.page_keys = rmm::device_uvector<int>(pages.size(), _stream);
_chunk_itm_data.page_index = rmm::device_uvector<int>(pages.size(), _stream);
auto& page_keys = _chunk_itm_data.page_keys;
auto& page_index = _chunk_itm_data.page_index;
{
thrust::transform(rmm::exec_policy(_stream),
pages.device_ptr(),
pages.device_ptr() + pages.size(),
page_keys.begin(),
get_page_schema{});
thrust::sequence(rmm::exec_policy(_stream), page_index.begin(), page_index.end());
thrust::stable_sort_by_key(rmm::exec_policy(_stream),
page_keys.begin(),
page_keys.end(),
page_index.begin(),
thrust::less<int>());
}
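    // With the example above (3 columns, 2 pages each, 2 row groups), page_keys becomes
    // 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3 and page_index holds the stable permutation
    // 0, 1, 6, 7, 2, 3, 8, 9, 4, 5, 10, 11 that maps back into the original page order.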
// retrieve pages back
pages.device_to_host(_stream, true);
#if defined(PREPROCESS_DEBUG)
print_pages(pages, _stream);
#endif
}
// compute splits if necessary. otherwise return a single split representing
// the whole file.
_chunk_read_info = chunk_read_limit > 0
? compute_splits(pages, _chunk_itm_data, num_rows, chunk_read_limit, _stream)
: std::vector<gpu::chunk_read_info>{{skip_rows, num_rows}};
}
void reader::impl::allocate_columns(size_t skip_rows, size_t num_rows, bool uses_custom_row_bounds)
{
auto const& chunks = _file_itm_data.chunks;
auto& pages = _file_itm_data.pages_info;
// Should not reach here if there is no page data.
CUDF_EXPECTS(pages.size() > 0, "There is no page to parse");
// computes:
// PageNestingInfo::batch_size for each level of nesting, for each page, taking row bounds into
// account. PageInfo::skipped_values, which tells us where to start decoding in the input to
// respect the user bounds. It is only necessary to do this second pass if uses_custom_row_bounds
// is set (if the user has specified artificial bounds).
if (uses_custom_row_bounds) {
gpu::ComputePageSizes(pages,
chunks,
skip_rows,
num_rows,
false, // num_rows is already computed
false, // no need to compute string sizes
_stream);
#if defined(PREPROCESS_DEBUG)
print_pages(pages, _stream);
#endif
}
// iterate over all input columns and allocate any associated output
// buffers if they are not part of a list hierarchy. mark down
// if we have any list columns that need further processing.
bool has_lists = false;
for (size_t idx = 0; idx < _input_columns.size(); idx++) {
auto const& input_col = _input_columns[idx];
size_t const max_depth = input_col.nesting_depth();
auto* cols = &_output_buffers;
for (size_t l_idx = 0; l_idx < max_depth; l_idx++) {
auto& out_buf = (*cols)[input_col.nesting[l_idx]];
cols = &out_buf.children;
// if this has a list parent, we have to get column sizes from the
// data computed during gpu::ComputePageSizes
if (out_buf.user_data & PARQUET_COLUMN_BUFFER_FLAG_HAS_LIST_PARENT) {
has_lists = true;
}
// if we haven't already processed this column because it is part of a struct hierarchy
else if (out_buf.size == 0) {
// add 1 for the offset if this is a list column
out_buf.create(
out_buf.type.id() == type_id::LIST && l_idx < max_depth ? num_rows + 1 : num_rows,
_stream,
_mr);
}
}
}
// compute output column sizes by examining the pages of the -input- columns
if (has_lists) {
auto& page_keys = _chunk_itm_data.page_keys;
auto& page_index = _chunk_itm_data.page_index;
for (size_t idx = 0; idx < _input_columns.size(); idx++) {
auto const& input_col = _input_columns[idx];
auto src_col_schema = input_col.schema_idx;
size_t max_depth = input_col.nesting_depth();
auto* cols = &_output_buffers;
for (size_t l_idx = 0; l_idx < input_col.nesting_depth(); l_idx++) {
auto& out_buf = (*cols)[input_col.nesting[l_idx]];
cols = &out_buf.children;
// size iterator. indexes pages by sorted order
auto size_input = thrust::make_transform_iterator(
page_index.begin(),
get_page_nesting_size{src_col_schema, static_cast<size_type>(l_idx), pages.device_ptr()});
        // if this buffer is part of a list hierarchy, we need to determine its
// final size and allocate it here.
//
// for struct columns, higher levels of the output columns are shared between input
// columns. so don't compute any given level more than once.
if ((out_buf.user_data & PARQUET_COLUMN_BUFFER_FLAG_HAS_LIST_PARENT) && out_buf.size == 0) {
int size =
thrust::reduce(rmm::exec_policy(_stream), size_input, size_input + pages.size());
// if this is a list column add 1 for non-leaf levels for the terminating offset
if (out_buf.type.id() == type_id::LIST && l_idx < max_depth) { size++; }
// allocate
out_buf.create(size, _stream, _mr);
}
// for nested hierarchies, compute per-page start offset
if (input_col.has_repetition) {
thrust::exclusive_scan_by_key(
rmm::exec_policy(_stream),
page_keys.begin(),
page_keys.end(),
size_input,
start_offset_output_iterator{pages.device_ptr(),
page_index.begin(),
0,
static_cast<int>(src_col_schema),
static_cast<int>(l_idx)});
}
}
}
}
}
} // namespace cudf::io::detail::parquet
| 500d775375f5a1ff2ff090965ccf25e9e5aafcb7.cu | /*
* Copyright (c) 2022, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "reader_impl.hpp"
#include <io/comp/nvcomp_adapter.hpp>
#include <io/utilities/config_utils.hpp>
#include <io/utilities/time_utils.cuh>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/utilities/integer_utils.hpp>
#include <cudf/detail/utilities/vector_factories.hpp>
#include <rmm/exec_policy.hpp>
#include <thrust/binary_search.h>
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/iterator_categories.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/logical.h>
#include <thrust/reduce.h>
#include <thrust/scan.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
#include <thrust/unique.h>
#include <numeric>
namespace cudf::io::detail::parquet {
namespace {
/**
* @brief Generate depth remappings for repetition and definition levels.
*
* When dealing with columns that contain lists, we must examine incoming
* repetition and definition level pairs to determine what range of output nesting
* is indicated when adding new values. This function generates the mappings of
* the R/D levels to those start/end bounds
*
* @param remap Maps column schema index to the R/D remapping vectors for that column
* @param src_col_schema The column schema to generate the new mapping for
* @param md File metadata information
*/
void generate_depth_remappings(std::map<int, std::pair<std::vector<int>, std::vector<int>>>& remap,
int src_col_schema,
aggregate_reader_metadata const& md)
{
// already generated for this level
if (remap.find(src_col_schema) != remap.end()) { return; }
auto schema = md.get_schema(src_col_schema);
int max_depth = md.get_output_nesting_depth(src_col_schema);
CUDF_EXPECTS(remap.find(src_col_schema) == remap.end(),
"Attempting to remap a schema more than once");
auto inserted =
remap.insert(std::pair<int, std::pair<std::vector<int>, std::vector<int>>>{src_col_schema, {}});
auto& depth_remap = inserted.first->second;
std::vector<int>& rep_depth_remap = (depth_remap.first);
rep_depth_remap.resize(schema.max_repetition_level + 1);
std::vector<int>& def_depth_remap = (depth_remap.second);
def_depth_remap.resize(schema.max_definition_level + 1);
// the key:
// for incoming level values R/D
// add values starting at the shallowest nesting level X has repetition level R
// until you reach the deepest nesting level Y that corresponds to the repetition level R1
// held by the nesting level that has definition level D
//
// Example: a 3 level struct with a list at the bottom
//
// R / D Depth
// level0 0 / 1 0
// level1 0 / 2 1
// level2 0 / 3 2
// list 0 / 3 3
// element 1 / 4 4
//
// incoming R/D : 0, 0 -> add values from depth 0 to 3 (def level 0 always maps to depth 0)
// incoming R/D : 0, 1 -> add values from depth 0 to 3
// incoming R/D : 0, 2 -> add values from depth 0 to 3
// incoming R/D : 1, 4 -> add values from depth 4 to 4
//
// Note : the -validity- of values is simply checked by comparing the incoming D value against the
// D value of the given nesting level (incoming D >= the D for the nesting level == valid,
// otherwise NULL). The tricky part is determining what nesting levels to add values at.
//
// For schemas with no repetition level (no lists), X is always 0 and Y is always max nesting
// depth.
//
// compute "X" from above
for (int s_idx = schema.max_repetition_level; s_idx >= 0; s_idx--) {
auto find_shallowest = [&](int r) {
int shallowest = -1;
int cur_depth = max_depth - 1;
int schema_idx = src_col_schema;
while (schema_idx > 0) {
auto cur_schema = md.get_schema(schema_idx);
if (cur_schema.max_repetition_level == r) {
// if this is a repeated field, map it one level deeper
shallowest = cur_schema.is_stub() ? cur_depth + 1 : cur_depth;
}
// if it's one-level encoding list
else if (cur_schema.is_one_level_list()) {
shallowest = cur_depth - 1;
}
if (!cur_schema.is_stub()) { cur_depth--; }
schema_idx = cur_schema.parent_idx;
}
return shallowest;
};
rep_depth_remap[s_idx] = find_shallowest(s_idx);
}
// compute "Y" from above
for (int s_idx = schema.max_definition_level; s_idx >= 0; s_idx--) {
auto find_deepest = [&](int d) {
SchemaElement prev_schema;
int schema_idx = src_col_schema;
int r1 = 0;
while (schema_idx > 0) {
SchemaElement cur_schema = md.get_schema(schema_idx);
if (cur_schema.max_definition_level == d) {
// if this is a repeated field, map it one level deeper
r1 = cur_schema.is_stub() ? prev_schema.max_repetition_level
: cur_schema.max_repetition_level;
break;
}
prev_schema = cur_schema;
schema_idx = cur_schema.parent_idx;
}
// we now know R1 from above. return the deepest nesting level that has the
// same repetition level
schema_idx = src_col_schema;
int depth = max_depth - 1;
while (schema_idx > 0) {
SchemaElement cur_schema = md.get_schema(schema_idx);
if (cur_schema.max_repetition_level == r1) {
// if this is a repeated field, map it one level deeper
depth = cur_schema.is_stub() ? depth + 1 : depth;
break;
}
if (!cur_schema.is_stub()) { depth--; }
prev_schema = cur_schema;
schema_idx = cur_schema.parent_idx;
}
return depth;
};
def_depth_remap[s_idx] = find_deepest(s_idx);
}
}
/**
* @brief Return the required number of bits to store a value.
*/
template <typename T = uint8_t>
[[nodiscard]] T required_bits(uint32_t max_level)
{
return static_cast<T>(CompactProtocolReader::NumRequiredBits(max_level));
}
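// For example, a column with max_definition_level = 3 needs 2 bits per stored level value
// (to distinguish levels 0..3).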
/**
* @brief Converts cuDF units to Parquet units.
*
* @return A tuple of Parquet type width, Parquet clock rate and Parquet decimal type.
*/
[[nodiscard]] std::tuple<int32_t, int32_t, int8_t> conversion_info(type_id column_type_id,
type_id timestamp_type_id,
parquet::Type physical,
int8_t converted,
int32_t length)
{
int32_t type_width = (physical == parquet::FIXED_LEN_BYTE_ARRAY) ? length : 0;
int32_t clock_rate = 0;
if (column_type_id == type_id::INT8 or column_type_id == type_id::UINT8) {
type_width = 1; // I32 -> I8
} else if (column_type_id == type_id::INT16 or column_type_id == type_id::UINT16) {
type_width = 2; // I32 -> I16
} else if (column_type_id == type_id::INT32) {
type_width = 4; // str -> hash32
} else if (is_chrono(data_type{column_type_id})) {
clock_rate = to_clockrate(timestamp_type_id);
}
int8_t converted_type = converted;
if (converted_type == parquet::DECIMAL && column_type_id != type_id::FLOAT64 &&
not cudf::is_fixed_point(data_type{column_type_id})) {
converted_type = parquet::UNKNOWN; // Not converting to float64 or decimal
}
return std::make_tuple(type_width, clock_rate, converted_type);
}
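// For example, an INT8/UINT8 output column stored as a Parquet INT32 gets a type_width of 1,
// chrono output types pick up a clock_rate from to_clockrate(), and a DECIMAL converted type is
// dropped to UNKNOWN unless the output is FLOAT64 or a cudf fixed_point type.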
/**
* @brief Reads compressed page data to device memory.
*
* @param sources Dataset sources
* @param page_data Buffers to hold compressed page data for each chunk
* @param chunks List of column chunk descriptors
* @param begin_chunk Index of first column chunk to read
* @param end_chunk Index after the last column chunk to read
* @param column_chunk_offsets File offset for all chunks
* @param chunk_source_map Association between each column chunk and its source
* @param stream CUDA stream used for device memory operations and kernel launches
*
* @return A future object for reading synchronization
*/
[[nodiscard]] std::future<void> read_column_chunks_async(
std::vector<std::unique_ptr<datasource>> const& sources,
std::vector<std::unique_ptr<datasource::buffer>>& page_data,
hostdevice_vector<gpu::ColumnChunkDesc>& chunks,
size_t begin_chunk,
size_t end_chunk,
const std::vector<size_t>& column_chunk_offsets,
std::vector<size_type> const& chunk_source_map,
rmm::cuda_stream_view stream)
{
// Transfer chunk data, coalescing adjacent chunks
std::vector<std::future<size_t>> read_tasks;
for (size_t chunk = begin_chunk; chunk < end_chunk;) {
const size_t io_offset = column_chunk_offsets[chunk];
size_t io_size = chunks[chunk].compressed_size;
size_t next_chunk = chunk + 1;
const bool is_compressed = (chunks[chunk].codec != parquet::Compression::UNCOMPRESSED);
while (next_chunk < end_chunk) {
const size_t next_offset = column_chunk_offsets[next_chunk];
const bool is_next_compressed =
(chunks[next_chunk].codec != parquet::Compression::UNCOMPRESSED);
if (next_offset != io_offset + io_size || is_next_compressed != is_compressed) {
// Can't merge if not contiguous or mixing compressed and uncompressed
// Not coalescing uncompressed with compressed chunks is so that compressed buffers can be
// freed earlier (immediately after decompression stage) to limit peak memory requirements
break;
}
io_size += chunks[next_chunk].compressed_size;
next_chunk++;
}
if (io_size != 0) {
auto& source = sources[chunk_source_map[chunk]];
if (source->is_device_read_preferred(io_size)) {
auto buffer = rmm::device_buffer(io_size, stream);
auto fut_read_size = source->device_read_async(
io_offset, io_size, static_cast<uint8_t*>(buffer.data()), stream);
read_tasks.emplace_back(std::move(fut_read_size));
page_data[chunk] = datasource::buffer::create(std::move(buffer));
} else {
auto const buffer = source->host_read(io_offset, io_size);
page_data[chunk] =
datasource::buffer::create(rmm::device_buffer(buffer->data(), buffer->size(), stream));
}
auto d_compdata = page_data[chunk]->data();
do {
chunks[chunk].compressed_data = d_compdata;
d_compdata += chunks[chunk].compressed_size;
} while (++chunk != next_chunk);
} else {
chunk = next_chunk;
}
}
auto sync_fn = [](decltype(read_tasks) read_tasks) {
for (auto& task : read_tasks) {
task.wait();
}
};
return std::async(std::launch::deferred, sync_fn, std::move(read_tasks));
}
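// Coalescing example with hypothetical offsets: two compressed chunks occupying bytes [100, 150)
// and [150, 220) of the same source are merged into one 120-byte read at offset 100; a gap
// between chunks, or a switch between compressed and uncompressed data, ends the merge so that
// compressed buffers can still be freed right after the decompression stage.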
/**
* @brief Return the number of total pages from the given column chunks.
*
* @param chunks List of column chunk descriptors
* @param stream CUDA stream used for device memory operations and kernel launches
*
* @return The total number of pages
*/
[[nodiscard]] size_t count_page_headers(hostdevice_vector<gpu::ColumnChunkDesc>& chunks,
rmm::cuda_stream_view stream)
{
size_t total_pages = 0;
chunks.host_to_device(stream);
gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size(), stream);
chunks.device_to_host(stream, true);
for (size_t c = 0; c < chunks.size(); c++) {
total_pages += chunks[c].num_data_pages + chunks[c].num_dict_pages;
}
return total_pages;
}
/**
* @brief Decode the page information from the given column chunks.
*
* @param chunks List of column chunk descriptors
* @param pages List of page information
* @param stream CUDA stream used for device memory operations and kernel launches
*/
void decode_page_headers(hostdevice_vector<gpu::ColumnChunkDesc>& chunks,
hostdevice_vector<gpu::PageInfo>& pages,
rmm::cuda_stream_view stream)
{
// IMPORTANT : if you change how pages are stored within a chunk (dist pages, then data pages),
// please update preprocess_nested_columns to reflect this.
for (size_t c = 0, page_count = 0; c < chunks.size(); c++) {
chunks[c].max_num_pages = chunks[c].num_data_pages + chunks[c].num_dict_pages;
chunks[c].page_info = pages.device_ptr(page_count);
page_count += chunks[c].max_num_pages;
}
chunks.host_to_device(stream);
gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size(), stream);
pages.device_to_host(stream, true);
}
/**
* @brief Decompresses the page data, at page granularity.
*
* @param chunks List of column chunk descriptors
* @param pages List of page information
* @param stream CUDA stream used for device memory operations and kernel launches
*
* @return Device buffer to decompressed page data
*/
[[nodiscard]] rmm::device_buffer decompress_page_data(
hostdevice_vector<gpu::ColumnChunkDesc>& chunks,
hostdevice_vector<gpu::PageInfo>& pages,
rmm::cuda_stream_view stream)
{
auto for_each_codec_page = [&](parquet::Compression codec, const std::function<void(size_t)>& f) {
for (size_t c = 0, page_count = 0; c < chunks.size(); c++) {
const auto page_stride = chunks[c].max_num_pages;
if (chunks[c].codec == codec) {
for (int k = 0; k < page_stride; k++) {
f(page_count + k);
}
}
page_count += page_stride;
}
};
// Brotli scratch memory for decompressing
rmm::device_buffer debrotli_scratch;
// Count the exact number of compressed pages
size_t num_comp_pages = 0;
size_t total_decomp_size = 0;
struct codec_stats {
parquet::Compression compression_type = UNCOMPRESSED;
size_t num_pages = 0;
int32_t max_decompressed_size = 0;
size_t total_decomp_size = 0;
};
std::array codecs{codec_stats{parquet::GZIP},
codec_stats{parquet::SNAPPY},
codec_stats{parquet::BROTLI},
codec_stats{parquet::ZSTD}};
auto is_codec_supported = [&codecs](int8_t codec) {
if (codec == parquet::UNCOMPRESSED) return true;
return std::find_if(codecs.begin(), codecs.end(), [codec](auto& cstats) {
return codec == cstats.compression_type;
}) != codecs.end();
};
CUDF_EXPECTS(std::all_of(chunks.begin(),
chunks.end(),
[&is_codec_supported](auto const& chunk) {
return is_codec_supported(chunk.codec);
}),
"Unsupported compression type");
for (auto& codec : codecs) {
for_each_codec_page(codec.compression_type, [&](size_t page) {
auto page_uncomp_size = pages[page].uncompressed_page_size;
total_decomp_size += page_uncomp_size;
codec.total_decomp_size += page_uncomp_size;
codec.max_decompressed_size = std::max(codec.max_decompressed_size, page_uncomp_size);
codec.num_pages++;
num_comp_pages++;
});
if (codec.compression_type == parquet::BROTLI && codec.num_pages > 0) {
debrotli_scratch.resize(get_gpu_debrotli_scratch_size(codec.num_pages), stream);
}
}
// Dispatch batches of pages to decompress for each codec
rmm::device_buffer decomp_pages(total_decomp_size, stream);
std::vector<device_span<uint8_t const>> comp_in;
comp_in.reserve(num_comp_pages);
std::vector<device_span<uint8_t>> comp_out;
comp_out.reserve(num_comp_pages);
// vectors to save v2 def and rep level data, if any
std::vector<device_span<uint8_t const>> copy_in;
copy_in.reserve(num_comp_pages);
std::vector<device_span<uint8_t>> copy_out;
copy_out.reserve(num_comp_pages);
rmm::device_uvector<compression_result> comp_res(num_comp_pages, stream);
thrust::fill(rmm::exec_policy(stream),
comp_res.begin(),
comp_res.end(),
compression_result{0, compression_status::FAILURE});
size_t decomp_offset = 0;
int32_t start_pos = 0;
for (const auto& codec : codecs) {
if (codec.num_pages == 0) { continue; }
for_each_codec_page(codec.compression_type, [&](size_t page_idx) {
auto const dst_base = static_cast<uint8_t*>(decomp_pages.data()) + decomp_offset;
auto& page = pages[page_idx];
// offset will only be non-zero for V2 pages
auto const offset = page.def_lvl_bytes + page.rep_lvl_bytes;
// for V2 need to copy def and rep level info into place, and then offset the
// input and output buffers. otherwise we'd have to keep both the compressed
// and decompressed data.
if (offset != 0) {
copy_in.emplace_back(page.page_data, offset);
copy_out.emplace_back(dst_base, offset);
}
comp_in.emplace_back(page.page_data + offset,
static_cast<size_t>(page.compressed_page_size - offset));
comp_out.emplace_back(dst_base + offset,
static_cast<size_t>(page.uncompressed_page_size - offset));
page.page_data = dst_base;
decomp_offset += page.uncompressed_page_size;
});
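    // V2 example with hypothetical sizes: a page with def_lvl_bytes + rep_lvl_bytes = 64,
    // compressed_page_size = 1000 and uncompressed_page_size = 4000 gets its first 64 bytes
    // memcpy'd as-is, while the remaining 936 compressed bytes are decompressed into the
    // following 3936 bytes of the destination buffer.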
host_span<device_span<uint8_t const> const> comp_in_view{comp_in.data() + start_pos,
codec.num_pages};
auto const d_comp_in = cudf::detail::make_device_uvector_async(comp_in_view, stream);
host_span<device_span<uint8_t> const> comp_out_view(comp_out.data() + start_pos,
codec.num_pages);
auto const d_comp_out = cudf::detail::make_device_uvector_async(comp_out_view, stream);
device_span<compression_result> d_comp_res_view(comp_res.data() + start_pos, codec.num_pages);
switch (codec.compression_type) {
case parquet::GZIP:
gpuinflate(d_comp_in, d_comp_out, d_comp_res_view, gzip_header_included::YES, stream);
break;
case parquet::SNAPPY:
if (nvcomp_integration::is_stable_enabled()) {
nvcomp::batched_decompress(nvcomp::compression_type::SNAPPY,
d_comp_in,
d_comp_out,
d_comp_res_view,
codec.max_decompressed_size,
codec.total_decomp_size,
stream);
} else {
gpu_unsnap(d_comp_in, d_comp_out, d_comp_res_view, stream);
}
break;
case parquet::ZSTD:
nvcomp::batched_decompress(nvcomp::compression_type::ZSTD,
d_comp_in,
d_comp_out,
d_comp_res_view,
codec.max_decompressed_size,
codec.total_decomp_size,
stream);
break;
case parquet::BROTLI:
gpu_debrotli(d_comp_in,
d_comp_out,
d_comp_res_view,
debrotli_scratch.data(),
debrotli_scratch.size(),
stream);
break;
default: CUDF_FAIL("Unexpected decompression dispatch"); break;
}
start_pos += codec.num_pages;
}
CUDF_EXPECTS(thrust::all_of(rmm::exec_policy(stream),
comp_res.begin(),
comp_res.end(),
[] __device__(auto const& res) {
return res.status == compression_status::SUCCESS;
}),
"Error during decompression");
// now copy the uncompressed V2 def and rep level data
if (not copy_in.empty()) {
auto const d_copy_in = cudf::detail::make_device_uvector_async(copy_in, stream);
auto const d_copy_out = cudf::detail::make_device_uvector_async(copy_out, stream);
gpu_copy_uncompressed_blocks(d_copy_in, d_copy_out, stream);
stream.synchronize();
}
// Update the page information in device memory with the updated value of
// page_data; it now points to the uncompressed data buffer
pages.host_to_device(stream);
return decomp_pages;
}
} // namespace
void reader::impl::allocate_nesting_info()
{
auto const& chunks = _file_itm_data.chunks;
auto& pages = _file_itm_data.pages_info;
auto& page_nesting_info = _file_itm_data.page_nesting_info;
// compute total # of page_nesting infos needed and allocate space. doing this in one
// buffer to keep it to a single gpu allocation
size_t const total_page_nesting_infos = std::accumulate(
chunks.host_ptr(), chunks.host_ptr() + chunks.size(), 0, [&](int total, auto& chunk) {
// the schema of the input column
auto const& schema = _metadata->get_schema(chunk.src_col_schema);
auto const per_page_nesting_info_size = max(
schema.max_definition_level + 1, _metadata->get_output_nesting_depth(chunk.src_col_schema));
return total + (per_page_nesting_info_size * chunk.num_data_pages);
});
page_nesting_info = hostdevice_vector<gpu::PageNestingInfo>{total_page_nesting_infos, _stream};
// retrieve from the gpu so we can update
pages.device_to_host(_stream, true);
// update pointers in the PageInfos
int target_page_index = 0;
int src_info_index = 0;
for (size_t idx = 0; idx < chunks.size(); idx++) {
int src_col_schema = chunks[idx].src_col_schema;
auto& schema = _metadata->get_schema(src_col_schema);
auto const per_page_nesting_info_size = std::max(
schema.max_definition_level + 1, _metadata->get_output_nesting_depth(src_col_schema));
// skip my dict pages
target_page_index += chunks[idx].num_dict_pages;
for (int p_idx = 0; p_idx < chunks[idx].num_data_pages; p_idx++) {
pages[target_page_index + p_idx].nesting = page_nesting_info.device_ptr() + src_info_index;
pages[target_page_index + p_idx].num_nesting_levels = per_page_nesting_info_size;
src_info_index += per_page_nesting_info_size;
}
target_page_index += chunks[idx].num_data_pages;
}
// copy back to the gpu
pages.host_to_device(_stream);
// fill in
int nesting_info_index = 0;
std::map<int, std::pair<std::vector<int>, std::vector<int>>> depth_remapping;
for (size_t idx = 0; idx < chunks.size(); idx++) {
int src_col_schema = chunks[idx].src_col_schema;
// schema of the input column
auto& schema = _metadata->get_schema(src_col_schema);
// real depth of the output cudf column hierarchy (1 == no nesting, 2 == 1 level, etc)
int max_depth = _metadata->get_output_nesting_depth(src_col_schema);
// # of nesting infos stored per page for this column
auto const per_page_nesting_info_size = std::max(schema.max_definition_level + 1, max_depth);
// if this column has lists, generate depth remapping
std::map<int, std::pair<std::vector<int>, std::vector<int>>> depth_remapping;
if (schema.max_repetition_level > 0) {
generate_depth_remappings(depth_remapping, src_col_schema, *_metadata);
}
// fill in host-side nesting info
int schema_idx = src_col_schema;
auto cur_schema = _metadata->get_schema(schema_idx);
int cur_depth = max_depth - 1;
while (schema_idx > 0) {
      // stub columns (basically the inner field of a list schema element) are not real columns.
// we can ignore them for the purposes of output nesting info
if (!cur_schema.is_stub()) {
// initialize each page within the chunk
for (int p_idx = 0; p_idx < chunks[idx].num_data_pages; p_idx++) {
gpu::PageNestingInfo* pni =
&page_nesting_info[nesting_info_index + (p_idx * per_page_nesting_info_size)];
// if we have lists, set our start and end depth remappings
if (schema.max_repetition_level > 0) {
auto remap = depth_remapping.find(src_col_schema);
CUDF_EXPECTS(remap != depth_remapping.end(),
"Could not find depth remapping for schema");
std::vector<int> const& rep_depth_remap = (remap->second.first);
std::vector<int> const& def_depth_remap = (remap->second.second);
for (size_t m = 0; m < rep_depth_remap.size(); m++) {
pni[m].start_depth = rep_depth_remap[m];
}
for (size_t m = 0; m < def_depth_remap.size(); m++) {
pni[m].end_depth = def_depth_remap[m];
}
}
// values indexed by output column index
pni[cur_depth].max_def_level = cur_schema.max_definition_level;
pni[cur_depth].max_rep_level = cur_schema.max_repetition_level;
pni[cur_depth].size = 0;
pni[cur_depth].type =
to_type_id(cur_schema, _strings_to_categorical, _timestamp_type.id());
pni[cur_depth].nullable = cur_schema.repetition_type == OPTIONAL;
}
// move up the hierarchy
cur_depth--;
}
// next schema
schema_idx = cur_schema.parent_idx;
cur_schema = _metadata->get_schema(schema_idx);
}
nesting_info_index += (per_page_nesting_info_size * chunks[idx].num_data_pages);
}
// copy nesting info to the device
page_nesting_info.host_to_device(_stream);
}
void reader::impl::load_and_decompress_data(std::vector<row_group_info> const& row_groups_info,
size_type num_rows)
{
// This function should never be called if `num_rows == 0`.
CUDF_EXPECTS(num_rows > 0, "Number of reading rows must not be zero.");
auto& raw_page_data = _file_itm_data.raw_page_data;
auto& decomp_page_data = _file_itm_data.decomp_page_data;
auto& chunks = _file_itm_data.chunks;
auto& pages_info = _file_itm_data.pages_info;
// Descriptors for all the chunks that make up the selected columns
const auto num_input_columns = _input_columns.size();
const auto num_chunks = row_groups_info.size() * num_input_columns;
chunks = hostdevice_vector<gpu::ColumnChunkDesc>(0, num_chunks, _stream);
// Association between each column chunk and its source
std::vector<size_type> chunk_source_map(num_chunks);
// Tracker for eventually deallocating compressed and uncompressed data
raw_page_data = std::vector<std::unique_ptr<datasource::buffer>>(num_chunks);
// Keep track of column chunk file offsets
std::vector<size_t> column_chunk_offsets(num_chunks);
// Initialize column chunk information
size_t total_decompressed_size = 0;
auto remaining_rows = num_rows;
std::vector<std::future<void>> read_rowgroup_tasks;
for (const auto& rg : row_groups_info) {
const auto& row_group = _metadata->get_row_group(rg.index, rg.source_index);
auto const row_group_start = rg.start_row;
auto const row_group_source = rg.source_index;
auto const row_group_rows = std::min<int>(remaining_rows, row_group.num_rows);
auto const io_chunk_idx = chunks.size();
// generate ColumnChunkDesc objects for everything to be decoded (all input columns)
for (size_t i = 0; i < num_input_columns; ++i) {
auto col = _input_columns[i];
// look up metadata
auto& col_meta = _metadata->get_column_metadata(rg.index, rg.source_index, col.schema_idx);
auto& schema = _metadata->get_schema(col.schema_idx);
auto [type_width, clock_rate, converted_type] =
conversion_info(to_type_id(schema, _strings_to_categorical, _timestamp_type.id()),
_timestamp_type.id(),
schema.type,
schema.converted_type,
schema.type_length);
column_chunk_offsets[chunks.size()] =
(col_meta.dictionary_page_offset != 0)
? std::min(col_meta.data_page_offset, col_meta.dictionary_page_offset)
: col_meta.data_page_offset;
chunks.push_back(gpu::ColumnChunkDesc(col_meta.total_compressed_size,
nullptr,
col_meta.num_values,
schema.type,
type_width,
row_group_start,
row_group_rows,
schema.max_definition_level,
schema.max_repetition_level,
_metadata->get_output_nesting_depth(col.schema_idx),
required_bits(schema.max_definition_level),
required_bits(schema.max_repetition_level),
col_meta.codec,
converted_type,
schema.logical_type,
schema.decimal_precision,
clock_rate,
i,
col.schema_idx));
// Map each column chunk to its column index and its source index
chunk_source_map[chunks.size() - 1] = row_group_source;
if (col_meta.codec != Compression::UNCOMPRESSED) {
total_decompressed_size += col_meta.total_uncompressed_size;
}
}
// Read compressed chunk data to device memory
read_rowgroup_tasks.push_back(read_column_chunks_async(_sources,
raw_page_data,
chunks,
io_chunk_idx,
chunks.size(),
column_chunk_offsets,
chunk_source_map,
_stream));
remaining_rows -= row_group.num_rows;
}
for (auto& task : read_rowgroup_tasks) {
task.wait();
}
CUDF_EXPECTS(remaining_rows <= 0, "All row data must be read.");
// Process dataset chunk pages into output columns
auto const total_pages = count_page_headers(chunks, _stream);
pages_info = hostdevice_vector<gpu::PageInfo>(total_pages, total_pages, _stream);
if (total_pages > 0) {
// decoding of column/page information
decode_page_headers(chunks, pages_info, _stream);
if (total_decompressed_size > 0) {
decomp_page_data = decompress_page_data(chunks, pages_info, _stream);
// Free compressed data
for (size_t c = 0; c < chunks.size(); c++) {
if (chunks[c].codec != parquet::Compression::UNCOMPRESSED) {
raw_page_data[c].reset();
// TODO: Check if this is called
}
}
}
// build output column info
// walk the schema, building out_buffers that mirror what our final cudf columns will look
// like. important : there is not necessarily a 1:1 mapping between input columns and output
// columns. For example, parquet does not explicitly store a ColumnChunkDesc for struct
// columns. The "structiness" is simply implied by the schema. For example, this schema:
// required group field_id=1 name {
// required binary field_id=2 firstname (String);
// required binary field_id=3 middlename (String);
// required binary field_id=4 lastname (String);
// }
// will only contain 3 columns of data (firstname, middlename, lastname). But of course
// "name" is a struct column that we want to return, so we have to make sure that we
// create it ourselves.
// std::vector<output_column_info> output_info = build_output_column_info();
// nesting information (sizes, etc) stored -per page-
// note : even for flat schemas, we allocate 1 level of "nesting" info
allocate_nesting_info();
}
}
namespace {
struct cumulative_row_info {
size_t row_count; // cumulative row count
size_t size_bytes; // cumulative size in bytes
int key; // schema index
};
#if defined(PREPROCESS_DEBUG)
void print_pages(hostdevice_vector<gpu::PageInfo>& pages, rmm::cuda_stream_view _stream)
{
pages.device_to_host(_stream, true);
for (size_t idx = 0; idx < pages.size(); idx++) {
auto const& p = pages[idx];
// skip dictionary pages
if (p.flags & gpu::PAGEINFO_FLAGS_DICTIONARY) { continue; }
printf(
"P(%lu, s:%d): chunk_row(%d), num_rows(%d), skipped_values(%d), skipped_leaf_values(%d)\n",
idx,
p.src_col_schema,
p.chunk_row,
p.num_rows,
p.skipped_values,
p.skipped_leaf_values);
}
}
void print_cumulative_page_info(hostdevice_vector<gpu::PageInfo>& pages,
rmm::device_uvector<int32_t> const& page_index,
rmm::device_uvector<cumulative_row_info> const& c_info,
rmm::cuda_stream_view stream)
{
pages.device_to_host(stream, true);
printf("------------\nCumulative sizes by page\n");
std::vector<int> schemas(pages.size());
std::vector<int> h_page_index(pages.size());
cudaMemcpy(
h_page_index.data(), page_index.data(), sizeof(int) * pages.size(), cudaMemcpyDeviceToHost);
std::vector<cumulative_row_info> h_cinfo(pages.size());
cudaMemcpy(h_cinfo.data(),
c_info.data(),
sizeof(cumulative_row_info) * pages.size(),
cudaMemcpyDeviceToHost);
auto schema_iter = cudf::detail::make_counting_transform_iterator(
0, [&](size_type i) { return pages[h_page_index[i]].src_col_schema; });
thrust::copy(thrust::seq, schema_iter, schema_iter + pages.size(), schemas.begin());
auto last = thrust::unique(thrust::seq, schemas.begin(), schemas.end());
schemas.resize(last - schemas.begin());
printf("Num schemas: %lu\n", schemas.size());
for (size_t idx = 0; idx < schemas.size(); idx++) {
printf("Schema %d\n", schemas[idx]);
for (size_t pidx = 0; pidx < pages.size(); pidx++) {
auto const& page = pages[h_page_index[pidx]];
if (page.flags & gpu::PAGEINFO_FLAGS_DICTIONARY || page.src_col_schema != schemas[idx]) {
continue;
}
printf("\tP: {%lu, %lu}\n", h_cinfo[pidx].row_count, h_cinfo[pidx].size_bytes);
}
}
}
void print_cumulative_row_info(
host_span<cumulative_row_info const> sizes,
std::string const& label,
std::optional<std::vector<gpu::chunk_read_info>> splits = std::nullopt)
{
if (splits.has_value()) {
printf("------------\nSplits\n");
for (size_t idx = 0; idx < splits->size(); idx++) {
printf("{%lu, %lu}\n", splits.value()[idx].skip_rows, splits.value()[idx].num_rows);
}
}
printf("------------\nCumulative sizes %s\n", label.c_str());
for (size_t idx = 0; idx < sizes.size(); idx++) {
printf("{%lu, %lu, %d}", sizes[idx].row_count, sizes[idx].size_bytes, sizes[idx].key);
if (splits.has_value()) {
// if we have a split at this row count and this is the last instance of this row count
auto start = thrust::make_transform_iterator(
splits->begin(), [](gpu::chunk_read_info const& i) { return i.skip_rows; });
auto end = start + splits->size();
auto split = std::find(start, end, sizes[idx].row_count);
auto const split_index = [&]() -> int {
if (split != end &&
((idx == sizes.size() - 1) || (sizes[idx + 1].row_count > sizes[idx].row_count))) {
return static_cast<int>(std::distance(start, split));
}
return idx == 0 ? 0 : -1;
}();
if (split_index >= 0) {
printf(" <-- split {%lu, %lu}",
splits.value()[split_index].skip_rows,
splits.value()[split_index].num_rows);
}
}
printf("\n");
}
}
#endif // PREPROCESS_DEBUG
/**
* @brief Functor which reduces two cumulative_row_info structs of the same key.
*/
struct cumulative_row_sum {
cumulative_row_info operator()
__device__(cumulative_row_info const& a, cumulative_row_info const& b) const
{
return cumulative_row_info{a.row_count + b.row_count, a.size_bytes + b.size_bytes, a.key};
}
};
/**
* @brief Functor which computes the total data size for a given type of cudf column.
*
* In the case of strings, the return size does not include the chars themselves. That
* information is tracked separately (see PageInfo::str_bytes).
*/
struct row_size_functor {
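// size of the validity bitmask for a column slice: one 4-byte word per 32 rows (rounded up),
// or nothing at all if the column is not nullable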
__device__ size_t validity_size(size_t num_rows, bool nullable)
{
return nullable ? (cudf::util::div_rounding_up_safe(num_rows, size_t{32}) * 4) : 0;
}
template <typename T>
__device__ size_t operator()(size_t num_rows, bool nullable)
{
auto const element_size = sizeof(device_storage_type_t<T>);
return (element_size * num_rows) + validity_size(num_rows, nullable);
}
};
template <>
__device__ size_t row_size_functor::operator()<list_view>(size_t num_rows, bool nullable)
{
auto const offset_size = sizeof(offset_type);
// NOTE: Adding the + 1 offset here isn't strictly correct. There will only be 1 extra offset
// for the entire column, whereas this is adding an extra offset per page. So we will get a
// small over-estimate of the real size, on the order of (# of pages * 4 bytes). It seems better
// to overestimate size somewhat than to underestimate it and potentially generate chunks
// that are too large.
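// e.g. a list column whose rows are spread across 1,000 pages is over-counted by only
// ~4,000 bytes in total (one spurious 4-byte offset per page).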
return (offset_size * (num_rows + 1)) + validity_size(num_rows, nullable);
}
template <>
__device__ size_t row_size_functor::operator()<struct_view>(size_t num_rows, bool nullable)
{
return validity_size(num_rows, nullable);
}
template <>
__device__ size_t row_size_functor::operator()<string_view>(size_t num_rows, bool nullable)
{
// only returns the size of offsets and validity. the size of the actual string chars
// is tracked separately.
auto const offset_size = sizeof(offset_type);
// see note about offsets in the list_view template.
return (offset_size * (num_rows + 1)) + validity_size(num_rows, nullable);
}
/**
* @brief Functor which computes the total output cudf data size for all of
* the data in this page.
*
* Sums across all nesting levels.
*/
struct get_cumulative_row_info {
gpu::PageInfo const* const pages;
__device__ cumulative_row_info operator()(size_type index)
{
auto const& page = pages[index];
if (page.flags & gpu::PAGEINFO_FLAGS_DICTIONARY) {
return cumulative_row_info{0, 0, page.src_col_schema};
}
// total nested size, not counting string data
auto iter =
cudf::detail::make_counting_transform_iterator(0, [page, index] __device__(size_type i) {
auto const& pni = page.nesting[i];
return cudf::type_dispatcher(
data_type{pni.type}, row_size_functor{}, pni.size, pni.nullable);
});
size_t const row_count = static_cast<size_t>(page.nesting[0].size);
return {row_count,
thrust::reduce(thrust::seq, iter, iter + page.num_nesting_levels) + page.str_bytes,
page.src_col_schema};
}
};
/**
* @brief Functor which computes the effective size of all input columns by page.
*
* For a given row, we want to find the cost of all pages for all columns involved
* in loading up to that row. The complication here is that not all pages are the
* same size between columns. Example:
*
* page row counts
* Column A: 0 <----> 100 <----> 200
* Column B: 0 <---------------> 200 <--------> 400
 * |
* if we decide to split at row 100, we don't really know the actual amount of bytes in column B
* at that point. So we have to proceed as if we are taking the bytes from all 200 rows of that
* page. Essentially, a conservative over-estimate of the real size.
*/
struct row_total_size {
cumulative_row_info const* c_info;
size_type const* key_offsets;
size_t num_keys;
__device__ cumulative_row_info operator()(cumulative_row_info const& i)
{
// sum sizes for each input column at this row
size_t sum = 0;
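// for each key (input column), find the first page whose cumulative row count reaches this
// row, and charge its full cumulative size (the conservative over-estimate described above)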
for (int idx = 0; idx < num_keys; idx++) {
auto const start = key_offsets[idx];
auto const end = key_offsets[idx + 1];
auto iter = cudf::detail::make_counting_transform_iterator(
0, [&] __device__(size_type i) { return c_info[i].row_count; });
auto const page_index =
thrust::lower_bound(thrust::seq, iter + start, iter + end, i.row_count) - iter;
sum += c_info[page_index].size_bytes;
}
return {i.row_count, sum, i.key};
}
};
/**
* @brief Given a vector of cumulative {row_count, byte_size} pairs and a chunk read
* limit, determine the set of splits.
*
* @param sizes Vector of cumulative {row_count, byte_size} pairs
* @param num_rows Total number of rows to read
* @param chunk_read_limit Limit on total number of bytes to be returned per read, for all columns
*/
std::vector<gpu::chunk_read_info> find_splits(std::vector<cumulative_row_info> const& sizes,
size_t num_rows,
size_t chunk_read_limit)
{
// now we have an array of {row_count, real output bytes}. just walk through it and generate
// splits.
// TODO: come up with a clever way to do this entirely in parallel. For now, as long as batch
// sizes are reasonably large, this shouldn't iterate too many times
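// illustrative example (hypothetical numbers): with cumulative sizes
// {100 rows, 4000 B}, {200, 8000}, {300, 12000} and chunk_read_limit = 9000, the first pass
// stops at {200, 8000} (the next entry would exceed the limit) and emits {skip_rows 0, num_rows 200};
// the second pass then emits {200, 100}.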
std::vector<gpu::chunk_read_info> splits;
{
size_t cur_pos = 0;
size_t cur_cumulative_size = 0;
size_t cur_row_count = 0;
auto start = thrust::make_transform_iterator(sizes.begin(), [&](cumulative_row_info const& i) {
return i.size_bytes - cur_cumulative_size;
});
auto end = start + sizes.size();
while (cur_row_count < num_rows) {
int64_t split_pos =
thrust::lower_bound(thrust::seq, start + cur_pos, end, chunk_read_limit) - start;
// if we're past the end, or if the returned bucket is greater than the chunk_read_limit, move back
// one.
if (static_cast<size_t>(split_pos) >= sizes.size() ||
(sizes[split_pos].size_bytes - cur_cumulative_size > chunk_read_limit)) {
split_pos--;
}
// best-try. if we can't find something that'll fit, we have to go bigger. we're doing this in
// a loop because all of the cumulative sizes for all the pages are sorted into one big list.
// so if we had two columns, both of which had an entry {1000, 10000}, that entry would be in
// the list twice. so we have to iterate until we skip past all of them. The idea is that we
// either do this, or we have to call unique() on the input first.
while (split_pos < (static_cast<int64_t>(sizes.size()) - 1) &&
(split_pos < 0 || sizes[split_pos].row_count == cur_row_count)) {
split_pos++;
}
auto const start_row = cur_row_count;
cur_row_count = sizes[split_pos].row_count;
splits.push_back(gpu::chunk_read_info{start_row, cur_row_count - start_row});
cur_pos = split_pos;
cur_cumulative_size = sizes[split_pos].size_bytes;
}
}
// print_cumulative_row_info(sizes, "adjusted", splits);
return splits;
}
/**
* @brief Given a set of pages that have had their sizes computed by nesting level and
* a limit on total read size, generate a set of {skip_rows, num_rows} pairs representing
* a set of reads that will generate output columns of total size <= `chunk_read_limit` bytes.
*
* @param pages All pages in the file
* @param id Additional intermediate information required to process the pages
* @param num_rows Total number of rows to read
* @param chunk_read_limit Limit on total number of bytes to be returned per read, for all columns
* @param stream CUDA stream to use, default 0
*/
std::vector<gpu::chunk_read_info> compute_splits(hostdevice_vector<gpu::PageInfo>& pages,
gpu::chunk_intermediate_data const& id,
size_t num_rows,
size_t chunk_read_limit,
rmm::cuda_stream_view stream)
{
auto const& page_keys = id.page_keys;
auto const& page_index = id.page_index;
// generate cumulative row counts and sizes
rmm::device_uvector<cumulative_row_info> c_info(page_keys.size(), stream);
// convert PageInfo to cumulative_row_info
auto page_input = thrust::make_transform_iterator(page_index.begin(),
get_cumulative_row_info{pages.device_ptr()});
thrust::inclusive_scan_by_key(rmm::exec_policy(stream),
page_keys.begin(),
page_keys.end(),
page_input,
c_info.begin(),
thrust::equal_to{},
cumulative_row_sum{});
// print_cumulative_page_info(pages, page_index, c_info, stream);
// sort by row count
rmm::device_uvector<cumulative_row_info> c_info_sorted{c_info, stream};
thrust::sort(rmm::exec_policy(stream),
c_info_sorted.begin(),
c_info_sorted.end(),
[] __device__(cumulative_row_info const& a, cumulative_row_info const& b) {
return a.row_count < b.row_count;
});
std::vector<cumulative_row_info> h_c_info_sorted(c_info_sorted.size());
cudaMemcpy(h_c_info_sorted.data(),
c_info_sorted.data(),
sizeof(cumulative_row_info) * c_info_sorted.size(),
cudaMemcpyDeviceToHost);
// print_cumulative_row_info(h_c_info_sorted, "raw");
// generate key offsets (offsets to the start of each partition of keys). worst case is 1 page per
// key
rmm::device_uvector<size_type> key_offsets(page_keys.size() + 1, stream);
auto const key_offsets_end = thrust::reduce_by_key(rmm::exec_policy(stream),
page_keys.begin(),
page_keys.end(),
thrust::make_constant_iterator(1),
thrust::make_discard_iterator(),
key_offsets.begin())
.second;
size_t const num_unique_keys = key_offsets_end - key_offsets.begin();
thrust::exclusive_scan(
rmm::exec_policy(stream), key_offsets.begin(), key_offsets.end(), key_offsets.begin());
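// e.g. per-key page counts {3, 2, 4} have now become start offsets {0, 3, 5}, with 9
// terminating the last key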
// adjust the cumulative info such that for each row count, the size includes any pages that span
// that row count. this is so that if we have this case:
// page row counts
// Column A: 0 <----> 100 <----> 200
// Column B: 0 <---------------> 200 <--------> 400
// |
// if we decide to split at row 100, we don't really know the actual amount of bytes in column B
// at that point. So we have to proceed as if we are taking the bytes from all 200 rows of that
// page.
//
rmm::device_uvector<cumulative_row_info> aggregated_info(c_info.size(), stream);
thrust::transform(rmm::exec_policy(stream),
c_info_sorted.begin(),
c_info_sorted.end(),
aggregated_info.begin(),
row_total_size{c_info.data(), key_offsets.data(), num_unique_keys});
// bring back to the cpu
std::vector<cumulative_row_info> h_aggregated_info(aggregated_info.size());
cudaMemcpyAsync(h_aggregated_info.data(),
aggregated_info.data(),
sizeof(cumulative_row_info) * c_info.size(),
cudaMemcpyDeviceToHost,
stream);
stream.synchronize();
return find_splits(h_aggregated_info, num_rows, chunk_read_limit);
}
struct get_page_chunk_idx {
__device__ size_type operator()(gpu::PageInfo const& page) { return page.chunk_idx; }
};
struct get_page_num_rows {
__device__ size_type operator()(gpu::PageInfo const& page) { return page.num_rows; }
};
struct get_page_schema {
__device__ size_type operator()(gpu::PageInfo const& page) { return page.src_col_schema; }
};
/**
* @brief Returns the size field of a PageInfo struct for a given depth, keyed by schema.
*/
struct get_page_nesting_size {
size_type const src_col_schema;
size_type const depth;
gpu::PageInfo const* const pages;
__device__ size_type operator()(int index) const
{
auto const& page = pages[index];
if (page.src_col_schema != src_col_schema || page.flags & gpu::PAGEINFO_FLAGS_DICTIONARY) {
return 0;
}
return page.nesting[depth].batch_size;
}
};
/**
* @brief Writes to the chunk_row field of the PageInfo struct.
*/
struct chunk_row_output_iter {
gpu::PageInfo* p;
using value_type = size_type;
using difference_type = size_type;
using pointer = size_type*;
using reference = size_type&;
using iterator_category = thrust::output_device_iterator_tag;
__host__ __device__ chunk_row_output_iter operator+(int i)
{
return chunk_row_output_iter{p + i};
}
__host__ __device__ void operator++() { p++; }
__device__ reference operator[](int i) { return p[i].chunk_row; }
__device__ reference operator*() { return p->chunk_row; }
};
/**
* @brief Writes to the page_start_value field of the PageNestingInfo struct, keyed by schema.
*/
struct start_offset_output_iterator {
gpu::PageInfo* pages;
int const* page_indices;
int cur_index;
int src_col_schema;
int nesting_depth;
int empty = 0;
using value_type = size_type;
using difference_type = size_type;
using pointer = size_type*;
using reference = size_type&;
using iterator_category = thrust::output_device_iterator_tag;
constexpr void operator=(start_offset_output_iterator const& other)
{
pages = other.pages;
page_indices = other.page_indices;
cur_index = other.cur_index;
src_col_schema = other.src_col_schema;
nesting_depth = other.nesting_depth;
}
constexpr start_offset_output_iterator operator+(int i)
{
return start_offset_output_iterator{
pages, page_indices, cur_index + i, src_col_schema, nesting_depth};
}
constexpr void operator++() { cur_index++; }
__device__ reference operator[](int i) { return dereference(cur_index + i); }
__device__ reference operator*() { return dereference(cur_index); }
private:
__device__ reference dereference(int index)
{
gpu::PageInfo const& p = pages[page_indices[index]];
if (p.src_col_schema != src_col_schema || p.flags & gpu::PAGEINFO_FLAGS_DICTIONARY) {
return empty;
}
return p.nesting[nesting_depth].page_start_value;
}
};
} // anonymous namespace
void reader::impl::preprocess_pages(size_t skip_rows,
size_t num_rows,
bool uses_custom_row_bounds,
size_t chunk_read_limit)
{
auto& chunks = _file_itm_data.chunks;
auto& pages = _file_itm_data.pages_info;
// iterate over all input columns and determine if they contain lists so we can further
// preprocess them.
bool has_lists = false;
for (size_t idx = 0; idx < _input_columns.size(); idx++) {
auto const& input_col = _input_columns[idx];
size_t const max_depth = input_col.nesting_depth();
auto* cols = &_output_buffers;
for (size_t l_idx = 0; l_idx < max_depth; l_idx++) {
auto& out_buf = (*cols)[input_col.nesting[l_idx]];
cols = &out_buf.children;
// if this has a list parent, we have to get column sizes from the
// data computed during gpu::ComputePageSizes
if (out_buf.user_data & PARQUET_COLUMN_BUFFER_FLAG_HAS_LIST_PARENT) {
has_lists = true;
break;
}
}
if (has_lists) { break; }
}
// generate string dict indices if necessary
{
auto is_dict_chunk = [](const gpu::ColumnChunkDesc& chunk) {
return (chunk.data_type & 0x7) == BYTE_ARRAY && chunk.num_dict_pages > 0;
};
// Count the number of string dictionary entries
// NOTE: Assumes first page in the chunk is always the dictionary page
size_t total_str_dict_indexes = 0;
for (size_t c = 0, page_count = 0; c < chunks.size(); c++) {
if (is_dict_chunk(chunks[c])) {
total_str_dict_indexes += pages[page_count].num_input_values;
}
page_count += chunks[c].max_num_pages;
}
// Build index for string dictionaries since they can't be indexed
// directly due to variable-sized elements
_chunk_itm_data.str_dict_index =
cudf::detail::make_zeroed_device_uvector_async<string_index_pair>(total_str_dict_indexes,
_stream);
// Update chunks with pointers to string dict indices
for (size_t c = 0, page_count = 0, str_ofs = 0; c < chunks.size(); c++) {
input_column_info const& input_col = _input_columns[chunks[c].src_col_index];
CUDF_EXPECTS(input_col.schema_idx == chunks[c].src_col_schema,
"Column/page schema index mismatch");
if (is_dict_chunk(chunks[c])) {
chunks[c].str_dict_index = _chunk_itm_data.str_dict_index.data() + str_ofs;
str_ofs += pages[page_count].num_input_values;
}
// column_data_base will always point to leaf data, even for nested types.
page_count += chunks[c].max_num_pages;
}
if (total_str_dict_indexes > 0) {
chunks.host_to_device(_stream);
gpu::BuildStringDictionaryIndex(chunks.device_ptr(), chunks.size(), _stream);
}
}
// intermediate data we will need for further chunked reads
if (has_lists || chunk_read_limit > 0) {
// computes:
// PageNestingInfo::num_rows for each page. the true number of rows (taking repetition into
// account), not just the number of values. PageNestingInfo::size for each level of nesting, for
// each page.
//
// we will be applying a later "trim" pass if skip_rows/num_rows is being used, which can happen
// if:
// - user has passed custom row bounds
// - we will be doing a chunked read
gpu::ComputePageSizes(pages,
chunks,
0, // 0-max size_t. process all possible rows
std::numeric_limits<size_t>::max(),
true, // compute num_rows
chunk_read_limit > 0, // compute string sizes
_stream);
// computes:
// PageInfo::chunk_row (the absolute start row index) for all pages
// Note: this is doing some redundant work for pages in flat hierarchies. chunk_row has already
// been computed during header decoding. the overall amount of work here is very small though.
auto key_input = thrust::make_transform_iterator(pages.device_ptr(), get_page_chunk_idx{});
auto page_input = thrust::make_transform_iterator(pages.device_ptr(), get_page_num_rows{});
thrust::exclusive_scan_by_key(rmm::exec_policy(_stream),
key_input,
key_input + pages.size(),
page_input,
chunk_row_output_iter{pages.device_ptr()});
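// e.g. pages with row counts {1000, 1000, 500} within a single chunk now carry
// chunk_row = {0, 1000, 2000}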
// compute page ordering.
//
// ordering of pages is by input column schema, repeated across row groups. so
// if we had 3 columns, each with 2 pages, and 1 row group, our schema values might look like
//
// 1, 1, 2, 2, 3, 3
//
// However, if we had more than one row group, the pattern would be
//
// 1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3
// ^ row group 0 |
// ^ row group 1
//
// To use exclusive_scan_by_key, the ordering we actually want is
//
// 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3
//
// We also need to preserve key-relative page ordering, so we need to use a stable sort.
_chunk_itm_data.page_keys = rmm::device_uvector<int>(pages.size(), _stream);
_chunk_itm_data.page_index = rmm::device_uvector<int>(pages.size(), _stream);
auto& page_keys = _chunk_itm_data.page_keys;
auto& page_index = _chunk_itm_data.page_index;
{
thrust::transform(rmm::exec_policy(_stream),
pages.device_ptr(),
pages.device_ptr() + pages.size(),
page_keys.begin(),
get_page_schema{});
thrust::sequence(rmm::exec_policy(_stream), page_index.begin(), page_index.end());
thrust::stable_sort_by_key(rmm::exec_policy(_stream),
page_keys.begin(),
page_keys.end(),
page_index.begin(),
thrust::less<int>());
}
// retrieve pages back
pages.device_to_host(_stream, true);
#if defined(PREPROCESS_DEBUG)
print_pages(pages, _stream);
#endif
}
// compute splits if necessary. otherwise return a single split representing
// the whole file.
_chunk_read_info = chunk_read_limit > 0
? compute_splits(pages, _chunk_itm_data, num_rows, chunk_read_limit, _stream)
: std::vector<gpu::chunk_read_info>{{skip_rows, num_rows}};
}
void reader::impl::allocate_columns(size_t skip_rows, size_t num_rows, bool uses_custom_row_bounds)
{
auto const& chunks = _file_itm_data.chunks;
auto& pages = _file_itm_data.pages_info;
// Should not reach here if there is no page data.
CUDF_EXPECTS(pages.size() > 0, "There is no page to parse");
// computes:
// PageNestingInfo::batch_size for each level of nesting, for each page, taking row bounds into
// account. PageInfo::skipped_values, which tells us where to start decoding in the input to
// respect the user bounds. It is only necessary to do this second pass if uses_custom_row_bounds
// is set (if the user has specified artificial bounds).
if (uses_custom_row_bounds) {
gpu::ComputePageSizes(pages,
chunks,
skip_rows,
num_rows,
false, // num_rows is already computed
false, // no need to compute string sizes
_stream);
#if defined(PREPROCESS_DEBUG)
print_pages(pages, _stream);
#endif
}
// iterate over all input columns and allocate any associated output
// buffers if they are not part of a list hierarchy. mark down
// if we have any list columns that need further processing.
bool has_lists = false;
for (size_t idx = 0; idx < _input_columns.size(); idx++) {
auto const& input_col = _input_columns[idx];
size_t const max_depth = input_col.nesting_depth();
auto* cols = &_output_buffers;
for (size_t l_idx = 0; l_idx < max_depth; l_idx++) {
auto& out_buf = (*cols)[input_col.nesting[l_idx]];
cols = &out_buf.children;
// if this has a list parent, we have to get column sizes from the
// data computed during gpu::ComputePageSizes
if (out_buf.user_data & PARQUET_COLUMN_BUFFER_FLAG_HAS_LIST_PARENT) {
has_lists = true;
}
// if we haven't already processed this column because it is part of a struct hierarchy
else if (out_buf.size == 0) {
// add 1 for the offset if this is a list column
out_buf.create(
out_buf.type.id() == type_id::LIST && l_idx < max_depth ? num_rows + 1 : num_rows,
_stream,
_mr);
}
}
}
// compute output column sizes by examining the pages of the -input- columns
if (has_lists) {
auto& page_keys = _chunk_itm_data.page_keys;
auto& page_index = _chunk_itm_data.page_index;
for (size_t idx = 0; idx < _input_columns.size(); idx++) {
auto const& input_col = _input_columns[idx];
auto src_col_schema = input_col.schema_idx;
size_t max_depth = input_col.nesting_depth();
auto* cols = &_output_buffers;
for (size_t l_idx = 0; l_idx < input_col.nesting_depth(); l_idx++) {
auto& out_buf = (*cols)[input_col.nesting[l_idx]];
cols = &out_buf.children;
// size iterator. indexes pages by sorted order
auto size_input = thrust::make_transform_iterator(
page_index.begin(),
get_page_nesting_size{src_col_schema, static_cast<size_type>(l_idx), pages.device_ptr()});
// if this buffer is part of a list hierarchy, we need to determine its
// final size and allocate it here.
//
// for struct columns, higher levels of the output columns are shared between input
// columns. so don't compute any given level more than once.
if ((out_buf.user_data & PARQUET_COLUMN_BUFFER_FLAG_HAS_LIST_PARENT) && out_buf.size == 0) {
int size =
thrust::reduce(rmm::exec_policy(_stream), size_input, size_input + pages.size());
// if this is a list column add 1 for non-leaf levels for the terminating offset
if (out_buf.type.id() == type_id::LIST && l_idx < max_depth) { size++; }
// allocate
out_buf.create(size, _stream, _mr);
}
// for nested hierarchies, compute per-page start offset
if (input_col.has_repetition) {
thrust::exclusive_scan_by_key(
rmm::exec_policy(_stream),
page_keys.begin(),
page_keys.end(),
size_input,
start_offset_output_iterator{pages.device_ptr(),
page_index.begin(),
0,
static_cast<int>(src_col_schema),
static_cast<int>(l_idx)});
}
}
}
}
}
} // namespace cudf::io::detail::parquet
|
3066ac512485a4fa9325840d7aafc5ea4c5790ee.hip | // !!! This is a file automatically generated by hipify!!!
#include "params.h"
#include "texmodel.cuh"
//using namespace aiv;
#ifdef MPI_ON
#include <mpi.h>
#endif
__constant__ float texStretchH;
__constant__ float2 texStretch[MAX_TEXS];
__constant__ float2 texShift[MAX_TEXS];
__constant__ float2 texStretchShow;
__constant__ float2 texShiftShow;
#ifdef USE_TEX_REFS
texture<coffS_t, hipTextureType3D, hipReadModeElementType> layerRefS;
texture<float , hipTextureType3D, hipReadModeElementType> layerRefV;
texture<float , hipTextureType3D, hipReadModeElementType> layerRefT;
texture<float , hipTextureType3D, hipReadModeElementType> layerRefTa;
texture<float , hipTextureType3D, hipReadModeElementType> layerRefTi;
#endif
void ModelTexs::init(){
int node=0, Nprocs=1;
#ifdef MPI_ON
MPI_Comm_rank (MPI_COMM_WORLD, &node);
MPI_Comm_size (MPI_COMM_WORLD, &Nprocs);
#endif
//---------------------------------------------------//--------------------------------------
ShowTexBinded=0;
Ntexs=1; // get from aivModel
if(Ntexs>MAX_TEXS) { printf("Error: maximum number of textures exceeded (%d>%d)\n", Ntexs, MAX_TEXS); exit(-1); }
HostLayerS = new coffS_t*[Ntexs]; HostLayerV = new float*[Ntexs]; HostLayerT = new float*[Ntexs]; HostLayerTi = new float*[Ntexs]; HostLayerTa = new float*[Ntexs];
for(int idev=0;idev<NDev;idev++) { DevLayerS[idev] = new hipArray*[Ntexs]; DevLayerV[idev] = new hipArray*[Ntexs]; DevLayerT[idev] = new hipArray*[Ntexs]; DevLayerTi[idev] = new hipArray*[Ntexs]; DevLayerTa[idev] = new hipArray*[Ntexs]; }
for(int idev=0;idev<NDev;idev++) { layerS_host[idev] = new hipTextureObject_t[Ntexs]; layerV_host[idev] = new hipTextureObject_t[Ntexs]; layerT_host[idev] = new hipTextureObject_t[Ntexs]; layerTi_host[idev] = new hipTextureObject_t[Ntexs]; layerTa_host[idev] = new hipTextureObject_t[Ntexs]; }
for(int idev=0;idev<NDev;idev++) { CHECK_ERROR( hipSetDevice(idev) );
CHECK_ERROR( hipMalloc((void**)&layerS [idev], Ntexs*sizeof(hipTextureObject_t)) );
CHECK_ERROR( hipMalloc((void**)&layerV [idev], Ntexs*sizeof(hipTextureObject_t)) );
CHECK_ERROR( hipMalloc((void**)&layerT [idev], Ntexs*sizeof(hipTextureObject_t)) );
CHECK_ERROR( hipMalloc((void**)&layerTi[idev], Ntexs*sizeof(hipTextureObject_t)) );
CHECK_ERROR( hipMalloc((void**)&layerTa[idev], Ntexs*sizeof(hipTextureObject_t)) );
}
CHECK_ERROR( hipSetDevice(0) );
int Nh=1; unsigned long long texsize_onhost=0, texsize_ondevs=0;
texN = new int3 [Ntexs]; //get from aivModel
tex0 = new int [Ntexs]; //get from aivModel
texStep = new float[Ntexs]; //get from aivModel
float2 texStretchHost[MAX_TEXS];
float2 texShiftHost[MAX_TEXS];
for(int ind=0; ind<Ntexs; ind++) {
#ifdef USE_AIVLIB_MODEL
//get texN from aivModel
get_texture_size(texN[ind].x, texN[ind].y, texN[ind].z);
#else
// My own texN
texN[ind].x = Np/2+1;
texN[ind].y = Nz/32+1;
texN[ind].z = Nh ;
tex0[ind] = 0 ;//in_Yee_cells
texStep[ind] = 3.0;//in_Yee_cells
#endif
tex0[ind] = 0 ;//in_Yee_cells
texStep[ind] = Np*3.0/(texN[ind].x-1);//in_Yee_cells
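// texNwindow: number of texture slices held on each device along x, i.e. the device slab
// (Ns*NDT Yee cells) measured in texture steps plus 2 slices of headroom; it is used as a
// circular (sliding) window, see the modulo arithmetic in copyTexs()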
int texNwindow = int(ceil(Ns*NDT/texStep[ind])+2);
#ifdef CUDA_TEX_INTERP
texStretchHost[ind].x = 1.0/(2.0*texStep[ind]*texNwindow);
texStretchHost[ind].y = 1.0/(2*Nz)*(texN[ind].y-1)/texN[ind].y;
texShiftHost[ind].x = 1.0/(2.0*texNwindow);
texShiftHost[ind].y = 1.0/(2.0*texN[ind].y);
#else
texStretchHost[ind].x = 1.0/(2.0*texStep[ind]);
texStretchHost[ind].y = 1.0/(2*Nz)*(texN[ind].y-1);
texShiftHost[ind].x = 0.5;//texN[ind].x/(2.0*texNwindow);
texShiftHost[ind].y = 0.5;
#endif
texsize_onhost+= texN[ind].x*texN[ind].y*texN[ind].z;
texsize_ondevs+= texNwindow*texN[ind].y*texN[ind].z;
if(node==0) printf("Texture%d Size %dx%dx%d (Nx x Ny x Nh)\n", ind, texN[ind].x, texN[ind].y, texN[ind].z);
if(node==0) printf("Texture%d Stepx %g\n", ind, texStep[ind]);
if(texStep[ind]<NDT) { printf("Texture profile step is smaller than 3*Yee_cells; is this right?\n"); exit(-1); }
}
#ifdef CUDA_TEX_INTERP
float2 texStretchShowHost = make_float2(1.0/(2*NDT*Np)*(texN[0].x-1)/texN[0].x, 0.);
float2 texShiftShowHost = make_float2(1./(2*texN[0].x), 0.);
h_scale = 2*((1<<30)/(2*texN[0].z)); const float texStretchH_host = 1.0/(texN[0].z*h_scale);
#else
float2 texStretchShowHost = make_float2(1.0/(2*NDT*Np)*(texN[0].x-1), 0.);
float2 texShiftShowHost = make_float2(0.5, 0.);
h_scale = 2*((1<<30)/(2*texN[0].z)); const float texStretchH_host = 1.0/h_scale;
#endif
for(int i=0; i<NDev; i++) {
CHECK_ERROR( hipSetDevice(i) );
CHECK_ERROR( hipMemcpyToSymbol(texStretchH ,&texStretchH_host, sizeof(float), 0, hipMemcpyHostToDevice) );
CHECK_ERROR( hipMemcpyToSymbol(texStretch , texStretchHost, sizeof(float2)*Ntexs, 0, hipMemcpyHostToDevice) );
CHECK_ERROR( hipMemcpyToSymbol(texShift , texShiftHost , sizeof(float2)*Ntexs, 0, hipMemcpyHostToDevice) );
CHECK_ERROR( hipMemcpyToSymbol(texStretchShow, &texStretchShowHost, sizeof(float2)*Ntexs, 0, hipMemcpyHostToDevice) );
CHECK_ERROR( hipMemcpyToSymbol(texShiftShow , &texShiftShowHost , sizeof(float2)*Ntexs, 0, hipMemcpyHostToDevice) );
}
CHECK_ERROR( hipSetDevice(0) );
if(node==0) printf("Textures data on host : %.3fMB\n", texsize_onhost*(sizeof(coffS_t)+2*sizeof(float))/(1024.*1024.));
if(node==0) printf("Textures data on devices: %.3fMB\n", texsize_ondevs*(sizeof(coffS_t)+2*sizeof(float))/(1024.*1024.));
hipChannelFormatDesc channelDesc;
for(int ind=0; ind<Ntexs; ind++) {
const int texNx = texN[ind].x, texNy = texN[ind].y, texNh = texN[ind].z;
int texNwindow = int(ceil(Ns*NDT/texStep[ind])+2);
HostLayerS[ind] = new coffS_t[texNx*texNy*texNh]; //get pointer from aivModel
HostLayerV[ind] = new float [texNx*texNy*texNh]; //get pointer from aivModel
#ifndef ANISO_TR
HostLayerT[ind] = new float [texNx*texNy*texNh]; //get pointer from aivModel
#elif ANISO_TR==1 || ANISO_TR==2 || ANISO_TR==3
HostLayerTi[ind] = new float [texNx*texNy*texNh]; //get pointer from aivModel
HostLayerTa[ind] = new float [texNx*texNy*texNh]; //get pointer from aivModel
HostLayerT[ind] = HostLayerTa[ind];
#endif
for(int idev=0;idev<NDev;idev++) { CHECK_ERROR( hipSetDevice(idev) );
printf("texNwindow=%d\n",texNwindow);
channelDesc = hipCreateChannelDesc<coffS_t>(); CHECK_ERROR( hipMalloc3DArray(&DevLayerS[idev][ind], &channelDesc, make_hipExtent(texNy,texNh,texNwindow)) );
channelDesc = hipCreateChannelDesc<float >(); CHECK_ERROR( hipMalloc3DArray(&DevLayerV[idev][ind], &channelDesc, make_hipExtent(texNy,texNh,texNwindow)) );
#ifndef ANISO_TR
channelDesc = hipCreateChannelDesc<float >(); CHECK_ERROR( hipMalloc3DArray(&DevLayerT [idev][ind], &channelDesc, make_hipExtent(texNy,texNh,texNwindow)) );
#elif ANISO_TR==1 || ANISO_TR==2 || ANISO_TR==3
channelDesc = hipCreateChannelDesc<float >(); CHECK_ERROR( hipMalloc3DArray(&DevLayerTi[idev][ind], &channelDesc, make_hipExtent(texNy,texNh,texNwindow)) );
channelDesc = hipCreateChannelDesc<float >(); CHECK_ERROR( hipMalloc3DArray(&DevLayerTa[idev][ind], &channelDesc, make_hipExtent(texNy,texNh,texNwindow)) );
#endif
}
CHECK_ERROR( hipSetDevice(0) );
ftype* rhoArr; rhoArr=new ftype[texNh+1];
for(int ix=0; ix<texNx; ix++) for(int iy=0; iy<texNy; iy++) {
for(int ih=0; ih<texNh; ih++) { //or get from aivModel
// remember about yshift for idev>0
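// isotropic defaults (per unit density, scaled by rho when stored below): P-moduli Vp^2 on the
// diagonal, shear moduli Vs^2, off-diagonal entries Vp^2-2*Vs^2 (the Lame constant lambda/rho)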
float Vp=defCoff::Vp, Vs=defCoff::Vs, rho=defCoff::rho, drho=defCoff::drho;
ftype C11=Vp*Vp , C13=Vp*Vp-2*Vs*Vs, C12=Vp*Vp-2*Vs*Vs;
ftype C31=Vp*Vp-2*Vs*Vs, C33=Vp*Vp , C32=Vp*Vp-2*Vs*Vs;
ftype C21=Vp*Vp-2*Vs*Vs, C23=Vp*Vp-2*Vs*Vs, C22=Vp*Vp;
ftype C44=Vs*Vs, C66=Vs*Vs, C55=Vs*Vs;
#ifdef USE_AIVLIB_MODEL
//GeoPhysPar p = get_texture_cell(ix,iy,ih-((ih==texNh)?1:0)); Vp=p.Vp; Vs=p.Vs; rho=p.sigma; drho=1.0/rho;
GeoPhysParAniso p = get_texture_cell_aniso(ix,iy,ih-((ih==texNh)?1:0)); Vp=p.Vp; Vs=p.Vs; rho=p.sigma; drho=1.0/rho;
if(rho==0) drho=0;
ftype Vp_q = Vp, Vs_q1 = Vs, Vs_q2 = Vs;
//------Anisotropy flag-------//
// if(rho<0) { rho = -rho; Vp_q = p.Vp_q; Vs_q1 = p.Vs_q1; Vs_q2 = p.Vs_q2; }
// ftype eps = 0, delta = 0, gamma = 0;
// eps = Vp_q /Vp-1;
// delta = Vs_q1/Vs-1;
// gamma = Vs_q2/Vs-1
ftype eps = 0, delta = 0, gamma = 0;
eps = p.epsilon;
delta = p.delta;
gamma = p.gamma;
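// standard Thomsen (VTI) relations per unit density: along the symmetry axis the P- and S-moduli
// are Vp^2 (xx) and Vs^2 (ii); across it they are (1+2*eps)*Vp^2 (ww) and (1+2*gamma)*Vs^2 (aa);
// yy solves the exact delta relation for C13, and zz = ww - 2*aa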
ftype xx = Vp*Vp;
ftype yy = (-Vs*Vs+sqrt((Vp*Vp-Vs*Vs)*(Vp*Vp*(1+2*delta)-Vs*Vs)));
ftype zz = (2*eps+1)*Vp*Vp - (2*gamma+1)*2*Vs*Vs;
ftype ww = (2*eps+1)*Vp*Vp;
ftype ii = Vs*Vs;
ftype aa = (2*gamma+1)*Vs*Vs;
//C11,C12,C13;
//C21,C22,C23;
//C31,C32,C33;
#else
//if(ix<texNx/4) Vp*= (1.0-0.5)/(texNx/4)*ix+0.5;
//if(ix>3*texNx/4) Vp*= (0.5-1.0)/(texNx/4)*ix+0.5+4*(1.0-0.5);
#endif
rhoArr[ih] = rho;
HostLayerV[ind][ix*texNy*texNh+ih*texNy+iy] = drho;
#ifndef ANISO_TR
HostLayerS[ind][ix*texNy*texNh+ih*texNy+iy] = make_float2( Vp*Vp, Vp*Vp-2*Vs*Vs )*rho;
HostLayerT[ind][ix*texNy*texNh+ih*texNy+iy] = Vs*Vs*rho;
#elif ANISO_TR==1
C11 = xx; C12 = yy; C23 = zz; C22 = ww; C44 = aa; C55 = ii;
HostLayerS[ind][ix*texNy*texNh+ih*texNy+iy] = make_float4( C11, C12, C23, C22 )*rho;
HostLayerTa[ind][ix*texNy*texNh+ih*texNy+iy] = C44*rho;
HostLayerTi[ind][ix*texNy*texNh+ih*texNy+iy] = C55*rho;
#elif ANISO_TR==2
C22 = xx; C12 = yy; C13 = zz; C11 = ww; C55 = aa; C44 = ii;
HostLayerS[ind][ix*texNy*texNh+ih*texNy+iy] = make_float4( C22, C12, C13, C11 )*rho;
HostLayerTa[ind][ix*texNy*texNh+ih*texNy+iy] = C55*rho;
HostLayerTi[ind][ix*texNy*texNh+ih*texNy+iy] = C44*rho;
#elif ANISO_TR==3
C33 = xx; C13 = yy; C12 = zz; C11 = ww; C66 = aa; C44 = ii;
HostLayerS[ind][ix*texNy*texNh+ih*texNy+iy] = make_float4( C33, C13, C12, C11 )*rho;
HostLayerTa[ind][ix*texNy*texNh+ih*texNy+iy] = C66*rho;
HostLayerTi[ind][ix*texNy*texNh+ih*texNy+iy] = C44*rho;
#else
#error ANISO_TYPE ANISO_TR not implemented yet
#endif
}
#ifdef USE_AIVLIB_MODEL
if(iy==0) { printf("Testing get_h ix=%d/%d \r", ix, texNx-1); fflush(stdout); }
int aivTexStepX=Np*NDT*2/(texNx-1); //in half-YeeCells
int aivTexStepY=2*Nz/(texNy-1); //in half-YeeCells
for(int xx=(ix==texNx-1?1:0); xx<((ix==0)?1:aivTexStepX); xx++) for(int yy=(iy==texNy-1?1:0); yy<((iy==0)?1:aivTexStepY); yy++) {
for(int iz=0; iz<Na*NDT*2; iz++) {
unsigned short h = get_h(ix*aivTexStepX-xx, iy*aivTexStepY-yy, min(0.,Npmly/2*NDT-iz*0.5)*da);
int id = h/(2*h_scale), idd=h%(2*h_scale);
//int id = floor((h)/double(1<<16)*112);
float rho1 = rhoArr[2*id];
float rho2 = rhoArr[2*id+1];
if(id<0 || 2*id>=texNh || idd>h_scale || rho1<=0 || rho2<=0)
printf("Error: ix=%d-%d iy=%d-%d iz=%g id=%d h%%h_scale=%d rho1=%g rho2=%g\n", ix*aivTexStepX, xx, iy*aivTexStepY, yy, -iz*0.5*da, id, idd, rho1,rho2);
}
}
#endif
}
delete[] rhoArr;
}
printf("\n");
for(int idev=0;idev<NDev;idev++) { CHECK_ERROR( hipSetDevice(idev) );
CHECK_ERROR( hipMemcpy(layerS [idev], layerS_host [idev], sizeof(hipTextureObject_t)*Ntexs, hipMemcpyHostToDevice) );
CHECK_ERROR( hipMemcpy(layerV [idev], layerV_host [idev], sizeof(hipTextureObject_t)*Ntexs, hipMemcpyHostToDevice) );
CHECK_ERROR( hipMemcpy(layerT [idev], layerT_host [idev], sizeof(hipTextureObject_t)*Ntexs, hipMemcpyHostToDevice) );
CHECK_ERROR( hipMemcpy(layerTi[idev], layerTi_host[idev], sizeof(hipTextureObject_t)*Ntexs, hipMemcpyHostToDevice) );
CHECK_ERROR( hipMemcpy(layerTa[idev], layerTa_host[idev], sizeof(hipTextureObject_t)*Ntexs, hipMemcpyHostToDevice) );
}
CHECK_ERROR( hipSetDevice(0) );
if(node==0) printf("creating texture objects...\n");
hipResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = hipResourceTypeArray;
for(int idev=0;idev<NDev;idev++) {
CHECK_ERROR(hipSetDevice(idev));
for(int ind=0; ind<Ntexs; ind++) {
/*const int texNx = ceil(Ns*NDT/texStep[ind])+2, texNy = texN[ind].y, texNh = texN[ind].z;
hipMemcpy3DParms copyparms={0}; copyparms.srcPos=make_hipPos(0,0,0); copyparms.dstPos=make_hipPos(0,0,0);
copyparms.kind=hipMemcpyHostToDevice;
copyparms.srcPtr = make_hipPitchedPtr(&HostLayerS[ind][0], texNh*sizeof(float2), texNh, texNy);
copyparms.dstArray = DevLayerS[idev][ind];
copyparms.extent = make_hipExtent(texNh,texNy,texNx);
CHECK_ERROR( hipMemcpy3D(©parms) );
copyparms.srcPtr = make_hipPitchedPtr(&HostLayerV[ind][0], texNh*sizeof(float ), texNh, texNy);
copyparms.dstArray = DevLayerV[idev][ind];
copyparms.extent = make_hipExtent(texNh,texNy,texNx);
CHECK_ERROR( hipMemcpy3D(©parms) );
copyparms.srcPtr = make_hipPitchedPtr(&HostLayerT[ind][0], texNh*sizeof(float ), texNh, texNy);
copyparms.dstArray = DevLayerT[idev][ind];
copyparms.extent = make_hipExtent(texNh,texNy,texNx);
CHECK_ERROR( hipMemcpy3D(©parms) );*/
hipTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc));
#ifdef CUDA_TEX_INTERP
texDesc.normalizedCoords = 1;
#else
texDesc.normalizedCoords = 0;
#endif
texDesc.filterMode = hipFilterModeLinear;
texDesc.addressMode[0] = hipAddressModeClamp; // in future try to test ModeBorder
texDesc.addressMode[1] = hipAddressModeClamp; // in future try to test ModeBorder
texDesc.addressMode[2] = hipAddressModeWrap;
resDesc.res.array.array = DevLayerS[idev][ind]; //CHECK_ERROR( hipCreateTextureObject(&layerS_host[idev][ind], &resDesc, &texDesc, NULL) );
resDesc.res.array.array = DevLayerV[idev][ind]; //CHECK_ERROR( hipCreateTextureObject(&layerV_host[idev][ind], &resDesc, &texDesc, NULL) );
resDesc.res.array.array = DevLayerT[idev][ind]; //CHECK_ERROR( hipCreateTextureObject(&layerT_host[idev][ind], &resDesc, &texDesc, NULL) );
if(ind==0){
#if TEX_MODEL_TYPE!=1
resDesc.res.array.array = DevLayerS[idev][ind]; //CHECK_ERROR( hipCreateTextureObject(&TexlayerS[idev], &resDesc, &texDesc, NULL) );
resDesc.res.array.array = DevLayerV[idev][ind]; //CHECK_ERROR( hipCreateTextureObject(&TexlayerV[idev], &resDesc, &texDesc, NULL) );
resDesc.res.array.array = DevLayerT[idev][ind]; //CHECK_ERROR( hipCreateTextureObject(&TexlayerT[idev], &resDesc, &texDesc, NULL) );
#endif
}
}
#ifdef USE_TEX_REFS
layerRefS.addressMode[0] = hipAddressModeClamp; layerRefV.addressMode[0] = hipAddressModeClamp; layerRefT.addressMode[0] = hipAddressModeClamp; layerRefTi.addressMode[0] = hipAddressModeClamp; layerRefTa.addressMode[0] = hipAddressModeClamp;
layerRefS.addressMode[1] = hipAddressModeClamp; layerRefV.addressMode[1] = hipAddressModeClamp; layerRefT.addressMode[1] = hipAddressModeClamp; layerRefTi.addressMode[1] = hipAddressModeClamp; layerRefTa.addressMode[1] = hipAddressModeClamp;
layerRefS.addressMode[2] = hipAddressModeWrap; layerRefV.addressMode[2] = hipAddressModeWrap; layerRefT.addressMode[2] = hipAddressModeWrap; layerRefTi.addressMode[2] = hipAddressModeWrap; layerRefTa.addressMode[2] = hipAddressModeWrap;
layerRefS.filterMode = hipFilterModeLinear; layerRefV.filterMode = hipFilterModeLinear; layerRefT.filterMode = hipFilterModeLinear;layerRefTi.filterMode = hipFilterModeLinear;layerRefTa.filterMode = hipFilterModeLinear;
#ifdef CUDA_TEX_INTERP
layerRefS.normalized = true; layerRefV.normalized = true; layerRefT.normalized = true; layerRefTi.normalized = true; layerRefTa.normalized = true;
#else
layerRefS.normalized =false; layerRefV.normalized =false; layerRefT.normalized =false; layerRefTi.normalized =false; layerRefTa.normalized =false;
#endif
channelDesc = hipCreateChannelDesc<coffS_t>(); CHECK_ERROR( hipBindTextureToArray(layerRefS , DevLayerS [idev][0], channelDesc) );
channelDesc = hipCreateChannelDesc<float >(); CHECK_ERROR( hipBindTextureToArray(layerRefV , DevLayerV [idev][0], channelDesc) );
#ifndef ANISO_TR
channelDesc = hipCreateChannelDesc<float >(); CHECK_ERROR( hipBindTextureToArray(layerRefT , DevLayerT [idev][0], channelDesc) );
#elif ANISO_TR==1 || ANISO_TR==2 || ANISO_TR==3
channelDesc = hipCreateChannelDesc<float >(); CHECK_ERROR( hipBindTextureToArray(layerRefTi, DevLayerTi[idev][0], channelDesc) );
channelDesc = hipCreateChannelDesc<float >(); CHECK_ERROR( hipBindTextureToArray(layerRefTa, DevLayerTa[idev][0], channelDesc) );
#endif//ANISO_TR
#endif//USE_TEX_REFS
CHECK_ERROR( hipMemcpy(layerS [idev], layerS_host [idev], sizeof(hipTextureObject_t)*Ntexs, hipMemcpyHostToDevice) );
CHECK_ERROR( hipMemcpy(layerV [idev], layerV_host [idev], sizeof(hipTextureObject_t)*Ntexs, hipMemcpyHostToDevice) );
CHECK_ERROR( hipMemcpy(layerT [idev], layerT_host [idev], sizeof(hipTextureObject_t)*Ntexs, hipMemcpyHostToDevice) );
CHECK_ERROR( hipMemcpy(layerTi[idev], layerTi_host[idev], sizeof(hipTextureObject_t)*Ntexs, hipMemcpyHostToDevice) );
CHECK_ERROR( hipMemcpy(layerTa[idev], layerTa_host[idev], sizeof(hipTextureObject_t)*Ntexs, hipMemcpyHostToDevice) );
}
CHECK_ERROR(hipSetDevice(0));
}
void ModelTexs::copyTexs(const int x1dev, const int x2dev, const int x1host, const int x2host, hipStream_t& streamCopy){
}
void ModelTexs::copyTexs(const int xdev, const int xhost, hipStream_t& streamCopy){
if(xhost==Np) for(int ind=0; ind<Ntexs; ind++) copyTexs(xhost+ceil(texStep[ind]/NDT), xhost+ceil(texStep[ind]/NDT), streamCopy);
for(int idev=0;idev<NDev;idev++) {
CHECK_ERROR(hipSetDevice(idev));
for(int ind=0; ind<Ntexs; ind++) {
int texNwindow = int(ceil(Ns*NDT/texStep[ind])+2);
//if(xhost*NDT<=tex0[ind] || xhost*NDT>tex0[ind]+texN[ind].x*texStep[ind]) continue;
if(xhost*NDT<=tex0[ind]) continue;
if(floor(xhost*NDT/texStep[ind])==floor((xhost-1)*NDT/texStep[ind])) continue;
int storeX = int(floor(xhost*NDT/texStep[ind])-1+texNwindow)%texNwindow;
int loadX = int(floor((xhost*NDT-tex0[ind])/texStep[ind])-1);
double numXf = NDT/texStep[ind];
int numX = (numXf<=1.0)?1:floor(numXf);
DEBUG_PRINT(("copy Textures to dev%d, ind=%d hostx=%d -> %d=devx (num=%d) // texNwindow=%d\n", idev, ind, loadX, storeX, numX, texNwindow));
const int texNz = texN[ind].y, texNy = texN[ind].z;
hipMemcpy3DParms copyparms={0}; copyparms.srcPos=make_hipPos(0,0,loadX); copyparms.dstPos=make_hipPos(0,0,storeX);
copyparms.kind=hipMemcpyHostToDevice;
copyparms.srcPtr = make_hipPitchedPtr(&HostLayerS[ind][0], texNz*sizeof(coffS_t), texNz, texNy);
copyparms.dstArray = DevLayerS[idev][ind];
copyparms.extent = make_hipExtent(texNz,texNy,numX);
CHECK_ERROR( hipMemcpy3DAsync(©parms, streamCopy) );
copyparms.srcPtr = make_hipPitchedPtr(&HostLayerV[ind][0], texNz*sizeof(float ), texNz, texNy);
copyparms.dstArray = DevLayerV[idev][ind];
copyparms.extent = make_hipExtent(texNz,texNy,numX);
CHECK_ERROR( hipMemcpy3DAsync(©parms, streamCopy) );
#ifndef ANISO_TR
copyparms.srcPtr = make_hipPitchedPtr(&HostLayerT[ind][0], texNz*sizeof(float ), texNz, texNy);
copyparms.dstArray = DevLayerT[idev][ind];
copyparms.extent = make_hipExtent(texNz,texNy,numX);
CHECK_ERROR( hipMemcpy3DAsync(©parms, streamCopy) );
#elif ANISO_TR==1 || ANISO_TR==2 || ANISO_TR==3
copyparms.srcPtr = make_hipPitchedPtr(&HostLayerTi[ind][0],texNz*sizeof(float ), texNz, texNy);
copyparms.dstArray = DevLayerTi[idev][ind];
copyparms.extent = make_hipExtent(texNz,texNy,numX);
CHECK_ERROR( hipMemcpy3DAsync(©parms, streamCopy) );
copyparms.srcPtr = make_hipPitchedPtr(&HostLayerTa[ind][0],texNz*sizeof(float ), texNz, texNy);
copyparms.dstArray = DevLayerTa[idev][ind];
copyparms.extent = make_hipExtent(texNz,texNy,numX);
CHECK_ERROR( hipMemcpy3DAsync(©parms, streamCopy) );
#else
#error UNKNOWN ANISO_TYPE
#endif
}
}
CHECK_ERROR(hipSetDevice(0));
}
void ModelRag::set(int x, int y) {
#if TEX_MODEL_TYPE==1
for(int i=0;i<4 ;i++) for(int iz=0;iz<Nz;iz++) I[i][iz]=0;
#endif
for(int i=0;i<32;i++) for(int iz=0;iz<Nz;iz++) { h[i][iz].x=0; h[i][iz].y=0; }
// set values from aivModel
// remember about yshift for idev>0
int idev=0; int ym=0;
while(y>=ym && idev<NDev) { ym+=NStripe[idev]; idev++; }
y-= idev-1;
const int d_index[64][3] = { {-3, +3, 1}, {-2, +3, 0}, {-2, +4, 1}, {-1, +4, 0}, {-1, +5, 1}, {+0, +5, 0},
{-2, +2, 1}, {-1, +2, 0}, {-1, +3, 1}, {+0, +3, 0}, {+0, +4, 1}, {+1, +4, 0},
{-1, +1, 1}, {+0, +1, 0}, {+0, +2, 1}, {+1, +2, 0}, {+1, +3, 1}, {+2, +3, 0},
{+0, +0, 1}, {+1, +0, 0}, {+1, +1, 1}, {+2, +1, 0}, {+2, +2, 1}, {+3, +2, 0},
{+1, -1, 1}, {+2, -1, 0}, {+2, +0, 1}, {+3, +0, 0}, {+3, +1, 1}, {+4, +1, 0},
{+2, -2, 1}, {+3, -2, 0}, {+3, -1, 1}, {+4, -1, 0}, {+4, +0, 1}, {+5, +0, 0},
{-3, +0, 1}, {-2, -1, 1}, {-1, -1, 0}, {-1, -2, 1},
{-2, +1, 1}, {-1, +1, 0}, {-1, +0, 1}, {+0, -1, 1}, {+1, -1, 0},
{-1, +2, 1}, {+0, +1, 1}, {+1, +1, 0}, {+1, +0, 1},
{+0, +3, 1}, {+1, +3, 0}, {+1, +2, 1}, {+2, +1, 1}, {+3, +1, 0},
{+1, +4, 1}, {+2, +3, 1}, {+3, +3, 0}, {+3, +2, 1},
{+2, +5, 1}, {+3, +5, 0}, {+3, +4, 1}, {+4, +3, 1}, {+5, +3, 0},
{0,0,0} };
#ifdef USE_AIVLIB_MODEL
const double corrCoff1 = 1.0/double(H_MAX_SIZE)*(parsHost.texs.texN[0].z-1);
const double corrCoff2 = 1.0/parsHost.texs.texN[0].z*H_MAX_SIZE;
for(int i=0;i<32;i++) for(int iz=0;iz<Nz;iz++) {
int3 x4h;
x4h = make_int3(x*2*NDT+d_index[2*i ][0], iz*2+d_index[2*i ][2], y*2*NDT+d_index[2*i ][1]); x4h = check_bounds(x4h);
h[i][iz].x = get_h(x4h.x, x4h.y, min(0.,Npmly/2*NDT-x4h.z*0.5)*dy) + parsHost.texs.h_scale/2;
//h[i][iz].x = ((x4h.x*x4h.y-x4h.z*0.5*dy)*corrCoff1+0.5)*corrCoff2;
x4h = make_int3(x*2*NDT+d_index[2*i+1][0], iz*2+d_index[2*i+1][2], y*2*NDT+d_index[2*i+1][1]); x4h = check_bounds(x4h);
h[i][iz].y = get_h(x4h.x, x4h.y, min(0.,Npmly/2*NDT-x4h.z*0.5)*dy) + parsHost.texs.h_scale/2;
//h[i][iz].y = ((x4h.x*x4h.y-x4h.z*0.5*dy)*corrCoff1+0.5)*corrCoff2;
}
#endif
}
| 3066ac512485a4fa9325840d7aafc5ea4c5790ee.cu | #include "params.h"
#include "texmodel.cuh"
//using namespace aiv;
#ifdef MPI_ON
#include <mpi.h>
#endif
__constant__ float texStretchH;
__constant__ float2 texStretch[MAX_TEXS];
__constant__ float2 texShift[MAX_TEXS];
__constant__ float2 texStretchShow;
__constant__ float2 texShiftShow;
#ifdef USE_TEX_REFS
texture<coffS_t, cudaTextureType3D, cudaReadModeElementType> layerRefS;
texture<float , cudaTextureType3D, cudaReadModeElementType> layerRefV;
texture<float , cudaTextureType3D, cudaReadModeElementType> layerRefT;
texture<float , cudaTextureType3D, cudaReadModeElementType> layerRefTa;
texture<float , cudaTextureType3D, cudaReadModeElementType> layerRefTi;
#endif
void ModelTexs::init(){
int node=0, Nprocs=1;
#ifdef MPI_ON
MPI_Comm_rank (MPI_COMM_WORLD, &node);
MPI_Comm_size (MPI_COMM_WORLD, &Nprocs);
#endif
//---------------------------------------------------//--------------------------------------
ShowTexBinded=0;
Ntexs=1; // get from aivModel
if(Ntexs>MAX_TEXS) { printf("Error: maximum number of textures exceeded (%d>%d)\n", Ntexs, MAX_TEXS); exit(-1); }
HostLayerS = new coffS_t*[Ntexs]; HostLayerV = new float*[Ntexs]; HostLayerT = new float*[Ntexs]; HostLayerTi = new float*[Ntexs]; HostLayerTa = new float*[Ntexs];
for(int idev=0;idev<NDev;idev++) { DevLayerS[idev] = new cudaArray*[Ntexs]; DevLayerV[idev] = new cudaArray*[Ntexs]; DevLayerT[idev] = new cudaArray*[Ntexs]; DevLayerTi[idev] = new cudaArray*[Ntexs]; DevLayerTa[idev] = new cudaArray*[Ntexs]; }
for(int idev=0;idev<NDev;idev++) { layerS_host[idev] = new cudaTextureObject_t[Ntexs]; layerV_host[idev] = new cudaTextureObject_t[Ntexs]; layerT_host[idev] = new cudaTextureObject_t[Ntexs]; layerTi_host[idev] = new cudaTextureObject_t[Ntexs]; layerTa_host[idev] = new cudaTextureObject_t[Ntexs]; }
for(int idev=0;idev<NDev;idev++) { CHECK_ERROR( cudaSetDevice(idev) );
CHECK_ERROR( cudaMalloc((void**)&layerS [idev], Ntexs*sizeof(cudaTextureObject_t)) );
CHECK_ERROR( cudaMalloc((void**)&layerV [idev], Ntexs*sizeof(cudaTextureObject_t)) );
CHECK_ERROR( cudaMalloc((void**)&layerT [idev], Ntexs*sizeof(cudaTextureObject_t)) );
CHECK_ERROR( cudaMalloc((void**)&layerTi[idev], Ntexs*sizeof(cudaTextureObject_t)) );
CHECK_ERROR( cudaMalloc((void**)&layerTa[idev], Ntexs*sizeof(cudaTextureObject_t)) );
}
CHECK_ERROR( cudaSetDevice(0) );
int Nh=1; unsigned long long texsize_onhost=0, texsize_ondevs=0;
texN = new int3 [Ntexs]; //get from aivModel
tex0 = new int [Ntexs]; //get from aivModel
texStep = new float[Ntexs]; //get from aivModel
float2 texStretchHost[MAX_TEXS];
float2 texShiftHost[MAX_TEXS];
for(int ind=0; ind<Ntexs; ind++) {
#ifdef USE_AIVLIB_MODEL
//get texN from aivModel
get_texture_size(texN[ind].x, texN[ind].y, texN[ind].z);
#else
// My own texN
texN[ind].x = Np/2+1;
texN[ind].y = Nz/32+1;
texN[ind].z = Nh ;
tex0[ind] = 0 ;//in_Yee_cells
texStep[ind] = 3.0;//in_Yee_cells
#endif
tex0[ind] = 0 ;//in_Yee_cells
texStep[ind] = Np*3.0/(texN[ind].x-1);//in_Yee_cells
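// texNwindow: number of texture slices held on each device along x, i.e. the device slab
// (Ns*NDT Yee cells) measured in texture steps plus 2 slices of headroom; it is used as a
// circular (sliding) window, see the modulo arithmetic in copyTexs()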
int texNwindow = int(ceil(Ns*NDT/texStep[ind])+2);
#ifdef CUDA_TEX_INTERP
texStretchHost[ind].x = 1.0/(2.0*texStep[ind]*texNwindow);
texStretchHost[ind].y = 1.0/(2*Nz)*(texN[ind].y-1)/texN[ind].y;
texShiftHost[ind].x = 1.0/(2.0*texNwindow);
texShiftHost[ind].y = 1.0/(2.0*texN[ind].y);
#else
texStretchHost[ind].x = 1.0/(2.0*texStep[ind]);
texStretchHost[ind].y = 1.0/(2*Nz)*(texN[ind].y-1);
texShiftHost[ind].x = 0.5;//texN[ind].x/(2.0*texNwindow);
texShiftHost[ind].y = 0.5;
#endif
texsize_onhost+= texN[ind].x*texN[ind].y*texN[ind].z;
texsize_ondevs+= texNwindow*texN[ind].y*texN[ind].z;
if(node==0) printf("Texture%d Size %dx%dx%d (Nx x Ny x Nh)\n", ind, texN[ind].x, texN[ind].y, texN[ind].z);
if(node==0) printf("Texture%d Stepx %g\n", ind, texStep[ind]);
if(texStep[ind]<NDT) { printf("Texture profile step is smaller than 3*Yee_cells; is this right?\n"); exit(-1); }
}
#ifdef CUDA_TEX_INTERP
float2 texStretchShowHost = make_float2(1.0/(2*NDT*Np)*(texN[0].x-1)/texN[0].x, 0.);
float2 texShiftShowHost = make_float2(1./(2*texN[0].x), 0.);
h_scale = 2*((1<<30)/(2*texN[0].z)); const float texStretchH_host = 1.0/(texN[0].z*h_scale);
#else
float2 texStretchShowHost = make_float2(1.0/(2*NDT*Np)*(texN[0].x-1), 0.);
float2 texShiftShowHost = make_float2(0.5, 0.);
h_scale = 2*((1<<30)/(2*texN[0].z)); const float texStretchH_host = 1.0/h_scale;
#endif
for(int i=0; i<NDev; i++) {
CHECK_ERROR( cudaSetDevice(i) );
CHECK_ERROR( cudaMemcpyToSymbol(texStretchH ,&texStretchH_host, sizeof(float), 0, cudaMemcpyHostToDevice) );
CHECK_ERROR( cudaMemcpyToSymbol(texStretch , texStretchHost, sizeof(float2)*Ntexs, 0, cudaMemcpyHostToDevice) );
CHECK_ERROR( cudaMemcpyToSymbol(texShift , texShiftHost , sizeof(float2)*Ntexs, 0, cudaMemcpyHostToDevice) );
CHECK_ERROR( cudaMemcpyToSymbol(texStretchShow, &texStretchShowHost, sizeof(float2)*Ntexs, 0, cudaMemcpyHostToDevice) );
CHECK_ERROR( cudaMemcpyToSymbol(texShiftShow , &texShiftShowHost , sizeof(float2)*Ntexs, 0, cudaMemcpyHostToDevice) );
}
CHECK_ERROR( cudaSetDevice(0) );
if(node==0) printf("Textures data on host : %.3fMB\n", texsize_onhost*(sizeof(coffS_t)+2*sizeof(float))/(1024.*1024.));
if(node==0) printf("Textures data on devices: %.3fMB\n", texsize_ondevs*(sizeof(coffS_t)+2*sizeof(float))/(1024.*1024.));
cudaChannelFormatDesc channelDesc;
for(int ind=0; ind<Ntexs; ind++) {
const int texNx = texN[ind].x, texNy = texN[ind].y, texNh = texN[ind].z;
int texNwindow = int(ceil(Ns*NDT/texStep[ind])+2);
HostLayerS[ind] = new coffS_t[texNx*texNy*texNh]; //get pointer from aivModel
HostLayerV[ind] = new float [texNx*texNy*texNh]; //get pointer from aivModel
#ifndef ANISO_TR
HostLayerT[ind] = new float [texNx*texNy*texNh]; //get pointer from aivModel
#elif ANISO_TR==1 || ANISO_TR==2 || ANISO_TR==3
HostLayerTi[ind] = new float [texNx*texNy*texNh]; //get pointer from aivModel
HostLayerTa[ind] = new float [texNx*texNy*texNh]; //get pointer from aivModel
HostLayerT[ind] = HostLayerTa[ind];
#endif
for(int idev=0;idev<NDev;idev++) { CHECK_ERROR( cudaSetDevice(idev) );
printf("texNwindow=%d\n",texNwindow);
channelDesc = cudaCreateChannelDesc<coffS_t>(); CHECK_ERROR( cudaMalloc3DArray(&DevLayerS[idev][ind], &channelDesc, make_cudaExtent(texNy,texNh,texNwindow)) );
channelDesc = cudaCreateChannelDesc<float >(); CHECK_ERROR( cudaMalloc3DArray(&DevLayerV[idev][ind], &channelDesc, make_cudaExtent(texNy,texNh,texNwindow)) );
#ifndef ANISO_TR
channelDesc = cudaCreateChannelDesc<float >(); CHECK_ERROR( cudaMalloc3DArray(&DevLayerT [idev][ind], &channelDesc, make_cudaExtent(texNy,texNh,texNwindow)) );
#elif ANISO_TR==1 || ANISO_TR==2 || ANISO_TR==3
channelDesc = cudaCreateChannelDesc<float >(); CHECK_ERROR( cudaMalloc3DArray(&DevLayerTi[idev][ind], &channelDesc, make_cudaExtent(texNy,texNh,texNwindow)) );
channelDesc = cudaCreateChannelDesc<float >(); CHECK_ERROR( cudaMalloc3DArray(&DevLayerTa[idev][ind], &channelDesc, make_cudaExtent(texNy,texNh,texNwindow)) );
#endif
}
CHECK_ERROR( cudaSetDevice(0) );
ftype* rhoArr; rhoArr=new ftype[texNh+1];
for(int ix=0; ix<texNx; ix++) for(int iy=0; iy<texNy; iy++) {
for(int ih=0; ih<texNh; ih++) { //or get from aivModel
// remember about yshift for idev>0
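// isotropic defaults (per unit density, scaled by rho when stored below): P-moduli Vp^2 on the
// diagonal, shear moduli Vs^2, off-diagonal entries Vp^2-2*Vs^2 (the Lame constant lambda/rho)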
float Vp=defCoff::Vp, Vs=defCoff::Vs, rho=defCoff::rho, drho=defCoff::drho;
ftype C11=Vp*Vp , C13=Vp*Vp-2*Vs*Vs, C12=Vp*Vp-2*Vs*Vs;
ftype C31=Vp*Vp-2*Vs*Vs, C33=Vp*Vp , C32=Vp*Vp-2*Vs*Vs;
ftype C21=Vp*Vp-2*Vs*Vs, C23=Vp*Vp-2*Vs*Vs, C22=Vp*Vp;
ftype C44=Vs*Vs, C66=Vs*Vs, C55=Vs*Vs;
#ifdef USE_AIVLIB_MODEL
//GeoPhysPar p = get_texture_cell(ix,iy,ih-((ih==texNh)?1:0)); Vp=p.Vp; Vs=p.Vs; rho=p.sigma; drho=1.0/rho;
GeoPhysParAniso p = get_texture_cell_aniso(ix,iy,ih-((ih==texNh)?1:0)); Vp=p.Vp; Vs=p.Vs; rho=p.sigma; drho=1.0/rho;
if(rho==0) drho=0;
ftype Vp_q = Vp, Vs_q1 = Vs, Vs_q2 = Vs;
//------Anisotropy flag-------//
// if(rho<0) { rho = -rho; Vp_q = p.Vp_q; Vs_q1 = p.Vs_q1; Vs_q2 = p.Vs_q2; }
// ftype eps = 0, delta = 0, gamma = 0;
// eps = Vp_q /Vp-1;
// delta = Vs_q1/Vs-1;
// gamma = Vs_q2/Vs-1
ftype eps = 0, delta = 0, gamma = 0;
eps = p.epsilon;
delta = p.delta;
gamma = p.gamma;
ftype xx = Vp*Vp;
ftype yy = (-Vs*Vs+sqrt((Vp*Vp-Vs*Vs)*(Vp*Vp*(1+2*delta)-Vs*Vs)));
ftype zz = (2*eps+1)*Vp*Vp - (2*gamma+1)*2*Vs*Vs;
ftype ww = (2*eps+1)*Vp*Vp;
ftype ii = Vs*Vs;
ftype aa = (2*gamma+1)*Vs*Vs;
//C11,C12,C13;
//C21,C22,C23;
//C31,C32,C33;
#else
//if(ix<texNx/4) Vp*= (1.0-0.5)/(texNx/4)*ix+0.5;
//if(ix>3*texNx/4) Vp*= (0.5-1.0)/(texNx/4)*ix+0.5+4*(1.0-0.5);
#endif
rhoArr[ih] = rho;
HostLayerV[ind][ix*texNy*texNh+ih*texNy+iy] = drho;
#ifndef ANISO_TR
HostLayerS[ind][ix*texNy*texNh+ih*texNy+iy] = make_float2( Vp*Vp, Vp*Vp-2*Vs*Vs )*rho;
HostLayerT[ind][ix*texNy*texNh+ih*texNy+iy] = Vs*Vs*rho;
#elif ANISO_TR==1
C11 = xx; C12 = yy; C23 = zz; C22 = ww; C44 = aa; C55 = ii;
HostLayerS[ind][ix*texNy*texNh+ih*texNy+iy] = make_float4( C11, C12, C23, C22 )*rho;
HostLayerTa[ind][ix*texNy*texNh+ih*texNy+iy] = C44*rho;
HostLayerTi[ind][ix*texNy*texNh+ih*texNy+iy] = C55*rho;
#elif ANISO_TR==2
C22 = xx; C12 = yy; C13 = zz; C11 = ww; C55 = aa; C44 = ii;
HostLayerS[ind][ix*texNy*texNh+ih*texNy+iy] = make_float4( C22, C12, C13, C11 )*rho;
HostLayerTa[ind][ix*texNy*texNh+ih*texNy+iy] = C55*rho;
HostLayerTi[ind][ix*texNy*texNh+ih*texNy+iy] = C44*rho;
#elif ANISO_TR==3
C33 = xx; C13 = yy; C12 = zz; C11 = ww; C66 = aa; C44 = ii;
HostLayerS[ind][ix*texNy*texNh+ih*texNy+iy] = make_float4( C33, C13, C12, C11 )*rho;
HostLayerTa[ind][ix*texNy*texNh+ih*texNy+iy] = C66*rho;
HostLayerTi[ind][ix*texNy*texNh+ih*texNy+iy] = C44*rho;
#else
#error ANISO_TYPE ANISO_TR not implemented yet
#endif
}
#ifdef USE_AIVLIB_MODEL
if(iy==0) { printf("Testing get_h ix=%d/%d \r", ix, texNx-1); fflush(stdout); }
int aivTexStepX=Np*NDT*2/(texNx-1); //in half-YeeCells
int aivTexStepY=2*Nz/(texNy-1); //in half-YeeCells
for(int xx=(ix==texNx-1?1:0); xx<((ix==0)?1:aivTexStepX); xx++) for(int yy=(iy==texNy-1?1:0); yy<((iy==0)?1:aivTexStepY); yy++) {
for(int iz=0; iz<Na*NDT*2; iz++) {
unsigned short h = get_h(ix*aivTexStepX-xx, iy*aivTexStepY-yy, min(0.,Npmly/2*NDT-iz*0.5)*da);
int id = h/(2*h_scale), idd=h%(2*h_scale);
//int id = floor((h)/double(1<<16)*112);
float rho1 = rhoArr[2*id];
float rho2 = rhoArr[2*id+1];
if(id<0 || 2*id>=texNh || idd>h_scale || rho1<=0 || rho2<=0)
printf("Error: ix=%d-%d iy=%d-%d iz=%g id=%d h%%h_scale=%d rho1=%g rho2=%g\n", ix*aivTexStepX, xx, iy*aivTexStepY, yy, -iz*0.5*da, id, idd, rho1,rho2);
}
}
#endif
}
  delete [] rhoArr;
}
printf("\n");
for(int idev=0;idev<NDev;idev++) { CHECK_ERROR( cudaSetDevice(idev) );
CHECK_ERROR( cudaMemcpy(layerS [idev], layerS_host [idev], sizeof(cudaTextureObject_t)*Ntexs, cudaMemcpyHostToDevice) );
CHECK_ERROR( cudaMemcpy(layerV [idev], layerV_host [idev], sizeof(cudaTextureObject_t)*Ntexs, cudaMemcpyHostToDevice) );
CHECK_ERROR( cudaMemcpy(layerT [idev], layerT_host [idev], sizeof(cudaTextureObject_t)*Ntexs, cudaMemcpyHostToDevice) );
CHECK_ERROR( cudaMemcpy(layerTi[idev], layerTi_host[idev], sizeof(cudaTextureObject_t)*Ntexs, cudaMemcpyHostToDevice) );
CHECK_ERROR( cudaMemcpy(layerTa[idev], layerTa_host[idev], sizeof(cudaTextureObject_t)*Ntexs, cudaMemcpyHostToDevice) );
}
CHECK_ERROR( cudaSetDevice(0) );
if(node==0) printf("creating texture objects...\n");
cudaResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc));
resDesc.resType = cudaResourceTypeArray;
for(int idev=0;idev<NDev;idev++) {
CHECK_ERROR(cudaSetDevice(idev));
for(int ind=0; ind<Ntexs; ind++) {
/*const int texNx = ceil(Ns*NDT/texStep[ind])+2, texNy = texN[ind].y, texNh = texN[ind].z;
cudaMemcpy3DParms copyparms={0}; copyparms.srcPos=make_cudaPos(0,0,0); copyparms.dstPos=make_cudaPos(0,0,0);
copyparms.kind=cudaMemcpyHostToDevice;
copyparms.srcPtr = make_cudaPitchedPtr(&HostLayerS[ind][0], texNh*sizeof(float2), texNh, texNy);
copyparms.dstArray = DevLayerS[idev][ind];
copyparms.extent = make_cudaExtent(texNh,texNy,texNx);
    CHECK_ERROR( cudaMemcpy3D(&copyparms) );
copyparms.srcPtr = make_cudaPitchedPtr(&HostLayerV[ind][0], texNh*sizeof(float ), texNh, texNy);
copyparms.dstArray = DevLayerV[idev][ind];
copyparms.extent = make_cudaExtent(texNh,texNy,texNx);
    CHECK_ERROR( cudaMemcpy3D(&copyparms) );
copyparms.srcPtr = make_cudaPitchedPtr(&HostLayerT[ind][0], texNh*sizeof(float ), texNh, texNy);
copyparms.dstArray = DevLayerT[idev][ind];
copyparms.extent = make_cudaExtent(texNh,texNy,texNx);
    CHECK_ERROR( cudaMemcpy3D(&copyparms) );*/
cudaTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc));
#ifdef CUDA_TEX_INTERP
texDesc.normalizedCoords = 1;
#else
texDesc.normalizedCoords = 0;
#endif
texDesc.filterMode = cudaFilterModeLinear;
texDesc.addressMode[0] = cudaAddressModeClamp; // in future try to test ModeBorder
texDesc.addressMode[1] = cudaAddressModeClamp; // in future try to test ModeBorder
texDesc.addressMode[2] = cudaAddressModeWrap;
resDesc.res.array.array = DevLayerS[idev][ind]; //CHECK_ERROR( cudaCreateTextureObject(&layerS_host[idev][ind], &resDesc, &texDesc, NULL) );
resDesc.res.array.array = DevLayerV[idev][ind]; //CHECK_ERROR( cudaCreateTextureObject(&layerV_host[idev][ind], &resDesc, &texDesc, NULL) );
resDesc.res.array.array = DevLayerT[idev][ind]; //CHECK_ERROR( cudaCreateTextureObject(&layerT_host[idev][ind], &resDesc, &texDesc, NULL) );
if(ind==0){
#if TEX_MODEL_TYPE!=1
resDesc.res.array.array = DevLayerS[idev][ind]; //CHECK_ERROR( cudaCreateTextureObject(&TexlayerS[idev], &resDesc, &texDesc, NULL) );
resDesc.res.array.array = DevLayerV[idev][ind]; //CHECK_ERROR( cudaCreateTextureObject(&TexlayerV[idev], &resDesc, &texDesc, NULL) );
resDesc.res.array.array = DevLayerT[idev][ind]; //CHECK_ERROR( cudaCreateTextureObject(&TexlayerT[idev], &resDesc, &texDesc, NULL) );
#endif
}
}
#ifdef USE_TEX_REFS
layerRefS.addressMode[0] = cudaAddressModeClamp; layerRefV.addressMode[0] = cudaAddressModeClamp; layerRefT.addressMode[0] = cudaAddressModeClamp; layerRefTi.addressMode[0] = cudaAddressModeClamp; layerRefTa.addressMode[0] = cudaAddressModeClamp;
layerRefS.addressMode[1] = cudaAddressModeClamp; layerRefV.addressMode[1] = cudaAddressModeClamp; layerRefT.addressMode[1] = cudaAddressModeClamp; layerRefTi.addressMode[1] = cudaAddressModeClamp; layerRefTa.addressMode[1] = cudaAddressModeClamp;
layerRefS.addressMode[2] = cudaAddressModeWrap; layerRefV.addressMode[2] = cudaAddressModeWrap; layerRefT.addressMode[2] = cudaAddressModeWrap; layerRefTi.addressMode[2] = cudaAddressModeWrap; layerRefTa.addressMode[2] = cudaAddressModeWrap;
layerRefS.filterMode = cudaFilterModeLinear; layerRefV.filterMode = cudaFilterModeLinear; layerRefT.filterMode = cudaFilterModeLinear;layerRefTi.filterMode = cudaFilterModeLinear;layerRefTa.filterMode = cudaFilterModeLinear;
#ifdef CUDA_TEX_INTERP
layerRefS.normalized = true; layerRefV.normalized = true; layerRefT.normalized = true; layerRefTi.normalized = true; layerRefTa.normalized = true;
#else
layerRefS.normalized =false; layerRefV.normalized =false; layerRefT.normalized =false; layerRefTi.normalized =false; layerRefTa.normalized =false;
#endif
channelDesc = cudaCreateChannelDesc<coffS_t>(); CHECK_ERROR( cudaBindTextureToArray(layerRefS , DevLayerS [idev][0], channelDesc) );
channelDesc = cudaCreateChannelDesc<float >(); CHECK_ERROR( cudaBindTextureToArray(layerRefV , DevLayerV [idev][0], channelDesc) );
#ifndef ANISO_TR
channelDesc = cudaCreateChannelDesc<float >(); CHECK_ERROR( cudaBindTextureToArray(layerRefT , DevLayerT [idev][0], channelDesc) );
#elif ANISO_TR==1 || ANISO_TR==2 || ANISO_TR==3
channelDesc = cudaCreateChannelDesc<float >(); CHECK_ERROR( cudaBindTextureToArray(layerRefTi, DevLayerTi[idev][0], channelDesc) );
channelDesc = cudaCreateChannelDesc<float >(); CHECK_ERROR( cudaBindTextureToArray(layerRefTa, DevLayerTa[idev][0], channelDesc) );
#endif//ANISO_TR
#endif//USE_TEX_REFS
CHECK_ERROR( cudaMemcpy(layerS [idev], layerS_host [idev], sizeof(cudaTextureObject_t)*Ntexs, cudaMemcpyHostToDevice) );
CHECK_ERROR( cudaMemcpy(layerV [idev], layerV_host [idev], sizeof(cudaTextureObject_t)*Ntexs, cudaMemcpyHostToDevice) );
CHECK_ERROR( cudaMemcpy(layerT [idev], layerT_host [idev], sizeof(cudaTextureObject_t)*Ntexs, cudaMemcpyHostToDevice) );
CHECK_ERROR( cudaMemcpy(layerTi[idev], layerTi_host[idev], sizeof(cudaTextureObject_t)*Ntexs, cudaMemcpyHostToDevice) );
CHECK_ERROR( cudaMemcpy(layerTa[idev], layerTa_host[idev], sizeof(cudaTextureObject_t)*Ntexs, cudaMemcpyHostToDevice) );
}
CHECK_ERROR(cudaSetDevice(0));
}
void ModelTexs::copyTexs(const int x1dev, const int x2dev, const int x1host, const int x2host, cudaStream_t& streamCopy){
}
void ModelTexs::copyTexs(const int xdev, const int xhost, cudaStream_t& streamCopy){
if(xhost==Np) for(int ind=0; ind<Ntexs; ind++) copyTexs(xhost+ceil(texStep[ind]/NDT), xhost+ceil(texStep[ind]/NDT), streamCopy);
for(int idev=0;idev<NDev;idev++) {
CHECK_ERROR(cudaSetDevice(idev));
for(int ind=0; ind<Ntexs; ind++) {
int texNwindow = int(ceil(Ns*NDT/texStep[ind])+2);
//if(xhost*NDT<=tex0[ind] || xhost*NDT>tex0[ind]+texN[ind].x*texStep[ind]) continue;
if(xhost*NDT<=tex0[ind]) continue;
if(floor(xhost*NDT/texStep[ind])==floor((xhost-1)*NDT/texStep[ind])) continue;
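      // storeX: destination slot in the device-side ring buffer (wrapped modulo texNwindow);
      // loadX : source slice index in the host arrays, offset by the texture origin tex0;
      // numX  : number of slices that have to be refreshed for this step (at least one).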
int storeX = int(floor(xhost*NDT/texStep[ind])-1+texNwindow)%texNwindow;
int loadX = int(floor((xhost*NDT-tex0[ind])/texStep[ind])-1);
double numXf = NDT/texStep[ind];
int numX = (numXf<=1.0)?1:floor(numXf);
DEBUG_PRINT(("copy Textures to dev%d, ind=%d hostx=%d -> %d=devx (num=%d) // texNwindow=%d\n", idev, ind, loadX, storeX, numX, texNwindow));
const int texNz = texN[ind].y, texNy = texN[ind].z;
cudaMemcpy3DParms copyparms={0}; copyparms.srcPos=make_cudaPos(0,0,loadX); copyparms.dstPos=make_cudaPos(0,0,storeX);
copyparms.kind=cudaMemcpyHostToDevice;
copyparms.srcPtr = make_cudaPitchedPtr(&HostLayerS[ind][0], texNz*sizeof(coffS_t), texNz, texNy);
copyparms.dstArray = DevLayerS[idev][ind];
copyparms.extent = make_cudaExtent(texNz,texNy,numX);
      CHECK_ERROR( cudaMemcpy3DAsync(&copyparms, streamCopy) );
copyparms.srcPtr = make_cudaPitchedPtr(&HostLayerV[ind][0], texNz*sizeof(float ), texNz, texNy);
copyparms.dstArray = DevLayerV[idev][ind];
copyparms.extent = make_cudaExtent(texNz,texNy,numX);
      CHECK_ERROR( cudaMemcpy3DAsync(&copyparms, streamCopy) );
#ifndef ANISO_TR
copyparms.srcPtr = make_cudaPitchedPtr(&HostLayerT[ind][0], texNz*sizeof(float ), texNz, texNy);
copyparms.dstArray = DevLayerT[idev][ind];
copyparms.extent = make_cudaExtent(texNz,texNy,numX);
      CHECK_ERROR( cudaMemcpy3DAsync(&copyparms, streamCopy) );
#elif ANISO_TR==1 || ANISO_TR==2 || ANISO_TR==3
copyparms.srcPtr = make_cudaPitchedPtr(&HostLayerTi[ind][0],texNz*sizeof(float ), texNz, texNy);
copyparms.dstArray = DevLayerTi[idev][ind];
copyparms.extent = make_cudaExtent(texNz,texNy,numX);
      CHECK_ERROR( cudaMemcpy3DAsync(&copyparms, streamCopy) );
copyparms.srcPtr = make_cudaPitchedPtr(&HostLayerTa[ind][0],texNz*sizeof(float ), texNz, texNy);
copyparms.dstArray = DevLayerTa[idev][ind];
copyparms.extent = make_cudaExtent(texNz,texNy,numX);
      CHECK_ERROR( cudaMemcpy3DAsync(&copyparms, streamCopy) );
#else
#error UNKNOWN ANISO_TYPE
#endif
}
}
CHECK_ERROR(cudaSetDevice(0));
}
void ModelRag::set(int x, int y) {
#if TEX_MODEL_TYPE==1
for(int i=0;i<4 ;i++) for(int iz=0;iz<Nz;iz++) I[i][iz]=0;
#endif
for(int i=0;i<32;i++) for(int iz=0;iz<Nz;iz++) { h[i][iz].x=0; h[i][iz].y=0; }
// set values from aivModel
  // remember to account for the y-shift for idev>0
int idev=0; int ym=0;
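  // Walk the per-device stripes to find the one containing row y; after the loop idev is one
  // past the owning device, and y is shifted down by one row per crossed stripe boundary
  // (presumably to account for the one-row overlap between neighbouring stripes).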
while(y>=ym && idev<NDev) { ym+=NStripe[idev]; idev++; }
y-= idev-1;
const int d_index[64][3] = { {-3, +3, 1}, {-2, +3, 0}, {-2, +4, 1}, {-1, +4, 0}, {-1, +5, 1}, {+0, +5, 0},
{-2, +2, 1}, {-1, +2, 0}, {-1, +3, 1}, {+0, +3, 0}, {+0, +4, 1}, {+1, +4, 0},
{-1, +1, 1}, {+0, +1, 0}, {+0, +2, 1}, {+1, +2, 0}, {+1, +3, 1}, {+2, +3, 0},
{+0, +0, 1}, {+1, +0, 0}, {+1, +1, 1}, {+2, +1, 0}, {+2, +2, 1}, {+3, +2, 0},
{+1, -1, 1}, {+2, -1, 0}, {+2, +0, 1}, {+3, +0, 0}, {+3, +1, 1}, {+4, +1, 0},
{+2, -2, 1}, {+3, -2, 0}, {+3, -1, 1}, {+4, -1, 0}, {+4, +0, 1}, {+5, +0, 0},
{-3, +0, 1}, {-2, -1, 1}, {-1, -1, 0}, {-1, -2, 1},
{-2, +1, 1}, {-1, +1, 0}, {-1, +0, 1}, {+0, -1, 1}, {+1, -1, 0},
{-1, +2, 1}, {+0, +1, 1}, {+1, +1, 0}, {+1, +0, 1},
{+0, +3, 1}, {+1, +3, 0}, {+1, +2, 1}, {+2, +1, 1}, {+3, +1, 0},
{+1, +4, 1}, {+2, +3, 1}, {+3, +3, 0}, {+3, +2, 1},
{+2, +5, 1}, {+3, +5, 0}, {+3, +4, 1}, {+4, +3, 1}, {+5, +3, 0},
{0,0,0} };
#ifdef USE_AIVLIB_MODEL
const double corrCoff1 = 1.0/double(H_MAX_SIZE)*(parsHost.texs.texN[0].z-1);
const double corrCoff2 = 1.0/parsHost.texs.texN[0].z*H_MAX_SIZE;
for(int i=0;i<32;i++) for(int iz=0;iz<Nz;iz++) {
int3 x4h;
x4h = make_int3(x*2*NDT+d_index[2*i ][0], iz*2+d_index[2*i ][2], y*2*NDT+d_index[2*i ][1]); x4h = check_bounds(x4h);
h[i][iz].x = get_h(x4h.x, x4h.y, min(0.,Npmly/2*NDT-x4h.z*0.5)*dy) + parsHost.texs.h_scale/2;
//h[i][iz].x = ((x4h.x*x4h.y-x4h.z*0.5*dy)*corrCoff1+0.5)*corrCoff2;
x4h = make_int3(x*2*NDT+d_index[2*i+1][0], iz*2+d_index[2*i+1][2], y*2*NDT+d_index[2*i+1][1]); x4h = check_bounds(x4h);
h[i][iz].y = get_h(x4h.x, x4h.y, min(0.,Npmly/2*NDT-x4h.z*0.5)*dy) + parsHost.texs.h_scale/2;
//h[i][iz].y = ((x4h.x*x4h.y-x4h.z*0.5*dy)*corrCoff1+0.5)*corrCoff2;
}
#endif
}
|
583ec4ac4df5216f619af17fc9017eb1b4d7dd57.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <jacobi7_cuda.h>
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
assert(result == hipSuccess);
}
#endif
return result;
}
void initial_data(float *h_A, float *h_B, const int xyz){
    // randomly generated test data
srand(time(NULL));
int i = 0;
for(; i < xyz; i++) {
h_A[i] = 1 + (float)rand() / (float)RAND_MAX;
h_B[i] = h_A[i];
}
}
int main(int argc, char **argv){
if(argc != 7) {
printf("USAGE: %s <NX> <NY> <NZ> <TX> <TY> <TIME STEPS>\n", argv[0]);
return 1;
}
// program parameters trans
const int nx = atoi(argv[1]);
const int ny = atoi(argv[2]);
const int nz = atoi(argv[3]);
const int tx = atoi(argv[4]);
const int ty = atoi(argv[5]);
const int timesteps = atoi(argv[6]);
int devId = 0;
hipDeviceProp_t prop;
checkCuda( hipGetDeviceProperties(&prop, devId));
printf("Device : %s\n", prop.name);
checkCuda( hipSetDevice(devId));
const int xyz = nx * ny * nz;
const int xyz_bytes = xyz * sizeof(float);
const int nstreams = 4;
const int streamSize = nz/nstreams; // sliced along z dimension
const int streamBytes = (streamSize+2) * nx * ny * sizeof(float);
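    // The volume is decomposed into z-slabs, one per stream. Interior slabs are processed with
    // one ghost plane on each side (hence the streamSize+2 planes in streamBytes); the first and
    // last slabs touch the boundary, need only one ghost plane, and use their own byte counts
    // defined further below.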
float *h_A;
float *h_B;
float *d_A;
float *d_B;
// Allocate host buffers
checkCuda(hipHostMalloc((void**)&h_A, xyz_bytes)); // host pinned
checkCuda(hipHostMalloc((void**)&h_B, xyz_bytes));
checkCuda(hipMalloc((void**)&d_A, xyz_bytes)); // device
checkCuda(hipMalloc((void**)&d_B, xyz_bytes));
    // grid data initialization
initial_data(h_A, h_B, xyz);
float fac = 6.0/(h_A[0] * h_A[0]);
dim3 grid(nx/tx, ny/ty);
dim3 block(tx, ty);
float* input = d_A;
float* output = d_B;
float ms; // elapsed time in milliseconds
// create events and streams
hipEvent_t startEvent, stopEvent, dummyEvent;
hipStream_t stream[nstreams];
checkCuda( hipEventCreate(&startEvent) );
checkCuda( hipEventCreate(&stopEvent) );
checkCuda( hipEventCreate(&dummyEvent) );
for (int i = 0; i < nstreams; ++i)
checkCuda( hipStreamCreate(&stream[i]) );
float *tmp;
// baseline case - sequential transfer and execute
checkCuda( hipEventRecord(startEvent,0) );
checkCuda( hipMemcpy(d_A, h_A, xyz_bytes, hipMemcpyHostToDevice));
checkCuda( hipMemcpy(d_B, d_A, xyz_bytes, hipMemcpyDeviceToDevice));
// Run the GPU kernel
for(int t = 0; t < timesteps; t += 1) {
hipLaunchKernelGGL(( jacobi3d_7p_glmem), dim3(grid), dim3(block), 0, 0, input, output, nx, ny, nz, fac);
// swap input and output
tmp = input;
input = output;
output = tmp;
}
if(timesteps%2==0)
checkCuda( hipMemcpy(h_A, output, xyz_bytes, hipMemcpyDeviceToHost));
else
checkCuda( hipMemcpy(h_A, input, xyz_bytes, hipMemcpyDeviceToHost));
checkCuda( hipEventRecord(stopEvent, 0));
checkCuda( hipEventSynchronize(stopEvent));
checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent));
printf("Time for sequential transfer and execute (ms): %f\n", ms);
    // asynchronous version 1: loop over {copy, kernel, copy}
initial_data(h_A, h_B, xyz);
checkCuda( hipEventRecord(startEvent,0));
// the first stream
const int stream0_in_bytes = (streamSize+1) * nx * ny * sizeof(float);
const int stream0_out_bytes = streamSize * nx * ny * sizeof(float);
checkCuda( hipMemcpyAsync(d_A, h_A, stream0_in_bytes, hipMemcpyHostToDevice, stream[0]));
// Run the GPU kernel
for(int t = 0; t < timesteps; t += 1) {
hipLaunchKernelGGL(( jacobi3d_7p_glmem), dim3(grid), dim3(block), 0, stream[0], d_A, d_B, nx, ny, streamSize+1, fac);
// swap input and output
tmp = d_A;
d_A = d_B;
d_B = tmp;
}
    if(timesteps%2==0)
        checkCuda( hipMemcpyAsync(h_A, d_B, stream0_out_bytes, hipMemcpyDeviceToHost, stream[0]));
    else
        checkCuda( hipMemcpyAsync(h_A, d_A, stream0_out_bytes, hipMemcpyDeviceToHost, stream[0]));
// the last stream
const int streaml_in_bytes = (streamSize+1) * nx * ny * sizeof(float);
const int streaml_out_bytes = streamSize * nx * ny * sizeof(float);
    const int offset_l = ((nstreams-1) * streamSize - 1) * nx * ny;  // element offset of the last slab, one ghost plane below its first interior plane
checkCuda( hipMemcpyAsync(&d_A[offset_l], &h_A[offset_l], streaml_in_bytes, hipMemcpyHostToDevice, stream[nstreams-1]));
// Run the GPU kernel
input = &d_A[offset_l];
output = &d_B[offset_l];
for(int t = 0; t < timesteps; t += 1) {
hipLaunchKernelGGL(( jacobi3d_7p_glmem), dim3(grid), dim3(block), 0, stream[nstreams-1], input, output, nx, ny, streamSize+1, fac);
// swap input and output
tmp = input;
input = output;
output = tmp;
}
if(timesteps%2==0)
checkCuda( hipMemcpyAsync(&h_A[offset_l], output, streaml_out_bytes, hipMemcpyDeviceToHost, stream[nstreams-1]));
else
checkCuda( hipMemcpyAsync(&h_A[offset_l], input, streaml_out_bytes, hipMemcpyDeviceToHost, stream[nstreams-1]));
// the middle stream
for (int i = 1; i < nstreams-1; ++i){
int offset = (i * streamSize - 1) * nx * ny;
checkCuda( hipMemcpyAsync(&d_A[offset], &h_A[offset], streamBytes, hipMemcpyHostToDevice, stream[i]));
// Run the GPU kernel
input = &d_A[offset];
output = &d_B[offset];
for(int t = 0; t < timesteps; t += 1) {
hipLaunchKernelGGL(( jacobi3d_7p_glmem), dim3(grid), dim3(block), 0, stream[i], input, output, nx, ny, streamSize+2, fac);
// swap input and output
tmp = input;
input = output;
output = tmp;
}
if(timesteps%2==0)
checkCuda( hipMemcpyAsync(&h_A[offset + nx*ny], output+nx*ny, streamSize*nx*ny*sizeof(float), hipMemcpyDeviceToHost, stream[i]));
else
checkCuda( hipMemcpyAsync(&h_A[offset + nx*ny], input + nx*ny, streamSize*nx*ny*sizeof(float), hipMemcpyDeviceToHost, stream[i]));
}
checkCuda( hipEventRecord(stopEvent, 0));
checkCuda( hipEventSynchronize(stopEvent));
checkCuda( hipEventElapsedTime(&ms, startEvent, stopEvent));
printf("Time for aysnchronous V1 transfer and execute (ms): %f\n", ms);
// asynchronous version 2:
// cleanup
checkCuda( hipEventDestroy(startEvent));
checkCuda( hipEventDestroy(stopEvent));
checkCuda(hipEventDestroy(dummyEvent));
for (int i=0; i < nstreams; ++i)
checkCuda( hipStreamDestroy(stream[i]));
hipFree(d_A);
hipFree(d_B);
hipHostFree(h_A);
hipHostFree(h_B);
return 0;
}
| 583ec4ac4df5216f619af17fc9017eb1b4d7dd57.cu |
#include <stdio.h>
#include <jacobi7_cuda.h>
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
assert(result == cudaSuccess);
}
#endif
return result;
}
void initial_data(float *h_A, float *h_B, const int xyz){
    // randomly generated test data
srand(time(NULL));
int i = 0;
for(; i < xyz; i++) {
h_A[i] = 1 + (float)rand() / (float)RAND_MAX;
h_B[i] = h_A[i];
}
}
int main(int argc, char **argv){
if(argc != 7) {
printf("USAGE: %s <NX> <NY> <NZ> <TX> <TY> <TIME STEPS>\n", argv[0]);
return 1;
}
// program parameters trans
const int nx = atoi(argv[1]);
const int ny = atoi(argv[2]);
const int nz = atoi(argv[3]);
const int tx = atoi(argv[4]);
const int ty = atoi(argv[5]);
const int timesteps = atoi(argv[6]);
int devId = 0;
cudaDeviceProp prop;
checkCuda( cudaGetDeviceProperties(&prop, devId));
printf("Device : %s\n", prop.name);
checkCuda( cudaSetDevice(devId));
const int xyz = nx * ny * nz;
const int xyz_bytes = xyz * sizeof(float);
const int nstreams = 4;
const int streamSize = nz/nstreams; // sliced along z dimension
const int streamBytes = (streamSize+2) * nx * ny * sizeof(float);
float *h_A;
float *h_B;
float *d_A;
float *d_B;
// Allocate host buffers
checkCuda(cudaMallocHost((void**)&h_A, xyz_bytes)); // host pinned
checkCuda(cudaMallocHost((void**)&h_B, xyz_bytes));
checkCuda(cudaMalloc((void**)&d_A, xyz_bytes)); // device
checkCuda(cudaMalloc((void**)&d_B, xyz_bytes));
    // grid data initialization
initial_data(h_A, h_B, xyz);
float fac = 6.0/(h_A[0] * h_A[0]);
dim3 grid(nx/tx, ny/ty);
dim3 block(tx, ty);
float* input = d_A;
float* output = d_B;
float ms; // elapsed time in milliseconds
// create events and streams
cudaEvent_t startEvent, stopEvent, dummyEvent;
cudaStream_t stream[nstreams];
checkCuda( cudaEventCreate(&startEvent) );
checkCuda( cudaEventCreate(&stopEvent) );
checkCuda( cudaEventCreate(&dummyEvent) );
for (int i = 0; i < nstreams; ++i)
checkCuda( cudaStreamCreate(&stream[i]) );
float *tmp;
// baseline case - sequential transfer and execute
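    // The whole volume is copied to the device once, all timesteps run back-to-back in the
    // default stream, and the result is copied back; this serves as the reference timing for
    // the streamed variants further below.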
checkCuda( cudaEventRecord(startEvent,0) );
checkCuda( cudaMemcpy(d_A, h_A, xyz_bytes, cudaMemcpyHostToDevice));
checkCuda( cudaMemcpy(d_B, d_A, xyz_bytes, cudaMemcpyDeviceToDevice));
// Run the GPU kernel
for(int t = 0; t < timesteps; t += 1) {
jacobi3d_7p_glmem<<<grid, block>>>(input, output, nx, ny, nz, fac);
// swap input and output
tmp = input;
input = output;
output = tmp;
}
if(timesteps%2==0)
checkCuda( cudaMemcpy(h_A, output, xyz_bytes, cudaMemcpyDeviceToHost));
else
checkCuda( cudaMemcpy(h_A, input, xyz_bytes, cudaMemcpyDeviceToHost));
checkCuda( cudaEventRecord(stopEvent, 0));
checkCuda( cudaEventSynchronize(stopEvent));
checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent));
printf("Time for sequential transfer and execute (ms): %f\n", ms);
    // asynchronous version 1: loop over {copy, kernel, copy}
initial_data(h_A, h_B, xyz);
checkCuda( cudaEventRecord(startEvent,0));
// the first stream
const int stream0_in_bytes = (streamSize+1) * nx * ny * sizeof(float);
const int stream0_out_bytes = streamSize * nx * ny * sizeof(float);
checkCuda( cudaMemcpyAsync(d_A, h_A, stream0_in_bytes, cudaMemcpyHostToDevice, stream[0]));
// Run the GPU kernel
for(int t = 0; t < timesteps; t += 1) {
jacobi3d_7p_glmem<<<grid, block, 0, stream[0]>>>(d_A, d_B, nx, ny, streamSize+1, fac);
// swap input and output
tmp = d_A;
d_A = d_B;
d_B = tmp;
}
    if(timesteps%2==0)
        checkCuda( cudaMemcpyAsync(h_A, d_B, stream0_out_bytes, cudaMemcpyDeviceToHost, stream[0]));
    else
        checkCuda( cudaMemcpyAsync(h_A, d_A, stream0_out_bytes, cudaMemcpyDeviceToHost, stream[0]));
// the last stream
const int streaml_in_bytes = (streamSize+1) * nx * ny * sizeof(float);
const int streaml_out_bytes = streamSize * nx * ny * sizeof(float);
    const int offset_l = ((nstreams-1) * streamSize - 1) * nx * ny;  // element offset of the last slab, one ghost plane below its first interior plane
checkCuda( cudaMemcpyAsync(&d_A[offset_l], &h_A[offset_l], streaml_in_bytes, cudaMemcpyHostToDevice, stream[nstreams-1]));
// Run the GPU kernel
input = &d_A[offset_l];
output = &d_B[offset_l];
for(int t = 0; t < timesteps; t += 1) {
jacobi3d_7p_glmem<<<grid, block, 0, stream[nstreams-1]>>>(input, output, nx, ny, streamSize+1, fac);
// swap input and output
tmp = input;
input = output;
output = tmp;
}
if(timesteps%2==0)
checkCuda( cudaMemcpyAsync(&h_A[offset_l], output, streaml_out_bytes, cudaMemcpyDeviceToHost, stream[nstreams-1]));
else
checkCuda( cudaMemcpyAsync(&h_A[offset_l], input, streaml_out_bytes, cudaMemcpyDeviceToHost, stream[nstreams-1]));
// the middle stream
for (int i = 1; i < nstreams-1; ++i){
int offset = (i * streamSize - 1) * nx * ny;
checkCuda( cudaMemcpyAsync(&d_A[offset], &h_A[offset], streamBytes, cudaMemcpyHostToDevice, stream[i]));
// Run the GPU kernel
input = &d_A[offset];
output = &d_B[offset];
for(int t = 0; t < timesteps; t += 1) {
jacobi3d_7p_glmem<<<grid, block, 0, stream[i]>>>(input, output, nx, ny, streamSize+2, fac);
// swap input and output
tmp = input;
input = output;
output = tmp;
}
if(timesteps%2==0)
checkCuda( cudaMemcpyAsync(&h_A[offset + nx*ny], output+nx*ny, streamSize*nx*ny*sizeof(float), cudaMemcpyDeviceToHost, stream[i]));
else
checkCuda( cudaMemcpyAsync(&h_A[offset + nx*ny], input + nx*ny, streamSize*nx*ny*sizeof(float), cudaMemcpyDeviceToHost, stream[i]));
}
checkCuda( cudaEventRecord(stopEvent, 0));
checkCuda( cudaEventSynchronize(stopEvent));
checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent));
printf("Time for aysnchronous V1 transfer and execute (ms): %f\n", ms);
// asynchronous version 2:
// cleanup
checkCuda( cudaEventDestroy(startEvent));
checkCuda( cudaEventDestroy(stopEvent));
checkCuda(cudaEventDestroy(dummyEvent));
for (int i=0; i < nstreams; ++i)
checkCuda( cudaStreamDestroy(stream[i]));
cudaFree(d_A);
cudaFree(d_B);
cudaFreeHost(h_A);
cudaFreeHost(h_B);
return 0;
}
|
30e93b908ffe6f8a549e3301dc1e250c4a290bd5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../shared/globals.hpp"
#include "../shared/timer.hpp"
#include "../shared/argument_parsing.cuh"
#include "../shared/graph.cuh"
#include "../shared/subgraph.cuh"
#include "../shared/partitioner.cuh"
#include "../shared/subgraph_generator.cuh"
#include "../shared/gpu_error_check.cuh"
#include "../shared/gpu_kernels.cuh"
#include "../shared/subway_utilities.hpp"
#include "../shared/test.cuh"
#include "../shared/test.cu"
int main(int argc, char** argv)
{
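    // A no-op free forces lazy runtime/context initialization to happen here, so the start-up
    // cost is not charged to the timed sections below.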
hipFree(0);
ArgumentParser arguments(argc, argv, true, false);
Timer timer;
timer.Start();
GraphPR<OutEdge> graph(arguments.input, true);
graph.ReadGraph();
float readtime = timer.Finish();
cout << "Graph Reading finished in " << readtime/1000 << " (s).\n";
//for(unsigned int i=0; i<100; i++)
// cout << graph.edgeList[i].end << " " << graph.edgeList[i].w8;
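    // initPR seeds every vertex's delta (0.15, i.e. 1 minus the usual damping factor of 0.85 in PageRank);
    // acc is the convergence threshold handed to the subgraph generator and to the pr_async kernel.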
float initPR = 0.15;
float acc = 0.01;
for(unsigned int i=0; i<graph.num_nodes; i++)
{
graph.delta[i] = initPR;
graph.value[i] = 0;
}
//graph.value[arguments.sourceNode] = 0;
//graph.label[arguments.sourceNode] = true;
gpuErrorcheck(hipMemcpy(graph.d_outDegree, graph.outDegree, graph.num_nodes * sizeof(u_int64_t), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(graph.d_value, graph.value, graph.num_nodes * sizeof(float), hipMemcpyHostToDevice));
gpuErrorcheck(hipMemcpy(graph.d_delta, graph.delta, graph.num_nodes * sizeof(float), hipMemcpyHostToDevice));
Subgraph<OutEdge> subgraph(graph.num_nodes, graph.num_edges);
SubgraphGenerator<OutEdge> subgen(graph);
subgen.generate(graph, subgraph, acc);
Partitioner<OutEdge> partitioner;
timer.Start();
uint gItr = 0;
bool finished;
bool *d_finished;
gpuErrorcheck(hipMalloc(&d_finished, sizeof(bool)));
while (subgraph.numActiveNodes>0)
{
gItr++;
partitioner.partition(subgraph, subgraph.numActiveNodes);
// a super iteration
for(int i=0; i<partitioner.numPartitions; i++)
{
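            // Copy this partition's slice of the active edge list to the device, then iterate the
            // asynchronous PR kernel on it until no vertex in the partition produces an update.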
hipDeviceSynchronize();
gpuErrorcheck(hipMemcpy(subgraph.d_activeEdgeList, subgraph.activeEdgeList + partitioner.fromEdge[i], (partitioner.partitionEdgeSize[i]) * sizeof(OutEdge), hipMemcpyHostToDevice));
hipDeviceSynchronize();
//moveUpLabels<<< partitioner.partitionNodeSize[i]/512 + 1 , 512 >>>(subgraph.d_activeNodes, graph.d_label, partitioner.partitionNodeSize[i], partitioner.fromNode[i]);
//mixLabels<<<partitioner.partitionNodeSize[i]/512 + 1 , 512>>>(subgraph.d_activeNodes, graph.d_label1, graph.d_label2, partitioner.partitionNodeSize[i], partitioner.fromNode[i]);
uint itr = 0;
do
{
itr++;
finished = true;
gpuErrorcheck(hipMemcpy(d_finished, &finished, sizeof(bool), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( pr_async), dim3(partitioner.partitionNodeSize[i]/512 + 1) , dim3(512) , 0, 0, partitioner.partitionNodeSize[i],
partitioner.fromNode[i],
partitioner.fromEdge[i],
subgraph.d_activeNodes,
subgraph.d_activeNodesPointer,
subgraph.d_activeEdgeList,
graph.d_outDegree,
graph.d_value,
graph.d_delta,
d_finished,
acc);
hipDeviceSynchronize();
gpuErrorcheck( hipPeekAtLastError() );
gpuErrorcheck(hipMemcpy(&finished, d_finished, sizeof(bool), hipMemcpyDeviceToHost));
}while(!(finished));
cout << itr << ((itr>1) ? " Inner Iterations" : " Inner Iteration") << " in Global Iteration " << gItr << ", Partition " << i << endl;
}
subgen.generate(graph, subgraph, acc);
}
float runtime = timer.Finish();
cout << "Processing finished in " << runtime/1000 << " (s).\n";
gpuErrorcheck(hipMemcpy(graph.value, graph.d_value, graph.num_nodes*sizeof(float), hipMemcpyDeviceToHost));
utilities::PrintResults(graph.value, min(30, graph.num_nodes));
if(arguments.hasOutput)
utilities::SaveResults(arguments.output, graph.value, graph.num_nodes);
}
| 30e93b908ffe6f8a549e3301dc1e250c4a290bd5.cu | #include "../shared/globals.hpp"
#include "../shared/timer.hpp"
#include "../shared/argument_parsing.cuh"
#include "../shared/graph.cuh"
#include "../shared/subgraph.cuh"
#include "../shared/partitioner.cuh"
#include "../shared/subgraph_generator.cuh"
#include "../shared/gpu_error_check.cuh"
#include "../shared/gpu_kernels.cuh"
#include "../shared/subway_utilities.hpp"
#include "../shared/test.cuh"
#include "../shared/test.cu"
int main(int argc, char** argv)
{
cudaFree(0);
ArgumentParser arguments(argc, argv, true, false);
Timer timer;
timer.Start();
GraphPR<OutEdge> graph(arguments.input, true);
graph.ReadGraph();
float readtime = timer.Finish();
cout << "Graph Reading finished in " << readtime/1000 << " (s).\n";
//for(unsigned int i=0; i<100; i++)
// cout << graph.edgeList[i].end << " " << graph.edgeList[i].w8;
float initPR = 0.15;
float acc = 0.01;
for(unsigned int i=0; i<graph.num_nodes; i++)
{
graph.delta[i] = initPR;
graph.value[i] = 0;
}
//graph.value[arguments.sourceNode] = 0;
//graph.label[arguments.sourceNode] = true;
gpuErrorcheck(cudaMemcpy(graph.d_outDegree, graph.outDegree, graph.num_nodes * sizeof(u_int64_t), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(graph.d_value, graph.value, graph.num_nodes * sizeof(float), cudaMemcpyHostToDevice));
gpuErrorcheck(cudaMemcpy(graph.d_delta, graph.delta, graph.num_nodes * sizeof(float), cudaMemcpyHostToDevice));
Subgraph<OutEdge> subgraph(graph.num_nodes, graph.num_edges);
SubgraphGenerator<OutEdge> subgen(graph);
subgen.generate(graph, subgraph, acc);
Partitioner<OutEdge> partitioner;
timer.Start();
uint gItr = 0;
bool finished;
bool *d_finished;
gpuErrorcheck(cudaMalloc(&d_finished, sizeof(bool)));
while (subgraph.numActiveNodes>0)
{
gItr++;
partitioner.partition(subgraph, subgraph.numActiveNodes);
// a super iteration
for(int i=0; i<partitioner.numPartitions; i++)
{
cudaDeviceSynchronize();
gpuErrorcheck(cudaMemcpy(subgraph.d_activeEdgeList, subgraph.activeEdgeList + partitioner.fromEdge[i], (partitioner.partitionEdgeSize[i]) * sizeof(OutEdge), cudaMemcpyHostToDevice));
cudaDeviceSynchronize();
//moveUpLabels<<< partitioner.partitionNodeSize[i]/512 + 1 , 512 >>>(subgraph.d_activeNodes, graph.d_label, partitioner.partitionNodeSize[i], partitioner.fromNode[i]);
//mixLabels<<<partitioner.partitionNodeSize[i]/512 + 1 , 512>>>(subgraph.d_activeNodes, graph.d_label1, graph.d_label2, partitioner.partitionNodeSize[i], partitioner.fromNode[i]);
uint itr = 0;
do
{
itr++;
finished = true;
gpuErrorcheck(cudaMemcpy(d_finished, &finished, sizeof(bool), cudaMemcpyHostToDevice));
pr_async<<< partitioner.partitionNodeSize[i]/512 + 1 , 512 >>>(partitioner.partitionNodeSize[i],
partitioner.fromNode[i],
partitioner.fromEdge[i],
subgraph.d_activeNodes,
subgraph.d_activeNodesPointer,
subgraph.d_activeEdgeList,
graph.d_outDegree,
graph.d_value,
graph.d_delta,
d_finished,
acc);
cudaDeviceSynchronize();
gpuErrorcheck( cudaPeekAtLastError() );
gpuErrorcheck(cudaMemcpy(&finished, d_finished, sizeof(bool), cudaMemcpyDeviceToHost));
}while(!(finished));
cout << itr << ((itr>1) ? " Inner Iterations" : " Inner Iteration") << " in Global Iteration " << gItr << ", Partition " << i << endl;
}
subgen.generate(graph, subgraph, acc);
}
float runtime = timer.Finish();
cout << "Processing finished in " << runtime/1000 << " (s).\n";
gpuErrorcheck(cudaMemcpy(graph.value, graph.d_value, graph.num_nodes*sizeof(float), cudaMemcpyDeviceToHost));
utilities::PrintResults(graph.value, min(30, graph.num_nodes));
if(arguments.hasOutput)
utilities::SaveResults(arguments.output, graph.value, graph.num_nodes);
}
|
06-reduction_v1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* asum: sum of all entries of a vector */
#include "reduction_aux.h"
//typedef double FLOAT;
/* sum all entries in x and assign to y
* block dim must be 256 */
__global__ void asum_stg_1(const FLOAT *x, FLOAT *y, int N)
{
__shared__ FLOAT sdata[256];
int idx = get_tid();
int tid = threadIdx.x;
int bid = get_bid();
/* load data to shared mem */
if (idx < N) {
sdata[tid] = x[idx];
}
else {
sdata[tid] = 0;
}
__syncthreads();
/* reduction using shared mem */
if (tid < 128) sdata[tid] += sdata[tid + 128];
__syncthreads();
if (tid < 64) sdata[tid] += sdata[tid + 64];
__syncthreads();
if (tid < 32) sdata[tid] += sdata[tid + 32];
__syncthreads();
if (tid < 16) sdata[tid] += sdata[tid + 16];
__syncthreads();
if (tid < 8) sdata[tid] += sdata[tid + 8];
__syncthreads();
if (tid < 4) sdata[tid] += sdata[tid + 4];
__syncthreads();
if (tid < 2) sdata[tid] += sdata[tid + 2];
__syncthreads();
if (tid == 0) {
y[bid] = sdata[0] + sdata[1];
}
}
__global__ void asum_stg_3(FLOAT *x, int N)
{
__shared__ FLOAT sdata[128];
int tid = threadIdx.x;
int i;
sdata[tid] = 0;
/* load data to shared mem */
for (i = 0; i < N; i += 128) {
if (tid + i < N) sdata[tid] += x[i + tid];
}
__syncthreads();
/* reduction using shared mem */
if (tid < 64) sdata[tid] = sdata[tid] + sdata[tid + 64];
__syncthreads();
if (tid < 32) sdata[tid] = sdata[tid] + sdata[tid + 32];
__syncthreads();
if (tid < 16) sdata[tid] += sdata[tid + 16];
__syncthreads();
if (tid < 8) sdata[tid] += sdata[tid + 8];
__syncthreads();
if (tid < 4) sdata[tid] += sdata[tid + 4];
__syncthreads();
if (tid < 2) sdata[tid] += sdata[tid + 2];
__syncthreads();
if (tid == 0) {
x[0] = sdata[0] + sdata[1];
}
}
/* dy and dz serve as cache: result stores in dz[0] */
void asum(FLOAT *dx, FLOAT *dy, FLOAT *dz, int N)
{
/* 1D block */
int bs = 256;
/* 2D grid */
int s = ceil(sqrt((N + bs - 1.) / bs));
dim3 grid = dim3(s, s);
int gs = 0;
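    /* three-stage tree reduction: stage 1 folds the N inputs into one partial sum per
       256-thread block (written to dy); running stage 1 again folds those partials into dz;
       stage 3 uses a single 128-thread block to add the remaining gs values into dz[0].
       The s-by-s grid keeps each grid dimension small enough for very large N. */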
/* stage 1 */
hipLaunchKernelGGL(( asum_stg_1), dim3(grid), dim3(bs), 0, 0, dx, dy, N);
/* stage 2 */
{
/* 1D grid */
int N2 = (N + bs - 1) / bs;
int s2 = ceil(sqrt((N2 + bs - 1.) / bs));
dim3 grid2 = dim3(s2, s2);
hipLaunchKernelGGL(( asum_stg_1), dim3(grid2), dim3(bs), 0, 0, dy, dz, N2);
/* record gs */
gs = (N2 + bs - 1.) / bs;
}
/* stage 3 */
hipLaunchKernelGGL(( asum_stg_3), dim3(1), dim3(128), 0, 0, dz, gs);
}
/* host, add */
FLOAT asum_host(FLOAT *x, int N);
FLOAT asum_host(FLOAT *x, int N)
{
int i;
double t = 0;
for (i = 0; i < N; i++) {
t += x[i];
printf(">>>asum_host t=%f,x[i]=%f\n", t, x[i]);
}
return t;
}
//int main0602(int argc, char **argv)
int main0602()
{
int N = 1024;
//int N = 1024*10000;
int nbytes = N * sizeof(FLOAT);
FLOAT *dx = NULL, *hx = NULL;
FLOAT *dy = NULL, *dz;
int i, itr = 20;
FLOAT asd = 0, ash;
double td, th;
//if (argc == 2) {
// int an;
// an = atoi(argv[1]);
// if (an > 0) N = an;
//}
/* allocate GPU mem */
hipMalloc((void **)&dx, nbytes);
hipMalloc((void **)&dy, sizeof(FLOAT) * ((N + 255) / 256));
hipMalloc((void **)&dz, sizeof(FLOAT) * ((N + 255) / 256));
if (dx == NULL || dy == NULL || dz == NULL) {
printf("couldn't allocate GPU memory\n");
return -1;
}
printf("allocated %e MB on GPU\n", nbytes / (1024.f * 1024.f));
    /* allocate CPU mem */
hx = (FLOAT *) malloc(nbytes);
if (hx == NULL) {
printf("couldn't allocate CPU memory\n");
return -2;
}
printf("allocated %e MB on CPU\n", nbytes / (1024.f * 1024.f));
/* init */
for (i = 0; i < N; i++) {
hx[i] = 1;
}
/* copy data to GPU */
hipMemcpy(dx, hx, nbytes, hipMemcpyHostToDevice);
    /* let all prior device work settle before timing */
hipDeviceSynchronize();
td = get_time();
/* call GPU */
for (i = 0; i < itr; i++) asum(dx, dy, dz, N);
/* let GPU finish */
hipDeviceSynchronize();
td = get_time() - td;
th = get_time();
for (i = 0; i < itr; i++) ash = asum_host(hx, N);
th = get_time() - th;
/* copy data from GPU */
hipMemcpy(&asd, dz, sizeof(FLOAT), hipMemcpyDeviceToHost);
printf("asum, answer: %d, calculated by GPU:%f, calculated by CPU:%f\n", N, asd, ash);
printf("GPU time: %e, CPU time: %e, speedup: %g\n", td, th, th / td);
hipFree(dx);
hipFree(dy);
hipFree(dz);
free(hx);
return 0;
}
| 06-reduction_v1.cu |
/* asum: sum of all entries of a vector */
#include "reduction_aux.h"
//typedef double FLOAT;
/* sum all entries in x and assign to y
* block dim must be 256 */
__global__ void asum_stg_1(const FLOAT *x, FLOAT *y, int N)
{
__shared__ FLOAT sdata[256];
int idx = get_tid();
int tid = threadIdx.x;
int bid = get_bid();
/* load data to shared mem */
if (idx < N) {
sdata[tid] = x[idx];
}
else {
sdata[tid] = 0;
}
__syncthreads();
/* reduction using shared mem */
if (tid < 128) sdata[tid] += sdata[tid + 128];
__syncthreads();
if (tid < 64) sdata[tid] += sdata[tid + 64];
__syncthreads();
if (tid < 32) sdata[tid] += sdata[tid + 32];
__syncthreads();
if (tid < 16) sdata[tid] += sdata[tid + 16];
__syncthreads();
if (tid < 8) sdata[tid] += sdata[tid + 8];
__syncthreads();
if (tid < 4) sdata[tid] += sdata[tid + 4];
__syncthreads();
if (tid < 2) sdata[tid] += sdata[tid + 2];
__syncthreads();
if (tid == 0) {
y[bid] = sdata[0] + sdata[1];
}
}
__global__ void asum_stg_3(FLOAT *x, int N)
{
__shared__ FLOAT sdata[128];
int tid = threadIdx.x;
int i;
sdata[tid] = 0;
/* load data to shared mem */
for (i = 0; i < N; i += 128) {
if (tid + i < N) sdata[tid] += x[i + tid];
}
__syncthreads();
/* reduction using shared mem */
if (tid < 64) sdata[tid] = sdata[tid] + sdata[tid + 64];
__syncthreads();
if (tid < 32) sdata[tid] = sdata[tid] + sdata[tid + 32];
__syncthreads();
if (tid < 16) sdata[tid] += sdata[tid + 16];
__syncthreads();
if (tid < 8) sdata[tid] += sdata[tid + 8];
__syncthreads();
if (tid < 4) sdata[tid] += sdata[tid + 4];
__syncthreads();
if (tid < 2) sdata[tid] += sdata[tid + 2];
__syncthreads();
if (tid == 0) {
x[0] = sdata[0] + sdata[1];
}
}
/* dy and dz serve as cache: result stores in dz[0] */
void asum(FLOAT *dx, FLOAT *dy, FLOAT *dz, int N)
{
/* 1D block */
int bs = 256;
/* 2D grid */
int s = ceil(sqrt((N + bs - 1.) / bs));
dim3 grid = dim3(s, s);
int gs = 0;
/* stage 1 */
asum_stg_1<<<grid, bs>>>(dx, dy, N);
/* stage 2 */
{
/* 1D grid */
int N2 = (N + bs - 1) / bs;
int s2 = ceil(sqrt((N2 + bs - 1.) / bs));
dim3 grid2 = dim3(s2, s2);
asum_stg_1<<<grid2, bs>>>(dy, dz, N2);
/* record gs */
gs = (N2 + bs - 1.) / bs;
}
/* stage 3 */
asum_stg_3<<<1, 128>>>(dz, gs);
}
/* host, add */
FLOAT asum_host(FLOAT *x, int N);
FLOAT asum_host(FLOAT *x, int N)
{
int i;
double t = 0;
for (i = 0; i < N; i++) {
t += x[i];
printf(">>>asum_host t=%f,x[i]=%f\n", t, x[i]);
}
return t;
}
//int main0602(int argc, char **argv)
int main0602()
{
int N = 1024;
//int N = 1024*10000;
int nbytes = N * sizeof(FLOAT);
FLOAT *dx = NULL, *hx = NULL;
FLOAT *dy = NULL, *dz;
int i, itr = 20;
FLOAT asd = 0, ash;
double td, th;
//if (argc == 2) {
// int an;
// an = atoi(argv[1]);
// if (an > 0) N = an;
//}
/* allocate GPU mem */
cudaMalloc((void **)&dx, nbytes);
cudaMalloc((void **)&dy, sizeof(FLOAT) * ((N + 255) / 256));
cudaMalloc((void **)&dz, sizeof(FLOAT) * ((N + 255) / 256));
if (dx == NULL || dy == NULL || dz == NULL) {
printf("couldn't allocate GPU memory\n");
return -1;
}
printf("allocated %e MB on GPU\n", nbytes / (1024.f * 1024.f));
    /* allocate CPU mem */
hx = (FLOAT *) malloc(nbytes);
if (hx == NULL) {
printf("couldn't allocate CPU memory\n");
return -2;
}
printf("allocated %e MB on CPU\n", nbytes / (1024.f * 1024.f));
/* init */
for (i = 0; i < N; i++) {
hx[i] = 1;
}
/* copy data to GPU */
cudaMemcpy(dx, hx, nbytes, cudaMemcpyHostToDevice);
    /* let all prior device work settle before timing */
cudaDeviceSynchronize();
td = get_time();
/* call GPU */
for (i = 0; i < itr; i++) asum(dx, dy, dz, N);
/* let GPU finish */
cudaDeviceSynchronize();
td = get_time() - td;
th = get_time();
for (i = 0; i < itr; i++) ash = asum_host(hx, N);
th = get_time() - th;
/* copy data from GPU */
cudaMemcpy(&asd, dz, sizeof(FLOAT), cudaMemcpyDeviceToHost);
printf("asum, answer: %d, calculated by GPU:%f, calculated by CPU:%f\n", N, asd, ash);
printf("GPU time: %e, CPU time: %e, speedup: %g\n", td, th, th / td);
cudaFree(dx);
cudaFree(dy);
cudaFree(dz);
free(hx);
return 0;
}
|
c6870245a502fa79e10f6daa2057367e6d42247c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
This example demonstrates how to call a CUTLASS TRMM kernel and provides a naive reference
matrix multiply kernel to verify its correctness.
  The CUTLASS Trmm template is instantiated in the function CutlassStrmmNN. This kernel computes
  the triangular matrix product (TRMM) using double-precision floating-point arithmetic and assumes
all matrices have column-major layout.
The threadblock tile size is chosen as 64x64x16 which offers good performance for large matrices.
See the CUTLASS Parallel for All blog post for more exposition on the tunable parameters available
in CUTLASS.
https://devblogs.nvidia.com/cutlass-linear-algebra-cuda/
Aside from defining and launching the STRMM kernel, this example does not use any other components
or utilities within CUTLASS. Such utilities are demonstrated elsewhere in other examples and are
prevalent in the CUTLASS unit tests.
*/
// Standard Library includes
#include <iostream>
#include <sstream>
#include <vector>
// Helper methods to check for errors
#include "helper.h"
//
// CUTLASS includes needed for double-precision TRMM kernel
//
// Defines cutlass::gemm::device::Trmm, the generic Trmm computation template class.
#include "cutlass/gemm/device/trmm.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// This function defines a CUTLASS TRMM kernel instantiation, constructs its parameters object,
// and launches it on the CUDA device.
//
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Define a CUTLASS TRMM template and launch a TRMM kernel.
hipError_t CutlassStrmmNN(
int M,
int N,
double alpha,
double const *A,
int lda,
double const *B,
int ldb,
double *C,
int ldc) {
// Define type definition for double-precision CUTLASS TRMM with column-major
// input matrices and 64x64x16 threadblock tile size (chosen by default).
//
// To keep the interface manageable, several helpers are defined for plausible compositions
// including the following example for double-precision TRMM. Typical values are used as
// default template arguments.
//
// To view the full trmm device API interface, see `cutlass/gemm/device/trmm.h`
using ColumnMajor = cutlass::layout::ColumnMajor;
using CutlassTrmm = cutlass::gemm::device::Trmm<
double,
ColumnMajor,
cutlass::SideMode::kLeft,
cutlass::FillMode::kLower,
cutlass::DiagType::kNonUnit,
double,
ColumnMajor,
double,
ColumnMajor,
double,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<64, 64, 16>,
cutlass::gemm::GemmShape<32, 32, 16>,
cutlass::gemm::GemmShape<8, 8, 4>,
cutlass::epilogue::thread::LinearCombination<
double,
1,
double,
double,
cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling
>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
5,
1,
1,
false,
cutlass::arch::OpMultiplyAdd
>;
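  // Tile shapes above: 64x64x16 per threadblock, 32x32x16 per warp and 8x8x4 per tensor-core
  // instruction (double-precision MMA on SM80); the trailing integer arguments select the number
  // of mainloop pipeline stages (5) and the alignments of the A and B operands (1).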
// Define a CUTLASS TRMM type
CutlassTrmm trmm_operator;
// Construct the CUTLASS TRMM arguments object.
//
// One of CUTLASS's design patterns is to define trmm argument objects that are constructible
// in host code and passed to kernels by value. These may include pointers, strides, scalars,
// and other arguments needed by Trmm and its components.
//
// The benefits of this pattern are (1.) a structured, composable strategy for passing host-constructible
// arguments to kernels and (2.) minimized initialization overhead on kernel entry.
//
CutlassTrmm::Arguments args(cutlass::gemm::GemmUniversalMode::kGemm,
{M, N, M}, // Trmm Problem dimensions in Left-Side Mode
1, // batch_count,
{alpha}, // Scalars used in the Epilogue
reinterpret_cast<void const *>(A),
reinterpret_cast<void const *>(B),
reinterpret_cast<void *>(C), // destination matrix D (may be different memory than source C matrix)
(int64_t)M*M, // Batch strides
(int64_t)M*N,
(int64_t)M*N,
lda,
ldb,
ldc);
//
// Launch the CUTLASS TRMM kernel.
//
cutlass::Status status = trmm_operator(args);
//
// Return a hipError_t if the CUTLASS TRMM operator returned an error code.
//
if (status != cutlass::Status::kSuccess) {
return hipErrorUnknown;
}
// Return success, if no errors were encountered.
return hipSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// The source code after this point in the file is generic CUDA using the CUDA Runtime API
// and simple CUDA kernels to initialize matrices and compute the general matrix product.
//
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel to initialize a matrix with small integers.
__global__ void InitializeMatrix_kernel(
double *matrix,
int ldm,
int rows,
int columns,
int seed = 0,
cutlass::FillMode fill_mode = cutlass::FillMode::kInvalid) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < rows && j < columns) {
if (fill_mode == cutlass::FillMode::kLower && i < j) return;
else if (fill_mode == cutlass::FillMode::kUpper && i > j) return;
int offset = i + j * ldm;
// Generate arbitrary elements.
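      // 16807 is the classic Park-Miller LCG multiplier; reducing the product modulo 16 and
      // recentering it yields small integers in [-8, 7].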
int const k = 16807;
int const m = 16;
double value = double(((offset + seed) * k % m) - m / 2);
matrix[offset] = value;
}
}
/// Simple function to initialize a matrix to arbitrary small integers.
hipError_t InitializeMatrix(double *matrix, int ldm, int rows, int columns, int seed = 0,
cutlass::FillMode fill_mode = cutlass::FillMode::kInvalid) {
dim3 block(16, 16);
dim3 grid(
(rows + block.x - 1) / block.x,
(columns + block.y - 1) / block.y
);
hipLaunchKernelGGL(( InitializeMatrix_kernel), dim3(grid), dim3(block) , 0, 0, matrix, ldm, rows, columns, seed, fill_mode);
return hipGetLastError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocates device memory for a matrix then fills with arbitrary small integers.
hipError_t AllocateMatrix(double **matrix, int ldm, int rows, int columns, int seed = 0,
cutlass::FillMode fill_mode = cutlass::FillMode::kInvalid) {
hipError_t result;
size_t sizeof_matrix = sizeof(double) * ldm * columns;
// Allocate device memory.
result = hipMalloc(reinterpret_cast<void **>(matrix), sizeof_matrix);
if (result != hipSuccess) {
std::cerr << "Failed to allocate matrix: "
<< hipGetErrorString(result) << std::endl;
return result;
}
// Clear the allocation.
result = hipMemset(*matrix, 0, sizeof_matrix);
if (result != hipSuccess) {
std::cerr << "Failed to clear matrix device memory: "
<< hipGetErrorString(result) << std::endl;
return result;
}
// Initialize matrix elements to arbitrary small integers.
result = InitializeMatrix(*matrix, ldm, rows, columns, seed, fill_mode);
if (result != hipSuccess) {
std::cerr << "Failed to initialize matrix: "
<< hipGetErrorString(result) << std::endl;
return result;
}
return result;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Naive reference TRMM computation.
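/// Because AllocateMatrix() fills only the lower triangle of A (the rest stays zero from the
/// hipMemset), a plain dense inner product over the full column reproduces the
/// lower-triangular, left-side TRMM.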
__global__ void ReferenceTrmm_kernel(
int M,
int N,
double alpha,
double const *A,
int lda,
double const *B,
int ldb,
double *C,
int ldc) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < M && j < N) {
double accumulator = 0;
for (int k = 0; k < M; ++k) {
accumulator += A[i + k * lda] * B[k + j * ldb]; // Since A is in Left-Side Mode
}
C[i + j * ldc] = alpha * accumulator;
}
}
/// Reference TRMM computation.
hipError_t ReferenceTrmm(
int M,
int N,
double alpha,
double const *A,
int lda,
double const *B,
int ldb,
double *C,
int ldc) {
dim3 block(16, 16);
dim3 grid(
(M + block.x - 1) / block.x,
(N + block.y - 1) / block.y
);
hipLaunchKernelGGL(( ReferenceTrmm_kernel), dim3(grid), dim3(block) , 0, 0, M, N, alpha, A, lda, B, ldb, C, ldc);
return hipGetLastError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocate several matrices in GPU device memory and call a double-precision
/// CUTLASS TRMM kernel.
hipError_t TestCutlassTrmm(int M, int N, double alpha) {
hipError_t result;
//
// Define several matrices to be used as operands to TRMM kernels.
//
// Compute leading dimensions for each matrix.
int lda = M;
int ldb = M;
int ldc = M;
// Compute size in bytes of the C matrix.
size_t sizeof_C = sizeof(double) * ldc * N;
// Define pointers to matrices in GPU device memory.
double *A;
double *B;
double *C_cutlass;
double *C_reference;
//
// Allocate matrices in GPU device memory with arbitrary seeds.
//
result = AllocateMatrix(&A, lda, M, M, 0, cutlass::FillMode::kLower);
if (result != hipSuccess) {
return result;
}
result = AllocateMatrix(&B, ldb, M, N, 17);
if (result != hipSuccess) {
hipFree(A);
return result;
}
result = AllocateMatrix(&C_cutlass, ldc, M, N, 101);
if (result != hipSuccess) {
hipFree(A);
hipFree(B);
return result;
}
result = AllocateMatrix(&C_reference, ldc, M, N, 101);
if (result != hipSuccess) {
hipFree(A);
hipFree(B);
hipFree(C_cutlass);
return result;
}
result = hipMemcpy(C_reference, C_cutlass, sizeof_C, hipMemcpyDeviceToDevice);
if (result != hipSuccess) {
std::cerr << "Failed to copy C_cutlass matrix to C_reference: "
<< hipGetErrorString(result) << std::endl;
hipFree(C_reference);
hipFree(C_cutlass);
hipFree(B);
hipFree(A);
return result;
}
//
// Launch CUTLASS TRMM.
//
result = CutlassStrmmNN(M, N, alpha, A, lda, B, ldb, C_cutlass, ldc);
if (result != hipSuccess) {
std::cerr << "CUTLASS TRMM kernel failed: "
<< hipGetErrorString(result) << std::endl;
hipFree(C_reference);
hipFree(C_cutlass);
hipFree(B);
hipFree(A);
return result;
}
//
// Verify.
//
// Launch reference TRMM
result = ReferenceTrmm(M, N, alpha, A, lda, B, ldb, C_reference, ldc);
if (result != hipSuccess) {
std::cerr << "Reference TRMM kernel failed: "
<< hipGetErrorString(result) << std::endl;
hipFree(C_reference);
hipFree(C_cutlass);
hipFree(B);
hipFree(A);
return result;
}
// Copy to host and verify equivalence.
std::vector<double> host_cutlass(ldc * N, 0);
std::vector<double> host_reference(ldc * N, 0);
result = hipMemcpy(host_cutlass.data(), C_cutlass, sizeof_C, hipMemcpyDeviceToHost);
if (result != hipSuccess) {
std::cerr << "Failed to copy CUTLASS TRMM results: "
<< hipGetErrorString(result) << std::endl;
hipFree(C_reference);
hipFree(C_cutlass);
hipFree(B);
hipFree(A);
return result;
}
result = hipMemcpy(host_reference.data(), C_reference, sizeof_C, hipMemcpyDeviceToHost);
if (result != hipSuccess) {
std::cerr << "Failed to copy Reference TRMM results: "
<< hipGetErrorString(result) << std::endl;
hipFree(C_reference);
hipFree(C_cutlass);
hipFree(B);
hipFree(A);
return result;
}
//
// Free device memory allocations.
//
hipFree(C_reference);
hipFree(C_cutlass);
hipFree(B);
hipFree(A);
//
// Test for bit equivalence of results.
//
if (host_cutlass != host_reference) {
std::cerr << "CUTLASS results incorrect." << std::endl;
return hipErrorUnknown;
}
return hipSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Entry point to basic_trmm example.
//
// usage:
//
// 00_basic_trmm <M> <N> <alpha>
//
int main(int argc, const char *arg[]) {
bool notSupported = false;
// CUTLASS must be compiled with CUDA 11 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ >= 11)) {
std::cerr << "NVIDIA Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
hipDeviceProp_t props;
hipError_t error = hipGetDeviceProperties(&props, 0);
if (error != hipSuccess) {
std::cerr << "hipGetDeviceProperties() returned an error: " << hipGetErrorString(error) << std::endl;
return -1;
}
if (!((props.major * 10 + props.minor) >= 80)) {
std::cerr << "This example requires compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
return 0;
}
//
// Parse the command line to obtain TRMM dimensions and scalar values.
//
// TRMM problem dimensions.
int problem[2] = { 128, 128 };
for (int i = 1; i < argc && i < 3; ++i) {
std::stringstream ss(arg[i]);
ss >> problem[i - 1];
}
// Scalars used for linear scaling the result of the matrix product.
double scalars[1] = { 1 };
for (int i = 3; i < argc && i < 4; ++i) {
std::stringstream ss(arg[i]);
ss >> scalars[i - 3];
}
//
// Run the CUTLASS TRMM test.
//
hipError_t result = TestCutlassTrmm(
problem[0], // TRMM M dimension
problem[1], // TRMM N dimension
scalars[0] // alpha
);
if (result == hipSuccess) {
std::cout << "Passed." << std::endl;
}
// Exit.
return result == hipSuccess ? 0 : -1;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
| c6870245a502fa79e10f6daa2057367e6d42247c.cu | /***************************************************************************************************
* Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/*
This example demonstrates how to call a CUTLASS TRMM kernel and provides a naive reference
matrix multiply kernel to verify its correctness.
  The CUTLASS Trmm template is instantiated in the function CutlassStrmmNN. This kernel computes
  the triangular matrix product (TRMM) using double-precision floating-point arithmetic and assumes
all matrices have column-major layout.
The threadblock tile size is chosen as 64x64x16 which offers good performance for large matrices.
See the CUTLASS Parallel for All blog post for more exposition on the tunable parameters available
in CUTLASS.
https://devblogs.nvidia.com/cutlass-linear-algebra-cuda/
Aside from defining and launching the STRMM kernel, this example does not use any other components
or utilities within CUTLASS. Such utilities are demonstrated elsewhere in other examples and are
prevalent in the CUTLASS unit tests.
*/
// Standard Library includes
#include <iostream>
#include <sstream>
#include <vector>
// Helper methods to check for errors
#include "helper.h"
//
// CUTLASS includes needed for double-precision TRMM kernel
//
// Defines cutlass::gemm::device::Trmm, the generic Trmm computation template class.
#include "cutlass/gemm/device/trmm.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// This function defines a CUTLASS TRMM kernel instantiation, constructs its parameters object,
// and launches it on the CUDA device.
//
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Define a CUTLASS TRMM template and launch a TRMM kernel.
cudaError_t CutlassStrmmNN(
int M,
int N,
double alpha,
double const *A,
int lda,
double const *B,
int ldb,
double *C,
int ldc) {
// Define type definition for double-precision CUTLASS TRMM with column-major
// input matrices and 64x64x16 threadblock tile size (chosen by default).
//
// To keep the interface manageable, several helpers are defined for plausible compositions
// including the following example for double-precision TRMM. Typical values are used as
// default template arguments.
//
// To view the full trmm device API interface, see `cutlass/gemm/device/trmm.h`
using ColumnMajor = cutlass::layout::ColumnMajor;
using CutlassTrmm = cutlass::gemm::device::Trmm<
double,
ColumnMajor,
cutlass::SideMode::kLeft,
cutlass::FillMode::kLower,
cutlass::DiagType::kNonUnit,
double,
ColumnMajor,
double,
ColumnMajor,
double,
cutlass::arch::OpClassTensorOp,
cutlass::arch::Sm80,
cutlass::gemm::GemmShape<64, 64, 16>,
cutlass::gemm::GemmShape<32, 32, 16>,
cutlass::gemm::GemmShape<8, 8, 4>,
cutlass::epilogue::thread::LinearCombination<
double,
1,
double,
double,
cutlass::epilogue::thread::ScaleType::OnlyAlphaScaling
>,
cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>,
5,
1,
1,
false,
cutlass::arch::OpMultiplyAdd
>;
// Define a CUTLASS TRMM type
CutlassTrmm trmm_operator;
// Construct the CUTLASS TRMM arguments object.
//
// One of CUTLASS's design patterns is to define trmm argument objects that are constructible
// in host code and passed to kernels by value. These may include pointers, strides, scalars,
// and other arguments needed by Trmm and its components.
//
// The benefits of this pattern are (1.) a structured, composable strategy for passing host-constructible
// arguments to kernels and (2.) minimized initialization overhead on kernel entry.
//
CutlassTrmm::Arguments args(cutlass::gemm::GemmUniversalMode::kGemm,
{M, N, M}, // Trmm Problem dimensions in Left-Side Mode
1, // batch_count,
{alpha}, // Scalars used in the Epilogue
reinterpret_cast<void const *>(A),
reinterpret_cast<void const *>(B),
reinterpret_cast<void *>(C), // destination matrix D (may be different memory than source C matrix)
(int64_t)M*M, // Batch strides
(int64_t)M*N,
(int64_t)M*N,
lda,
ldb,
ldc);
//
// Launch the CUTLASS TRMM kernel.
//
cutlass::Status status = trmm_operator(args);
//
// Return a cudaError_t if the CUTLASS TRMM operator returned an error code.
//
if (status != cutlass::Status::kSuccess) {
return cudaErrorUnknown;
}
// Return success, if no errors were encountered.
return cudaSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
//
// The source code after this point in the file is generic CUDA using the CUDA Runtime API
// and simple CUDA kernels to initialize matrices and compute the general matrix product.
//
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Kernel to initialize a matrix with small integers.
__global__ void InitializeMatrix_kernel(
double *matrix,
int ldm,
int rows,
int columns,
int seed = 0,
cutlass::FillMode fill_mode = cutlass::FillMode::kInvalid) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < rows && j < columns) {
if (fill_mode == cutlass::FillMode::kLower && i < j) return;
else if (fill_mode == cutlass::FillMode::kUpper && i > j) return;
int offset = i + j * ldm;
// Generate arbitrary elements.
int const k = 16807;
int const m = 16;
double value = double(((offset + seed) * k % m) - m / 2);
matrix[offset] = value;
}
}
/// Simple function to initialize a matrix to arbitrary small integers.
cudaError_t InitializeMatrix(double *matrix, int ldm, int rows, int columns, int seed = 0,
cutlass::FillMode fill_mode = cutlass::FillMode::kInvalid) {
dim3 block(16, 16);
dim3 grid(
(rows + block.x - 1) / block.x,
(columns + block.y - 1) / block.y
);
InitializeMatrix_kernel<<< grid, block >>>(matrix, ldm, rows, columns, seed, fill_mode);
return cudaGetLastError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocates device memory for a matrix then fills with arbitrary small integers.
cudaError_t AllocateMatrix(double **matrix, int ldm, int rows, int columns, int seed = 0,
cutlass::FillMode fill_mode = cutlass::FillMode::kInvalid) {
cudaError_t result;
size_t sizeof_matrix = sizeof(double) * ldm * columns;
// Allocate device memory.
result = cudaMalloc(reinterpret_cast<void **>(matrix), sizeof_matrix);
if (result != cudaSuccess) {
std::cerr << "Failed to allocate matrix: "
<< cudaGetErrorString(result) << std::endl;
return result;
}
// Clear the allocation.
result = cudaMemset(*matrix, 0, sizeof_matrix);
if (result != cudaSuccess) {
std::cerr << "Failed to clear matrix device memory: "
<< cudaGetErrorString(result) << std::endl;
return result;
}
// Initialize matrix elements to arbitrary small integers.
result = InitializeMatrix(*matrix, ldm, rows, columns, seed, fill_mode);
if (result != cudaSuccess) {
std::cerr << "Failed to initialize matrix: "
<< cudaGetErrorString(result) << std::endl;
return result;
}
return result;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Naive reference TRMM computation.
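/// Note: AllocateMatrix zero-fills the buffer and InitializeMatrix skips the upper triangle for
/// FillMode::kLower, so the dense sum over k below reproduces the lower-triangular, left-side
/// TRMM without needing an explicit triangular bound on the loop.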
__global__ void ReferenceTrmm_kernel(
int M,
int N,
double alpha,
double const *A,
int lda,
double const *B,
int ldb,
double *C,
int ldc) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if (i < M && j < N) {
double accumulator = 0;
for (int k = 0; k < M; ++k) {
accumulator += A[i + k * lda] * B[k + j * ldb]; // Since A is in Left-Side Mode
}
C[i + j * ldc] = alpha * accumulator;
}
}
/// Reference TRMM computation.
cudaError_t ReferenceTrmm(
int M,
int N,
double alpha,
double const *A,
int lda,
double const *B,
int ldb,
double *C,
int ldc) {
dim3 block(16, 16);
dim3 grid(
(M + block.x - 1) / block.x,
(N + block.y - 1) / block.y
);
ReferenceTrmm_kernel<<< grid, block >>>(M, N, alpha, A, lda, B, ldb, C, ldc);
return cudaGetLastError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Allocate several matrices in GPU device memory and call a double-precision
/// CUTLASS TRMM kernel.
cudaError_t TestCutlassTrmm(int M, int N, double alpha) {
cudaError_t result;
//
// Define several matrices to be used as operands to TRMM kernels.
//
// Compute leading dimensions for each matrix.
int lda = M;
int ldb = M;
int ldc = M;
// Compute size in bytes of the C matrix.
size_t sizeof_C = sizeof(double) * ldc * N;
// Define pointers to matrices in GPU device memory.
double *A;
double *B;
double *C_cutlass;
double *C_reference;
//
// Allocate matrices in GPU device memory with arbitrary seeds.
//
result = AllocateMatrix(&A, lda, M, M, 0, cutlass::FillMode::kLower);
if (result != cudaSuccess) {
return result;
}
result = AllocateMatrix(&B, ldb, M, N, 17);
if (result != cudaSuccess) {
cudaFree(A);
return result;
}
result = AllocateMatrix(&C_cutlass, ldc, M, N, 101);
if (result != cudaSuccess) {
cudaFree(A);
cudaFree(B);
return result;
}
result = AllocateMatrix(&C_reference, ldc, M, N, 101);
if (result != cudaSuccess) {
cudaFree(A);
cudaFree(B);
cudaFree(C_cutlass);
return result;
}
result = cudaMemcpy(C_reference, C_cutlass, sizeof_C, cudaMemcpyDeviceToDevice);
if (result != cudaSuccess) {
std::cerr << "Failed to copy C_cutlass matrix to C_reference: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
return result;
}
//
// Launch CUTLASS TRMM.
//
result = CutlassStrmmNN(M, N, alpha, A, lda, B, ldb, C_cutlass, ldc);
if (result != cudaSuccess) {
std::cerr << "CUTLASS TRMM kernel failed: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
return result;
}
//
// Verify.
//
// Launch reference TRMM
result = ReferenceTrmm(M, N, alpha, A, lda, B, ldb, C_reference, ldc);
if (result != cudaSuccess) {
std::cerr << "Reference TRMM kernel failed: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
return result;
}
// Copy to host and verify equivalence.
std::vector<double> host_cutlass(ldc * N, 0);
std::vector<double> host_reference(ldc * N, 0);
result = cudaMemcpy(host_cutlass.data(), C_cutlass, sizeof_C, cudaMemcpyDeviceToHost);
if (result != cudaSuccess) {
std::cerr << "Failed to copy CUTLASS TRMM results: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
return result;
}
result = cudaMemcpy(host_reference.data(), C_reference, sizeof_C, cudaMemcpyDeviceToHost);
if (result != cudaSuccess) {
std::cerr << "Failed to copy Reference TRMM results: "
<< cudaGetErrorString(result) << std::endl;
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
return result;
}
//
// Free device memory allocations.
//
cudaFree(C_reference);
cudaFree(C_cutlass);
cudaFree(B);
cudaFree(A);
//
// Test for bit equivalence of results.
//
if (host_cutlass != host_reference) {
std::cerr << "CUTLASS results incorrect." << std::endl;
return cudaErrorUnknown;
}
return cudaSuccess;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
/// Entry point to basic_trmm example.
//
// usage:
//
// 00_basic_trmm <M> <N> <alpha>
//
int main(int argc, const char *arg[]) {
bool notSupported = false;
// CUTLASS must be compiled with CUDA 11 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ >= 11)) {
std::cerr << "NVIDIA Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (!((props.major * 10 + props.minor) >= 80)) {
std::cerr << "This example requires compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
return 0;
}
//
// Parse the command line to obtain TRMM dimensions and scalar values.
//
// TRMM problem dimensions.
int problem[2] = { 128, 128 };
for (int i = 1; i < argc && i < 3; ++i) {
std::stringstream ss(arg[i]);
ss >> problem[i - 1];
}
// Scalars used for linear scaling the result of the matrix product.
double scalars[1] = { 1 };
for (int i = 3; i < argc && i < 4; ++i) {
std::stringstream ss(arg[i]);
ss >> scalars[i - 3];
}
//
// Run the CUTLASS TRMM test.
//
cudaError_t result = TestCutlassTrmm(
problem[0], // TRMM M dimension
problem[1], // TRMM N dimension
scalars[0] // alpha
);
if (result == cudaSuccess) {
std::cout << "Passed." << std::endl;
}
// Exit.
return result == cudaSuccess ? 0 : -1;
}
///////////////////////////////////////////////////////////////////////////////////////////////////
|
261039a91de35fbf16eab810a2bb48fa372009fa.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <cstdio>
#include <string>
#include <iostream>
#include <color_spinor_field.h>
#include <clover_field.h>
// these control the Wilson-type actions
#ifdef GPU_WILSON_DIRAC
//#define DIRECT_ACCESS_LINK
//#define DIRECT_ACCESS_WILSON_SPINOR
//#define DIRECT_ACCESS_WILSON_ACCUM
//#define DIRECT_ACCESS_WILSON_INTER
//#define DIRECT_ACCESS_WILSON_PACK_SPINOR
//#define DIRECT_ACCESS_CLOVER
#endif // GPU_WILSON_DIRAC
#include <quda_internal.h>
#include <dslash_quda.h>
#include <sys/time.h>
#include <blas_quda.h>
#include <face_quda.h>
#include <inline_ptx.h>
namespace quda {
namespace twisted {
#undef GPU_STAGGERED_DIRAC
#include <dslash_constants.h>
#include <dslash_textures.h>
#include <dslash_index.cuh>
// Enable shared memory dslash for Fermi architecture
//#define SHARED_WILSON_DSLASH
//#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access
#ifdef GPU_TWISTED_MASS_DIRAC
#include <tm_dslash_def.h> // Twisted Mass kernels
#endif
#ifndef DSLASH_SHARED_FLOATS_PER_THREAD
#define DSLASH_SHARED_FLOATS_PER_THREAD 0
#endif
#include <dslash_quda.cuh>
} // end namespace twisted
// declare the dslash events
#include <dslash_events.cuh>
using namespace twisted;
#ifdef GPU_TWISTED_MASS_DIRAC
template <typename sFloat, typename gFloat>
class TwistedDslashCuda : public SharedDslashCuda {
private:
const gFloat *gauge0, *gauge1;
const QudaTwistDslashType dslashType;
double a, b, c, d;
protected:
unsigned int sharedBytesPerThread() const
{
if (dslashParam.kernel_type == INTERIOR_KERNEL) {
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
} else {
return 0;
}
}
public:
TwistedDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cudaColorSpinorField *in, const cudaColorSpinorField *x,
const QudaTwistDslashType dslashType, const double kappa, const double mu,
const double epsilon, const double k, const int dagger)
: SharedDslashCuda(out, in, x, reconstruct, dagger), gauge0(gauge0), gauge1(gauge1), dslashType(dslashType)
{
bindSpinorTex<sFloat>(in, out, x);
a = kappa;
b = mu;
c = epsilon;
d = k;
if (dslashType == QUDA_NONDEG_DSLASH) errorQuda("Invalid dslashType for twisted-mass Dslash");
dslashParam.fl_stride = in->VolumeCB();
}
virtual ~TwistedDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
TuneKey tuneKey() const
{
TuneKey key = DslashCuda::tuneKey();
switch(dslashType){
case QUDA_DEG_TWIST_INV_DSLASH:
strcat(key.aux,",TwistInvDslash");
break;
case QUDA_DEG_DSLASH_TWIST_INV:
strcat(key.aux,",");
break;
case QUDA_DEG_DSLASH_TWIST_XPAY:
strcat(key.aux,",DslashTwist");
break;
default:
errorQuda("Unsupported twisted-dslash type %d", dslashType);
}
return key;
}
void apply(const hipStream_t &stream)
{
#ifdef SHARED_WILSON_DSLASH
if (dslashParam.kernel_type == EXTERIOR_KERNEL_X)
errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
dslashParam.block[0] = tp.aux.x; dslashParam.block[1] = tp.aux.y; dslashParam.block[2] = tp.aux.z; dslashParam.block[3] = tp.aux.w;
for (int i=0; i<4; i++) dslashParam.grid[i] = ( (i==0 ? 2 : 1) * in->X(i)) / dslashParam.block[i];
switch(dslashType){
case QUDA_DEG_TWIST_INV_DSLASH:
DSLASH(twistedMassTwistInvDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), a, b, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0));
break;
case QUDA_DEG_DSLASH_TWIST_INV:
DSLASH(twistedMassDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), a, b, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0));
break;
case QUDA_DEG_DSLASH_TWIST_XPAY:
DSLASH(twistedMassDslashTwist, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), a, b, (sFloat*)x->V(), (float*)x->Norm());
break;
default: errorQuda("Invalid twisted mass dslash type");
}
}
long long flops() const {
int twisted_flops = 48;
long long flops = DslashCuda::flops();
switch(dslashParam.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T:
case EXTERIOR_KERNEL_ALL:
break;
case INTERIOR_KERNEL:
case KERNEL_POLICY:
// twisted mass flops are done in the interior kernel
flops += twisted_flops * in->VolumeCB();
break;
}
return flops;
}
};
#endif // GPU_TWISTED_MASS_DIRAC
#include <dslash_policy.cuh>
void twistedMassDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const QudaTwistDslashType type,
const double &kappa, const double &mu, const double &epsilon,
const double &k, const int *commOverride, TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
inSpinor->allocateGhostBuffer(1);
#ifdef GPU_TWISTED_MASS_DIRAC
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
int ghost_threads[4] = {0};
int bulk_threads = in->Volume();
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i][0] = in->GhostOffset(i,0)/in->FieldOrder();
dslashParam.ghostOffset[i][1] = in->GhostOffset(i,1)/in->FieldOrder();
dslashParam.ghostNormOffset[i][0] = in->GhostNormOffset(i,0);
dslashParam.ghostNormOffset[i][1] = in->GhostNormOffset(i,1);
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
ghost_threads[i] = in->GhostFace()[i];
}
#ifdef MULTI_GPU
if(type == QUDA_DEG_TWIST_INV_DSLASH){
setTwistPack(true);
twist_a = kappa;
twist_b = mu;
}
#endif
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
dslash = new TwistedDslashCuda<double2,double2>(out, (double2*)gauge0,(double2*)gauge1, gauge.Reconstruct(), in, x, type, kappa, mu, epsilon, k, dagger);
regSize = sizeof(double);
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new TwistedDslashCuda<float4,float4>(out, (float4*)gauge0,(float4*)gauge1, gauge.Reconstruct(), in, x, type, kappa, mu, epsilon, k, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new TwistedDslashCuda<short4,short4>(out, (short4*)gauge0,(short4*)gauge1, gauge.Reconstruct(), in, x, type, kappa, mu, epsilon, k, dagger);
}
#ifndef GPU_COMMS
DslashPolicyTune dslash_policy(*dslash, const_cast<cudaColorSpinorField*>(in), regSize, parity, dagger, bulk_threads, ghost_threads, profile);
dslash_policy.apply(0);
#else
DslashPolicyImp* dslashImp = DslashFactory::create(QUDA_GPU_COMMS_DSLASH);
(*dslashImp)(*dslash, const_cast<cudaColorSpinorField*>(in), regSize, parity, dagger, bulk_threads, ghost_threads, profile);
delete dslashImp;
#endif
delete dslash;
#ifdef MULTI_GPU
if(type == QUDA_DEG_TWIST_INV_DSLASH){
setTwistPack(false);
twist_a = 0.0;
twist_b = 0.0;
}
#endif
unbindGaugeTex(gauge);
checkCudaError();
#else
errorQuda("Twisted mass dslash has not been built");
#endif
}
}
| 261039a91de35fbf16eab810a2bb48fa372009fa.cu | #include <cstdlib>
#include <cstdio>
#include <string>
#include <iostream>
#include <color_spinor_field.h>
#include <clover_field.h>
// these control the Wilson-type actions
#ifdef GPU_WILSON_DIRAC
//#define DIRECT_ACCESS_LINK
//#define DIRECT_ACCESS_WILSON_SPINOR
//#define DIRECT_ACCESS_WILSON_ACCUM
//#define DIRECT_ACCESS_WILSON_INTER
//#define DIRECT_ACCESS_WILSON_PACK_SPINOR
//#define DIRECT_ACCESS_CLOVER
#endif // GPU_WILSON_DIRAC
#include <quda_internal.h>
#include <dslash_quda.h>
#include <sys/time.h>
#include <blas_quda.h>
#include <face_quda.h>
#include <inline_ptx.h>
namespace quda {
namespace twisted {
#undef GPU_STAGGERED_DIRAC
#include <dslash_constants.h>
#include <dslash_textures.h>
#include <dslash_index.cuh>
// Enable shared memory dslash for Fermi architecture
//#define SHARED_WILSON_DSLASH
//#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access
#ifdef GPU_TWISTED_MASS_DIRAC
#include <tm_dslash_def.h> // Twisted Mass kernels
#endif
#ifndef DSLASH_SHARED_FLOATS_PER_THREAD
#define DSLASH_SHARED_FLOATS_PER_THREAD 0
#endif
#include <dslash_quda.cuh>
} // end namespace twisted
// declare the dslash events
#include <dslash_events.cuh>
using namespace twisted;
#ifdef GPU_TWISTED_MASS_DIRAC
template <typename sFloat, typename gFloat>
class TwistedDslashCuda : public SharedDslashCuda {
private:
const gFloat *gauge0, *gauge1;
const QudaTwistDslashType dslashType;
double a, b, c, d;
protected:
unsigned int sharedBytesPerThread() const
{
if (dslashParam.kernel_type == INTERIOR_KERNEL) {
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return DSLASH_SHARED_FLOATS_PER_THREAD * reg_size;
} else {
return 0;
}
}
public:
TwistedDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cudaColorSpinorField *in, const cudaColorSpinorField *x,
const QudaTwistDslashType dslashType, const double kappa, const double mu,
const double epsilon, const double k, const int dagger)
: SharedDslashCuda(out, in, x, reconstruct, dagger), gauge0(gauge0), gauge1(gauge1), dslashType(dslashType)
{
bindSpinorTex<sFloat>(in, out, x);
a = kappa;
b = mu;
c = epsilon;
d = k;
if (dslashType == QUDA_NONDEG_DSLASH) errorQuda("Invalid dslashType for twisted-mass Dslash");
dslashParam.fl_stride = in->VolumeCB();
}
virtual ~TwistedDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
TuneKey tuneKey() const
{
TuneKey key = DslashCuda::tuneKey();
switch(dslashType){
case QUDA_DEG_TWIST_INV_DSLASH:
strcat(key.aux,",TwistInvDslash");
break;
case QUDA_DEG_DSLASH_TWIST_INV:
strcat(key.aux,",");
break;
case QUDA_DEG_DSLASH_TWIST_XPAY:
strcat(key.aux,",DslashTwist");
break;
default:
errorQuda("Unsupported twisted-dslash type %d", dslashType);
}
return key;
}
void apply(const cudaStream_t &stream)
{
#ifdef SHARED_WILSON_DSLASH
if (dslashParam.kernel_type == EXTERIOR_KERNEL_X)
errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
dslashParam.block[0] = tp.aux.x; dslashParam.block[1] = tp.aux.y; dslashParam.block[2] = tp.aux.z; dslashParam.block[3] = tp.aux.w;
for (int i=0; i<4; i++) dslashParam.grid[i] = ( (i==0 ? 2 : 1) * in->X(i)) / dslashParam.block[i];
switch(dslashType){
case QUDA_DEG_TWIST_INV_DSLASH:
DSLASH(twistedMassTwistInvDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), a, b, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0));
break;
case QUDA_DEG_DSLASH_TWIST_INV:
DSLASH(twistedMassDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), a, b, (sFloat*)(x ? x->V() : 0), (float*)(x ? x->Norm() : 0));
break;
case QUDA_DEG_DSLASH_TWIST_XPAY:
DSLASH(twistedMassDslashTwist, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam,
(sFloat*)out->V(), (float*)out->Norm(), gauge0, gauge1,
(sFloat*)in->V(), (float*)in->Norm(), a, b, (sFloat*)x->V(), (float*)x->Norm());
break;
default: errorQuda("Invalid twisted mass dslash type");
}
}
long long flops() const {
int twisted_flops = 48;
long long flops = DslashCuda::flops();
switch(dslashParam.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T:
case EXTERIOR_KERNEL_ALL:
break;
case INTERIOR_KERNEL:
case KERNEL_POLICY:
// twisted mass flops are done in the interior kernel
flops += twisted_flops * in->VolumeCB();
break;
}
return flops;
}
};
#endif // GPU_TWISTED_MASS_DIRAC
#include <dslash_policy.cuh>
void twistedMassDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const QudaTwistDslashType type,
const double &kappa, const double &mu, const double &epsilon,
const double &k, const int *commOverride, TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
inSpinor->allocateGhostBuffer(1);
#ifdef GPU_TWISTED_MASS_DIRAC
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
int ghost_threads[4] = {0};
int bulk_threads = in->Volume();
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i][0] = in->GhostOffset(i,0)/in->FieldOrder();
dslashParam.ghostOffset[i][1] = in->GhostOffset(i,1)/in->FieldOrder();
dslashParam.ghostNormOffset[i][0] = in->GhostNormOffset(i,0);
dslashParam.ghostNormOffset[i][1] = in->GhostNormOffset(i,1);
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
ghost_threads[i] = in->GhostFace()[i];
}
#ifdef MULTI_GPU
if(type == QUDA_DEG_TWIST_INV_DSLASH){
setTwistPack(true);
twist_a = kappa;
twist_b = mu;
}
#endif
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
dslash = new TwistedDslashCuda<double2,double2>(out, (double2*)gauge0,(double2*)gauge1, gauge.Reconstruct(), in, x, type, kappa, mu, epsilon, k, dagger);
regSize = sizeof(double);
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new TwistedDslashCuda<float4,float4>(out, (float4*)gauge0,(float4*)gauge1, gauge.Reconstruct(), in, x, type, kappa, mu, epsilon, k, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new TwistedDslashCuda<short4,short4>(out, (short4*)gauge0,(short4*)gauge1, gauge.Reconstruct(), in, x, type, kappa, mu, epsilon, k, dagger);
}
#ifndef GPU_COMMS
DslashPolicyTune dslash_policy(*dslash, const_cast<cudaColorSpinorField*>(in), regSize, parity, dagger, bulk_threads, ghost_threads, profile);
dslash_policy.apply(0);
#else
DslashPolicyImp* dslashImp = DslashFactory::create(QUDA_GPU_COMMS_DSLASH);
(*dslashImp)(*dslash, const_cast<cudaColorSpinorField*>(in), regSize, parity, dagger, bulk_threads, ghost_threads, profile);
delete dslashImp;
#endif
delete dslash;
#ifdef MULTI_GPU
if(type == QUDA_DEG_TWIST_INV_DSLASH){
setTwistPack(false);
twist_a = 0.0;
twist_b = 0.0;
}
#endif
unbindGaugeTex(gauge);
checkCudaError();
#else
errorQuda("Twisted mass dslash has not been built");
#endif
}
}
|
f0b4c1ffac109aadd9e13c6faec4e8fc9dbe2e8c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/operators/label_smooth_op.h"
namespace paddle {
namespace operators {
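// Label smoothing: out = (1 - epsilon) * x + epsilon * prior, where the prior is the uniform
// distribution 1 / label_dim unless an explicit "PriorDist" tensor is supplied.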
template <typename T>
__global__ void LabelSmoothRunOriginKernel(const int N, const float epsilon,
const int label_dim, const T* src,
T* dst) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
for (; idx < N; idx += blockDim.x * gridDim.x) {
dst[idx] = static_cast<T>(1 - epsilon) * src[idx] +
static_cast<T>(epsilon / label_dim);
}
}
template <typename T>
__global__ void LabelSmoothRunDistKernel(const int N, const float epsilon,
const int dist_numel, const T* src,
const T* dist_data, T* dst) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
for (; idx < N; idx += blockDim.x * gridDim.x) {
int dist_idx = idx % dist_numel;
dst[idx] = static_cast<T>(1 - epsilon) * src[idx] +
static_cast<T>(epsilon) * dist_data[dist_idx];
}
}
template <typename T>
__global__ void LabelSmoothGradRunKernel(const int N, const float epsilon,
const T* src, T* dst) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
for (; idx < N; idx += blockDim.x * gridDim.x) {
dst[idx] = static_cast<T>(1 - epsilon) * src[idx];
}
}
template <typename DeviceContext, typename T>
class LabelSmoothGPUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const {
auto* out_t = ctx.Output<framework::LoDTensor>("Out");
auto* in_t = ctx.Input<framework::LoDTensor>("X");
auto* dist_t = ctx.Input<framework::Tensor>("PriorDist");
auto label_dim = in_t->dims()[in_t->dims().size() - 1];
auto epsilon = ctx.Attr<float>("epsilon");
auto& dev = *ctx.template device_context<DeviceContext>().eigen_device();
auto size_prob = in_t->numel();
const T* in_data = in_t->data<T>();
T* out_data = out_t->mutable_data<T>(ctx.GetPlace());
int threads = 512;
int grid = (size_prob + threads - 1) / threads;
auto stream = ctx.cuda_device_context().stream();
if (dist_t) {
auto dist_numel = dist_t->numel();
const T* dist_data = dist_t->data<T>();
hipLaunchKernelGGL(( LabelSmoothRunDistKernel<T>), dim3(grid), dim3(threads), 0, stream,
size_prob, epsilon, dist_numel, in_data, dist_data, out_data);
} else {
hipLaunchKernelGGL(( LabelSmoothRunOriginKernel<T>), dim3(grid), dim3(threads), 0, stream,
size_prob, epsilon, label_dim, in_data, out_data);
}
}
};
template <typename DeviceContext, typename T>
class LabelSmoothGradGPUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const {
auto* d_out_t = ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
auto* d_in_t = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
d_in_t->mutable_data<T>(ctx.GetPlace());
auto epsilon = ctx.Attr<float>("epsilon");
auto& dev = *ctx.template device_context<DeviceContext>().eigen_device();
const T* in_data = d_out_t->data<T>();
auto size_prob = d_out_t->numel();
T* out_data = d_in_t->mutable_data<T>(ctx.GetPlace());
int threads = 512;
int grid = (size_prob + threads - 1) / threads;
auto stream = ctx.cuda_device_context().stream();
hipLaunchKernelGGL(( LabelSmoothGradRunKernel<T>), dim3(grid), dim3(threads), 0, stream,
size_prob, epsilon, in_data, out_data);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
label_smooth,
ops::LabelSmoothGPUKernel<paddle::platform::CUDADeviceContext, float>,
ops::LabelSmoothGPUKernel<paddle::platform::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
label_smooth_grad,
ops::LabelSmoothGradGPUKernel<paddle::platform::CUDADeviceContext, float>,
ops::LabelSmoothGradGPUKernel<paddle::platform::CUDADeviceContext, double>);
| f0b4c1ffac109aadd9e13c6faec4e8fc9dbe2e8c.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/operators/label_smooth_op.h"
namespace paddle {
namespace operators {
template <typename T>
__global__ void LabelSmoothRunOriginKernel(const int N, const float epsilon,
const int label_dim, const T* src,
T* dst) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
for (; idx < N; idx += blockDim.x * gridDim.x) {
dst[idx] = static_cast<T>(1 - epsilon) * src[idx] +
static_cast<T>(epsilon / label_dim);
}
}
template <typename T>
__global__ void LabelSmoothRunDistKernel(const int N, const float epsilon,
const int dist_numel, const T* src,
const T* dist_data, T* dst) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
for (; idx < N; idx += blockDim.x * gridDim.x) {
int dist_idx = idx % dist_numel;
dst[idx] = static_cast<T>(1 - epsilon) * src[idx] +
static_cast<T>(epsilon) * dist_data[dist_idx];
}
}
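// Gradient of label smoothing w.r.t. x: the prior term is constant, so dx = (1 - epsilon) * dout.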
template <typename T>
__global__ void LabelSmoothGradRunKernel(const int N, const float epsilon,
const T* src, T* dst) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
for (; idx < N; idx += blockDim.x * gridDim.x) {
dst[idx] = static_cast<T>(1 - epsilon) * src[idx];
}
}
template <typename DeviceContext, typename T>
class LabelSmoothGPUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const {
auto* out_t = ctx.Output<framework::LoDTensor>("Out");
auto* in_t = ctx.Input<framework::LoDTensor>("X");
auto* dist_t = ctx.Input<framework::Tensor>("PriorDist");
auto label_dim = in_t->dims()[in_t->dims().size() - 1];
auto epsilon = ctx.Attr<float>("epsilon");
auto& dev = *ctx.template device_context<DeviceContext>().eigen_device();
auto size_prob = in_t->numel();
const T* in_data = in_t->data<T>();
T* out_data = out_t->mutable_data<T>(ctx.GetPlace());
int threads = 512;
int grid = (size_prob + threads - 1) / threads;
auto stream = ctx.cuda_device_context().stream();
if (dist_t) {
auto dist_numel = dist_t->numel();
const T* dist_data = dist_t->data<T>();
LabelSmoothRunDistKernel<T><<<grid, threads, 0, stream>>>(
size_prob, epsilon, dist_numel, in_data, dist_data, out_data);
} else {
LabelSmoothRunOriginKernel<T><<<grid, threads, 0, stream>>>(
size_prob, epsilon, label_dim, in_data, out_data);
}
}
};
template <typename DeviceContext, typename T>
class LabelSmoothGradGPUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const {
auto* d_out_t = ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
auto* d_in_t = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
d_in_t->mutable_data<T>(ctx.GetPlace());
auto epsilon = ctx.Attr<float>("epsilon");
auto& dev = *ctx.template device_context<DeviceContext>().eigen_device();
const T* in_data = d_out_t->data<T>();
auto size_prob = d_out_t->numel();
T* out_data = d_in_t->mutable_data<T>(ctx.GetPlace());
int threads = 512;
int grid = (size_prob + threads - 1) / threads;
auto stream = ctx.cuda_device_context().stream();
LabelSmoothGradRunKernel<T><<<grid, threads, 0, stream>>>(
size_prob, epsilon, in_data, out_data);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
label_smooth,
ops::LabelSmoothGPUKernel<paddle::platform::CUDADeviceContext, float>,
ops::LabelSmoothGPUKernel<paddle::platform::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
label_smooth_grad,
ops::LabelSmoothGradGPUKernel<paddle::platform::CUDADeviceContext, float>,
ops::LabelSmoothGradGPUKernel<paddle::platform::CUDADeviceContext, double>);
|
911571eeeeb29a4cc38b2a9edfcaa807e63c1d66.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/cross_entropy_op.h"
namespace paddle {
namespace operators {
namespace {
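// Gradients of the cross-entropy loss: with hard labels only the labeled entry receives
// dX[i, label_i] = -dY[i] / X[i, label_i] (the rest of dX is zero-filled by the caller);
// with soft labels every entry receives dX[i, j] = -label[i, j] * dY[i] / X[i, j].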
template <typename T>
__global__ void CrossEntropyGradientKernel(T* dX, const T* dY, const T* X,
const int64_t* label, const int N,
const int D) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N;
i += blockDim.x * gridDim.x) {
int idx = i * D + label[i];
dX[idx] = -dY[i] / X[idx];
}
}
template <typename T>
__global__ void SoftCrossEntropyGradientKernel(T* dX, const T* dY, const T* X,
const T* label, const int N,
const int D) {
int ids = blockIdx.x * blockDim.x + threadIdx.x;
if (ids < N * D) {
int row_ids = ids / D;
dX[ids] = -label[ids] * dY[row_ids] / X[ids];
}
}
} // namespace
template <typename T>
class CrossEntropyOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"This kernel only runs on GPU device.");
const Tensor* x = ctx.Input<Tensor>("X");
const Tensor* label = ctx.Input<Tensor>("Label");
Tensor* y = ctx.Output<Tensor>("Y");
y->mutable_data<T>(ctx.GetPlace());
math::CrossEntropyFunctor<platform::GPUPlace, T>()(
ctx.device_context(), y, x, label, ctx.Attr<bool>("soft_label"));
}
};
template <typename T>
class CrossEntropyGradientOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"This kernel only runs on GPU device.");
const Tensor* x = ctx.Input<Tensor>("X");
const Tensor* label = ctx.Input<Tensor>("Label");
Tensor* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
dx->mutable_data<T>(ctx.GetPlace());
const T* dy_data =
ctx.Input<Tensor>(framework::GradVarName("Y"))->data<T>();
T* dx_data = dx->mutable_data<T>(ctx.GetPlace());
const T* x_data = x->data<T>();
int64_t batch_size = x->dims()[0];
int64_t class_num = x->dims()[1];
int block = 512;
int grid = (batch_size * class_num + block - 1) / block;
auto stream = ctx.cuda_device_context().stream();
if (ctx.Attr<bool>("soft_label")) {
auto* label_data = label->data<T>();
hipLaunchKernelGGL(( SoftCrossEntropyGradientKernel<T>), dim3(grid), dim3(block), 0, stream,
dx_data, dy_data, x_data, label_data, batch_size, class_num);
} else {
math::SetConstant<platform::GPUPlace, T> functor;
functor(ctx.device_context(), dx, 0);
auto* label_data = label->data<int64_t>();
grid = (batch_size + block - 1) / block;
hipLaunchKernelGGL(( CrossEntropyGradientKernel<T>), dim3(grid), dim3(block), 0, stream,
dx_data, dy_data, x_data, label_data, batch_size, class_num);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(cross_entropy, ops::CrossEntropyOpCUDAKernel<float>,
ops::CrossEntropyOpCUDAKernel<double>);
REGISTER_OP_GPU_KERNEL(cross_entropy_grad,
ops::CrossEntropyGradientOpCUDAKernel<float>,
ops::CrossEntropyGradientOpCUDAKernel<double>);
| 911571eeeeb29a4cc38b2a9edfcaa807e63c1d66.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/cross_entropy_op.h"
namespace paddle {
namespace operators {
namespace {
template <typename T>
__global__ void CrossEntropyGradientKernel(T* dX, const T* dY, const T* X,
const int64_t* label, const int N,
const int D) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N;
i += blockDim.x * gridDim.x) {
int idx = i * D + label[i];
dX[idx] = -dY[i] / X[idx];
}
}
template <typename T>
__global__ void SoftCrossEntropyGradientKernel(T* dX, const T* dY, const T* X,
const T* label, const int N,
const int D) {
int ids = blockIdx.x * blockDim.x + threadIdx.x;
if (ids < N * D) {
int row_ids = ids / D;
dX[ids] = -label[ids] * dY[row_ids] / X[ids];
}
}
} // namespace
template <typename T>
class CrossEntropyOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"This kernel only runs on GPU device.");
const Tensor* x = ctx.Input<Tensor>("X");
const Tensor* label = ctx.Input<Tensor>("Label");
Tensor* y = ctx.Output<Tensor>("Y");
y->mutable_data<T>(ctx.GetPlace());
math::CrossEntropyFunctor<platform::GPUPlace, T>()(
ctx.device_context(), y, x, label, ctx.Attr<bool>("soft_label"));
}
};
template <typename T>
class CrossEntropyGradientOpCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"This kernel only runs on GPU device.");
const Tensor* x = ctx.Input<Tensor>("X");
const Tensor* label = ctx.Input<Tensor>("Label");
Tensor* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
dx->mutable_data<T>(ctx.GetPlace());
const T* dy_data =
ctx.Input<Tensor>(framework::GradVarName("Y"))->data<T>();
T* dx_data = dx->mutable_data<T>(ctx.GetPlace());
const T* x_data = x->data<T>();
int64_t batch_size = x->dims()[0];
int64_t class_num = x->dims()[1];
int block = 512;
int grid = (batch_size * class_num + block - 1) / block;
auto stream = ctx.cuda_device_context().stream();
if (ctx.Attr<bool>("soft_label")) {
auto* label_data = label->data<T>();
SoftCrossEntropyGradientKernel<T><<<grid, block, 0, stream>>>(
dx_data, dy_data, x_data, label_data, batch_size, class_num);
} else {
math::SetConstant<platform::GPUPlace, T> functor;
functor(ctx.device_context(), dx, 0);
auto* label_data = label->data<int64_t>();
grid = (batch_size + block - 1) / block;
CrossEntropyGradientKernel<T><<<grid, block, 0, stream>>>(
dx_data, dy_data, x_data, label_data, batch_size, class_num);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_GPU_KERNEL(cross_entropy, ops::CrossEntropyOpCUDAKernel<float>,
ops::CrossEntropyOpCUDAKernel<double>);
REGISTER_OP_GPU_KERNEL(cross_entropy_grad,
ops::CrossEntropyGradientOpCUDAKernel<float>,
ops::CrossEntropyGradientOpCUDAKernel<double>);
|
a0c0cebcf4b0aaead1fecd3aa20b92ee121db2b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "decode.h"
#include "stdio.h"
#include <iostream>
#include <cassert>
#ifndef CUDA_CHECK
#define CUDA_CHECK(callstr) \
{ \
hipError_t error_code = callstr; \
if (error_code != hipSuccess) { \
std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__; \
assert(0); \
} \
}
#endif
namespace nvinfer1
{
DecodePlugin::DecodePlugin()
{
conf_thresh = -log(1 / decodeplugin::CONF_THRESH - 1);
loc_len = decodeplugin::REG_MAX + 1;
row_num = decodeplugin::CLASS_NUM + loc_len * 4;
//std::cout<<"conf_thresh: "<<conf_thresh<<std::endl;
}
DecodePlugin::~DecodePlugin()
{
}
// create the plugin at runtime from a byte stream
DecodePlugin::DecodePlugin(const void* data, size_t length)
{
}
void DecodePlugin::serialize(void* buffer) const
{
}
size_t DecodePlugin::getSerializationSize() const
{
return 0;
}
int DecodePlugin::initialize()
{
return 0;
}
Dims DecodePlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
//output the result to channel
int totalsize = decodeplugin::MAX_OUT * sizeof(decodeplugin::Detection) / sizeof(float);
return Dims3(totalsize + 1, 1, 1);
}
// Set plugin namespace
void DecodePlugin::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* DecodePlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType DecodePlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool DecodePlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool DecodePlugin::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
void DecodePlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void DecodePlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void DecodePlugin::detachFromContext() {}
const char* DecodePlugin::getPluginType() const
{
return "NANODET_TRT";
}
const char* DecodePlugin::getPluginVersion() const
{
return "1";
}
void DecodePlugin::destroy()
{
delete this;
}
// Clone the plugin
IPluginV2IOExt* DecodePlugin::clone() const
{
DecodePlugin *p = new DecodePlugin();
p->setPluginNamespace(mPluginNamespace);
return p;
}
__device__ float Logist(float data){ return 1./(1. + expf(-data)); }; //sigmoid func
__global__ void CalDetection(const float *input, float *output, int total_grid, int row_num, int num_elem,
int output_elem, const int loc_len, const float obj_thresh) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= num_elem) return;
    const float* curInput = input + idx * row_num; // this element's row of the input
int bn_idx = idx / total_grid; //batch_id
idx %= total_grid; //offset idx in batch_id
int stride = 8;
int offset = 0;
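    // Anchor-cell layout implied by the magic numbers below (assuming a 320x320 input):
    //   cells [0, 1600)    -> 40x40 grid at stride 8
    //   cells [1600, 2000) -> 20x20 grid at stride 16
    //   cells [2000, ...)  -> 10x10 grid at stride 32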
if (idx >= 1600) {
stride = (idx >=2000) ? 32 : 16;
offset = stride == 16 ? 1600: 2000;
}
int max_classes = 0;
float max_class_confidence = -1;
for (int j = 0; j < decodeplugin::CLASS_NUM; ++j, ++curInput) {
if (*curInput > max_class_confidence) {
max_classes = j;
max_class_confidence = *curInput;
}
}
if (max_class_confidence < obj_thresh) //obj_thresh, decodeplugin::CONF_THRESH
return;
//printf("conf: %f, thresh: %f\n", max_class_confidence, decodeplugin::CONF_THRESH);
float *res_count = output + bn_idx * output_elem;
int count = (int)atomicAdd(res_count, 1);
if (count >= decodeplugin::MAX_OUT) return;
//todo construct detection
int width = decodeplugin::INPUT_W / stride;
int cell_idx = idx - offset;
int celly = cell_idx / width;
int cellx = cell_idx % width;
float ct_x = (cellx + 0.5) * stride;
float ct_y = (celly + 0.5) * stride;
float* dis_pred = new float[4];
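    // Decode the four box distances (left, top, right, bottom): each distance is the
    // expectation of a softmax over loc_len discrete bins, scaled by the cell stride
    // (distribution-style box regression).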
for (int i = 0; i < 4; i++) {
const float* ptr = curInput + i * loc_len;
const float* ptrmax = ptr;
float alpha = *ptrmax;
for (int j = 1; j < loc_len; ++j, ++ptrmax) {
if (*ptrmax > alpha) {
alpha = *ptrmax;
}
        } // compute the maximum
float denominator = 0;
float dis = 0;
for (int j = 0; j < loc_len; ++j) {
float tmp_dis = exp(ptr[j] - alpha);
denominator += tmp_dis;
dis += j * tmp_dis;
        } // softmax denominator
dis /= denominator;
dis *= stride;
dis_pred[i] = dis;
}
char* data = (char *)res_count + sizeof(float) + count * sizeof(decodeplugin::Detection);
decodeplugin::Detection* det = (decodeplugin::Detection*)(data);
det->bbox[0] = (ct_x - dis_pred[0]); //x1
det->bbox[1] = (ct_y - dis_pred[1]); //y1
det->bbox[2] = (ct_x + dis_pred[2]); //x2
det->bbox[3] = (ct_y + dis_pred[3]); //y2
    det->bbox[4] = (dis_pred[2] + dis_pred[0]) * (dis_pred[3] + dis_pred[1]); // box area (w * h)
delete[] dis_pred;
det->class_id = max_classes;
det->conf = Logist(max_class_confidence);
}
void DecodePlugin::forwardGpu(const float *const * inputs, float * output, hipStream_t stream, int batchSize)
{
int outputElem = 1 + decodeplugin::MAX_OUT * sizeof(decodeplugin::Detection) / sizeof(float);
for (int idx = 0; idx < batchSize; ++idx) {
CUDA_CHECK(hipMemset(output + idx * outputElem, 0, sizeof(float))); //set total_num=0
}
int total_num_elem = refer_rows * batchSize;
std::cout<<"total_num_elem: "<<total_num_elem<<std::endl;
CalDetection << < (total_num_elem + thread_count_ - 1) / thread_count_, thread_count_ , 0, stream >> > (inputs[0],
output, refer_rows, row_num, total_num_elem, outputElem, loc_len, conf_thresh);
}
int DecodePlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, hipStream_t stream)
{
//GPU
//CUDA_CHECK(hipStreamSynchronize(stream));
forwardGpu((const float *const *)inputs, (float *)outputs[0], stream, batchSize);
return 0;
};
PluginFieldCollection DecodePluginCreator::mFC{};
std::vector<PluginField> DecodePluginCreator::mPluginAttributes;
DecodePluginCreator::DecodePluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* DecodePluginCreator::getPluginName() const
{
return "NANODET_TRT";
}
const char* DecodePluginCreator::getPluginVersion() const
{
return "1";
}
const PluginFieldCollection* DecodePluginCreator::getFieldNames()
{
return &mFC;
}
IPluginV2IOExt* DecodePluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
DecodePlugin* obj = new DecodePlugin();
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* DecodePluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call PReluPlugin::destroy()
DecodePlugin* obj = new DecodePlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
}
| a0c0cebcf4b0aaead1fecd3aa20b92ee121db2b2.cu | #include "decode.h"
#include "stdio.h"
#include <iostream>
#include <cassert>
#ifndef CUDA_CHECK
#define CUDA_CHECK(callstr) \
{ \
cudaError_t error_code = callstr; \
if (error_code != cudaSuccess) { \
std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__; \
assert(0); \
} \
}
#endif
namespace nvinfer1
{
DecodePlugin::DecodePlugin()
{
conf_thresh = -log(1 / decodeplugin::CONF_THRESH - 1);
loc_len = decodeplugin::REG_MAX + 1;
row_num = decodeplugin::CLASS_NUM + loc_len * 4;
//std::cout<<"conf_thresh: "<<conf_thresh<<std::endl;
}
DecodePlugin::~DecodePlugin()
{
}
// create the plugin at runtime from a byte stream
DecodePlugin::DecodePlugin(const void* data, size_t length)
{
}
void DecodePlugin::serialize(void* buffer) const
{
}
size_t DecodePlugin::getSerializationSize() const
{
return 0;
}
int DecodePlugin::initialize()
{
return 0;
}
Dims DecodePlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
//output the result to channel
int totalsize = decodeplugin::MAX_OUT * sizeof(decodeplugin::Detection) / sizeof(float);
return Dims3(totalsize + 1, 1, 1);
}
// Set plugin namespace
void DecodePlugin::setPluginNamespace(const char* pluginNamespace)
{
mPluginNamespace = pluginNamespace;
}
const char* DecodePlugin::getPluginNamespace() const
{
return mPluginNamespace;
}
// Return the DataType of the plugin output at the requested index
DataType DecodePlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const
{
return DataType::kFLOAT;
}
// Return true if output tensor is broadcast across a batch.
bool DecodePlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const
{
return false;
}
// Return true if plugin can use input that is broadcast across batch without replication.
bool DecodePlugin::canBroadcastInputAcrossBatch(int inputIndex) const
{
return false;
}
void DecodePlugin::configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput)
{
}
// Attach the plugin object to an execution context and grant the plugin the access to some context resource.
void DecodePlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator)
{
}
// Detach the plugin object from its execution context.
void DecodePlugin::detachFromContext() {}
const char* DecodePlugin::getPluginType() const
{
return "NANODET_TRT";
}
const char* DecodePlugin::getPluginVersion() const
{
return "1";
}
void DecodePlugin::destroy()
{
delete this;
}
// Clone the plugin
IPluginV2IOExt* DecodePlugin::clone() const
{
DecodePlugin *p = new DecodePlugin();
p->setPluginNamespace(mPluginNamespace);
return p;
}
__device__ float Logist(float data){ return 1./(1. + expf(-data)); }; //sigmoid func
__global__ void CalDetection(const float *input, float *output, int total_grid, int row_num, int num_elem,
int output_elem, const int loc_len, const float obj_thresh) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= num_elem) return;
    const float* curInput = input + idx * row_num; // this element's row of the input
int bn_idx = idx / total_grid; //batch_id
idx %= total_grid; //offset idx in batch_id
int stride = 8;
int offset = 0;
if (idx >= 1600) {
stride = (idx >=2000) ? 32 : 16;
offset = stride == 16 ? 1600: 2000;
}
int max_classes = 0;
float max_class_confidence = -1;
for (int j = 0; j < decodeplugin::CLASS_NUM; ++j, ++curInput) {
if (*curInput > max_class_confidence) {
max_classes = j;
max_class_confidence = *curInput;
}
}
if (max_class_confidence < obj_thresh) //obj_thresh, decodeplugin::CONF_THRESH
return;
//printf("conf: %f, thresh: %f\n", max_class_confidence, decodeplugin::CONF_THRESH);
float *res_count = output + bn_idx * output_elem;
int count = (int)atomicAdd(res_count, 1);
if (count >= decodeplugin::MAX_OUT) return;
//todo construct detection
int width = decodeplugin::INPUT_W / stride;
int cell_idx = idx - offset;
int celly = cell_idx / width;
int cellx = cell_idx % width;
float ct_x = (cellx + 0.5) * stride;
float ct_y = (celly + 0.5) * stride;
float* dis_pred = new float[4];
for (int i = 0; i < 4; i++) {
const float* ptr = curInput + i * loc_len;
const float* ptrmax = ptr;
float alpha = *ptrmax;
for (int j = 1; j < loc_len; ++j, ++ptrmax) {
if (*ptrmax > alpha) {
alpha = *ptrmax;
}
        } // compute the maximum
float denominator = 0;
float dis = 0;
for (int j = 0; j < loc_len; ++j) {
float tmp_dis = exp(ptr[j] - alpha);
denominator += tmp_dis;
dis += j * tmp_dis;
        } // softmax denominator
dis /= denominator;
dis *= stride;
dis_pred[i] = dis;
}
char* data = (char *)res_count + sizeof(float) + count * sizeof(decodeplugin::Detection);
decodeplugin::Detection* det = (decodeplugin::Detection*)(data);
det->bbox[0] = (ct_x - dis_pred[0]); //x1
det->bbox[1] = (ct_y - dis_pred[1]); //y1
det->bbox[2] = (ct_x + dis_pred[2]); //x2
det->bbox[3] = (ct_y + dis_pred[3]); //y2
    det->bbox[4] = (dis_pred[2] + dis_pred[0]) * (dis_pred[3] + dis_pred[1]); // box area (w * h)
delete[] dis_pred;
det->class_id = max_classes;
det->conf = Logist(max_class_confidence);
}
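// Per-batch output layout produced by CalDetection: one leading float holding the detection
// count, followed by up to decodeplugin::MAX_OUT packed decodeplugin::Detection records.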
void DecodePlugin::forwardGpu(const float *const * inputs, float * output, cudaStream_t stream, int batchSize)
{
int outputElem = 1 + decodeplugin::MAX_OUT * sizeof(decodeplugin::Detection) / sizeof(float);
for (int idx = 0; idx < batchSize; ++idx) {
CUDA_CHECK(cudaMemset(output + idx * outputElem, 0, sizeof(float))); //set total_num=0
}
int total_num_elem = refer_rows * batchSize;
std::cout<<"total_num_elem: "<<total_num_elem<<std::endl;
CalDetection << < (total_num_elem + thread_count_ - 1) / thread_count_, thread_count_ , 0, stream >> > (inputs[0],
output, refer_rows, row_num, total_num_elem, outputElem, loc_len, conf_thresh);
}
int DecodePlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, cudaStream_t stream)
{
//GPU
//CUDA_CHECK(cudaStreamSynchronize(stream));
forwardGpu((const float *const *)inputs, (float *)outputs[0], stream, batchSize);
return 0;
};
PluginFieldCollection DecodePluginCreator::mFC{};
std::vector<PluginField> DecodePluginCreator::mPluginAttributes;
DecodePluginCreator::DecodePluginCreator()
{
mPluginAttributes.clear();
mFC.nbFields = mPluginAttributes.size();
mFC.fields = mPluginAttributes.data();
}
const char* DecodePluginCreator::getPluginName() const
{
return "NANODET_TRT";
}
const char* DecodePluginCreator::getPluginVersion() const
{
return "1";
}
const PluginFieldCollection* DecodePluginCreator::getFieldNames()
{
return &mFC;
}
IPluginV2IOExt* DecodePluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc)
{
DecodePlugin* obj = new DecodePlugin();
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
IPluginV2IOExt* DecodePluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength)
{
// This object will be deleted when the network is destroyed, which will
// call PReluPlugin::destroy()
DecodePlugin* obj = new DecodePlugin(serialData, serialLength);
obj->setPluginNamespace(mNamespace.c_str());
return obj;
}
}
|
d907a3e6540f2c3e5109fc8ef011c40b35b1cad0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void matrixSubScalar(double *a, double b, double *c, int cr, int cc){
int x = blockIdx.x * blockDim.x + threadIdx.x; // col
int y = blockIdx.y * blockDim.y + threadIdx.y; // row
if(x < cc && y < cr){
c[y * cc + x] = a[y * cc + x]-b;
}
} | d907a3e6540f2c3e5109fc8ef011c40b35b1cad0.cu | #include "includes.h"
__global__ void matrixSubScalar(double *a, double b, double *c, int cr, int cc){
int x = blockIdx.x * blockDim.x + threadIdx.x; // col
int y = blockIdx.y * blockDim.y + threadIdx.y; // row
if(x < cc && y < cr){
c[y * cc + x] = a[y * cc + x]-b;
}
} |
tridiag_kernels.hip | // !!! This is a file automatically generated by hipify!!!
#pragma once
#include <hip/hip_runtime.h>
#include "include/device_utils.cu.h"
#include "include/constants.hpp"
#include "include/pbbKernels.cu.h"
__device__ inline void filltup4(DTYPE ai, DTYPE bi, DTYPE cim1, unsigned int index, volatile typename tuple4op<DTYPE>::RedElTp* shared, unsigned int n)
{
pbbtuple4<DTYPE> tup;
if (threadIdx.x == 0 || threadIdx.x >= n)
{
tup.a = 1;
tup.b = 0;
tup.c = 0;
tup.d = 1;
}
else
{
tup.a = bi;
tup.b = -ai*cim1;
tup.c = 1;
tup.d = 0;
}
shared[threadIdx.x] = tup;
}
__device__ inline void filltup2_1(DTYPE ai, DTYPE* b_tmp, DTYPE di, unsigned int index, volatile typename tuple2op<DTYPE>::RedElTp* shared, unsigned int n)
{
pbbtuple2<DTYPE> tup;
if (threadIdx.x == 0 || threadIdx.x >= n)
{
tup.a = 0;
tup.b = 1;
}
else
{
tup.a = di;
tup.b = -ai/b_tmp[threadIdx.x-1];
}
shared[threadIdx.x] = tup;
}
__device__ inline void filltup2_2(DTYPE* b_shared, DTYPE* c_shared, DTYPE* d_shared, unsigned int datastart, volatile typename tuple2op<DTYPE>::RedElTp* shared, unsigned int n)
{
int newIdx = n - threadIdx.x - 1;
pbbtuple2<DTYPE> tup;
if (threadIdx.x == 0 || threadIdx.x >= n)
{
tup.a = 0;
tup.b = 1;
}
else
{
DTYPE b_tmp = b_shared[newIdx];
tup.a = d_shared[newIdx]/b_tmp;
tup.b = -c_shared[newIdx]/b_tmp; // c[datastart + newIdx]
}
__syncthreads();
shared[threadIdx.x] = tup;
}
__global__ void tridiag_shared(DTYPE* a, DTYPE* b, DTYPE* c, DTYPE* d, DTYPE* out, unsigned int n)
{
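    // One block solves one tridiagonal system of size n (n <= blockDim.x) entirely in
    // shared memory. The three first-order recurrences of the Thomas algorithm are
    // evaluated with inclusive block scans: a 2x2 linear-fractional scan for the
    // modified diagonal, then two affine scans for the forward and backward substitution.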
extern __shared__ DTYPE shared[];
DTYPE* b_shared = shared;
DTYPE* d_shared = shared+blockDim.x;
DTYPE* empty_space = shared+2*blockDim.x;
volatile typename tuple4op<DTYPE>::RedElTp* tuple4ptr = reinterpret_cast<typename tuple4op<DTYPE>::RedElTp*>(shared);
volatile typename tuple2op<DTYPE>::RedElTp* tuple2ptr = reinterpret_cast<typename tuple2op<DTYPE>::RedElTp*>(empty_space);
const unsigned int datastart = blockIdx.x * n;
const unsigned int index = datastart + threadIdx.x;
DTYPE ai;
DTYPE bi;
DTYPE cim1;
DTYPE di;
if (threadIdx.x < n)
{
ai = a[index];
bi = b[index];
di = d[index];
if (threadIdx.x == 0)
{
cim1 = c[datastart+n-1];
}
else
{
cim1 = c[index-1];
}
}
// if (threadIdx.x < n)
filltup4(ai, bi, cim1, index, tuple4ptr, n);
__syncthreads();
typename tuple4op<DTYPE>::RedElTp tup4 = scanIncBlock<tuple4op<DTYPE>>(tuple4ptr, threadIdx.x);
DTYPE b0 = b[datastart];
__syncthreads();
if (threadIdx.x < n)
b_shared[threadIdx.x] = (tup4.a*b0+tup4.b) / (tup4.c*b0 + tup4.d);
__syncthreads();
// if (threadIdx.x < n)
filltup2_1(ai, b_shared, di, index, tuple2ptr, n);
__syncthreads();
typename tuple2op<DTYPE>::RedElTp tup2 = scanIncBlock<tuple2op<DTYPE>>(tuple2ptr, threadIdx.x);
DTYPE d0 = d[datastart];
__syncthreads();
if (threadIdx.x < n)
d_shared[threadIdx.x] = tup2.a + tup2.b*d0;
__syncthreads();
DTYPE d_div_b = d_shared[n-1] / b_shared[n-1];
__syncthreads();
if (threadIdx.x == 0)
{
empty_space[n-1] = cim1;
}
else if (threadIdx.x < n)
{
empty_space[threadIdx.x-1] = cim1;
}
__syncthreads();
filltup2_2(b_shared, empty_space, d_shared, datastart, tuple2ptr, n);
__syncthreads();
tup2 = scanIncBlock<tuple2op<DTYPE>>(tuple2ptr, threadIdx.x);
__syncthreads();
if (threadIdx.x < n)
out[datastart + n - threadIdx.x - 1] = tup2.a + tup2.b * d_div_b;
}
__global__ void recurrence1_no_const(DTYPE* a, DTYPE* b, DTYPE* c, unsigned int num_chunks, unsigned int n)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx>=num_chunks)
return;
const unsigned int chunk_start = idx * n;
for (int i = chunk_start + 1 ; i < chunk_start + n ; i++)
{
b[i] -= a[i]*c[i-1]/b[i-1];
}
}
__global__ void recurrence1(DTYPE* a, DTYPE* b, DTYPE* c, unsigned int num_chunks)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx>=num_chunks)
return;
const unsigned int chunk_start = idx * TRIDIAG_INNER_DIM;
// const unsigned int chunk_end = chunk_start + n;
DTYPE as[TRIDIAG_INNER_DIM-1];
DTYPE bs[TRIDIAG_INNER_DIM];
DTYPE cs[TRIDIAG_INNER_DIM-1];
// #pragma unroll
    for (int i = 0 ; i < TRIDIAG_INNER_DIM ; i++)
    {
        int loc = chunk_start + i;
        bs[i] = b[loc];
        // as and cs only hold TRIDIAG_INNER_DIM-1 entries; guard the last iteration
        // so the local arrays are not written (and a is not read) out of bounds.
        if (i < TRIDIAG_INNER_DIM - 1)
        {
            as[i] = a[loc+1];
            cs[i] = c[loc];
        }
    }
// #pragma unroll
for (int i = 0 ; i < TRIDIAG_INNER_DIM -1 ; i++)
{
bs[i+1] -= as[i]*cs[i]/bs[i];
}
// #pragma unroll
for (int i = 0 ; i < TRIDIAG_INNER_DIM ; i++)
{
b[chunk_start + i] = bs[i];
}
}
__global__ void create_tuple4_r1(DTYPE *a, DTYPE *b, DTYPE *c,
tuple4<DTYPE> *tups, unsigned int total_size,
unsigned int n) {
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= total_size)
return;
  tuple4<DTYPE> t;
  if (idx % n != 0) {
    t.a = b[idx];
    t.b = -(a[idx] * c[idx - 1]);
    t.c = 1;
    t.d = 0;
  } else {
    // The first element of each chunk carries the identity of the linear-fractional
    // composition, matching the segment-start tuple used in tridiag_shared.
    t.a = 1;
    t.b = 0;
    t.c = 0;
    t.d = 1;
  }
tups[idx] = t;
}
__global__
void generate_keys(unsigned int* keys, unsigned int total_size, unsigned int n)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx>=total_size)
return;
keys[idx] = idx / n;
}
__global__ void get_first_elem_in_chunk(DTYPE *in, DTYPE *out,
unsigned int num_chunks,
unsigned int n) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= num_chunks)
return;
out[idx] = in[idx * n];
}
__global__ void combine_tuple4_r1(tuple4<DTYPE> *tups, unsigned int *keys,
DTYPE *b, DTYPE *b0s, unsigned int total_size,
unsigned int n) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= total_size)
return;
tuple4<DTYPE> t = tups[idx];
DTYPE b0 = b0s[keys[idx]];
b[idx] = (t.a * b0 + t.b) / (t.c * b0 + t.d);
}
__global__ void create_tuple2_r2(tuple2<DTYPE> *tups, DTYPE *a, DTYPE *b,
DTYPE *d, unsigned int total_size,
unsigned int n) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= total_size)
return;
tuple2<DTYPE> t;
if (idx % n == 0) {
t.a = 0;
t.b = 1;
} else {
t.a = d[idx];
t.b = -a[idx] / b[idx - 1];
}
tups[idx] = t;
}
__global__ void combine_tuple2_r2(tuple2<DTYPE> *tups, unsigned int *keys,
DTYPE *d, DTYPE *d0s, unsigned int total_size,
unsigned int n) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= total_size)
return;
tuple2<DTYPE> t = tups[idx];
DTYPE d0 = d0s[keys[idx]];
d[idx] = t.a + t.b * d0;
}
__global__ void get_last_yb_div_in_chunk(DTYPE *d, DTYPE *b, DTYPE *lastDiv,
unsigned int num_chunks,
unsigned int n) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= num_chunks)
return;
const int n1 = idx * n + (n - 1);
lastDiv[idx] = d[n1] / b[n1];
}
__global__ void create_tuple2_r3(tuple2<DTYPE> *tups, unsigned int *keys,
DTYPE *b, DTYPE *c, DTYPE *d,
unsigned int total_size, unsigned int n) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= total_size)
return;
const unsigned int revIdx = n * keys[idx] + (n - (idx % n) - 1);
tuple2<DTYPE> t;
if (idx % n == 0) {
t.a = 0;
t.b = 1;
} else {
DTYPE rb = b[revIdx];
t.a = d[revIdx] / rb;
t.b = -c[revIdx] / rb;
}
tups[idx] = t;
}
__global__ void combine_tuple2_and_reverse_r3(tuple2<DTYPE> *tups,
unsigned int *keys,
DTYPE *lastDivs, DTYPE *d,
unsigned int total_size,
unsigned int n) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= total_size)
return;
unsigned int k = keys[idx];
const unsigned int revIdx = n * k + (n - (idx % n) - 1);
tuple2<DTYPE> t = tups[idx];
d[revIdx] = t.a + t.b * lastDivs[k];
}
__global__
void execute_no_const(
const DTYPE *a,
const DTYPE *b,
DTYPE *c,
DTYPE *d,
DTYPE *solution,
int total_size,
int n
){
const size_t idx = (blockIdx.x * blockDim.x + threadIdx.x) * n;
if (idx >= total_size) {
return;
}
c[idx] /= b[idx];
d[idx] /= b[idx];
DTYPE norm_factor;
// #pragma unroll
for (ptrdiff_t j = 1; j < n; ++j) {
norm_factor = 1.0 / (b[idx+j] - a[idx+j] * c[idx + j-1]);
c[idx + j] = c[idx+j] * norm_factor;
d[idx + j] = (d[idx+j] - a[idx+j] * d[idx + j-1]) * norm_factor;
}
solution[idx + n-1] = d[idx + n-1];
// #pragma unroll
for (ptrdiff_t j=n-2; j >= 0; --j) {
solution[idx + j] = d[idx + j] - c[idx + j] * solution[idx + j+1];
}
}
__global__
void execute(
const DTYPE *a,
const DTYPE *b,
const DTYPE *c,
const DTYPE *d,
DTYPE *solution,
int total_size
){
const size_t idx = (blockIdx.x * blockDim.x + threadIdx.x) * TRIDIAG_INNER_DIM;
if (idx >= total_size) {
return;
}
DTYPE cp[TRIDIAG_INNER_DIM];
DTYPE dp[TRIDIAG_INNER_DIM];
cp[0] = c[idx] / b[idx];
dp[0] = d[idx] / b[idx];
DTYPE norm_factor;
// #pragma unroll
for (ptrdiff_t j = 1; j < TRIDIAG_INNER_DIM; ++j) {
norm_factor = 1.0 / (b[idx+j] - a[idx+j] * cp[j-1]);
cp[j] = c[idx+j] * norm_factor;
dp[j] = (d[idx+j] - a[idx+j] * dp[j-1]) * norm_factor;
}
solution[idx + TRIDIAG_INNER_DIM-1] = dp[TRIDIAG_INNER_DIM-1];
// #pragma unroll
for (ptrdiff_t j=TRIDIAG_INNER_DIM-2; j >= 0; --j) {
solution[idx + j] = dp[j] - cp[j] * solution[idx + j+1];
}
}
__global__
void transpose4(
const DTYPE* a,
const DTYPE* b,
const DTYPE* c,
const DTYPE* d,
DTYPE* a_t,
DTYPE* b_t,
DTYPE* c_t,
DTYPE* d_t,
int xdim,
int ydim,
int total_size
)
{
__shared__ DTYPE tile[4*TILE_DIM][TILE_DIM+1];
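    // The +1 column of padding keeps the strided accesses of the transposed pass out of
    // a single shared-memory bank, avoiding bank conflicts.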
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
if (x < xdim)
{
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
{
int index = (y+j)*xdim + x;
if (index < total_size)
{
tile[threadIdx.y+j][threadIdx.x] = a[index];
tile[TILE_DIM + threadIdx.y+j][threadIdx.x] = b[index];
tile[2 * TILE_DIM + threadIdx.y+j][threadIdx.x] = c[index];
tile[3 * TILE_DIM + threadIdx.y+j][threadIdx.x] = d[index];
}
}
}
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
if (x < ydim)
{
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
{
int index = (y+j)*ydim + x;
if (index < total_size)
{
a_t[index] = tile[threadIdx.x][threadIdx.y + j];
b_t[index] = tile[TILE_DIM + threadIdx.x][threadIdx.y + j];
c_t[index] = tile[2 * TILE_DIM + threadIdx.x][threadIdx.y + j];
d_t[index] = tile[3 * TILE_DIM + threadIdx.x][threadIdx.y + j];
}
}
}
}
__global__
void transpose(
const DTYPE* m,
DTYPE* m_t,
int xdim,
int ydim,
int total_size
)
{
__shared__ DTYPE tile[TILE_DIM][TILE_DIM+1];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
if (x < xdim)
{
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
{
int index = (y+j)*xdim + x;
if (index < total_size)
tile[threadIdx.y+j][threadIdx.x] = m[index];
}
}
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
if (x < ydim)
{
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
{
int index = (y+j)*ydim + x;
if (index < total_size)
m_t[index] = tile[threadIdx.x][threadIdx.y + j];
}
}
}
__global__
void execute_coalesced(
const DTYPE *a,
const DTYPE *b,
DTYPE *c,
DTYPE *d,
DTYPE *solution,
int n,
int num_chunks
){
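    // Assumes the coefficients are stored interleaved ("transposed"): element j of
    // system k lives at index j*num_chunks + k, so consecutive threads issue coalesced
    // loads and stores as they march down their systems.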
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= num_chunks) {
return;
}
DTYPE b0 = b[idx];
c[idx] /= b0;
d[idx] /= b0;
DTYPE norm_factor;
unsigned int indj = idx;
DTYPE ai;
DTYPE cm1;
DTYPE dm1;
for (int j = 0; j < n-1; ++j) {
// c and d from last iteration
cm1 = c[indj];
dm1 = d[indj];
// jump to next chunk
indj += num_chunks;
ai = a[indj];
norm_factor = 1.0f / (b[indj] - ai * cm1);
c[indj] = c[indj] * norm_factor;
d[indj] = (d[indj] - ai * dm1) * norm_factor;
}
int lastIndx = idx + num_chunks*(n-1);
solution[lastIndx] = d[lastIndx];
for (int j=0; j < n-1; ++j) {
lastIndx -= num_chunks;
solution[lastIndx] = d[lastIndx] - c[lastIndx] * solution[lastIndx + num_chunks];
}
}
__global__
void execute_coalesced_const(
const DTYPE *a,
const DTYPE *b,
const DTYPE *c,
const DTYPE *d,
DTYPE *solution,
int num_chunks
){
const unsigned int n = TRIDIAG_INNER_DIM;
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= num_chunks) {
return;
}
DTYPE cp[n];
DTYPE dp[n];
cp[0] = c[idx] / b[idx];
dp[0] = d[idx] / b[idx];
// #pragma unroll
for (int j = 1; j < n; ++j) {
unsigned int indj = idx+(j*num_chunks);
const DTYPE norm_factor = (b[indj] - a[indj] * cp[j-1]);
cp[j] = c[indj] / norm_factor;
dp[j] = (d[indj] - a[indj] * dp[j-1]) / norm_factor;
}
solution[idx + num_chunks*(n-1)] = dp[n-1];
// #pragma unroll
for (int j=n-2; j >= 0; --j) {
solution[idx + num_chunks*j] = dp[j] - cp[j] * solution[idx + num_chunks*(j+1)];
}
} | tridiag_kernels.cu | #pragma once
#include <cuda_runtime.h>
#include "include/device_utils.cu.h"
#include "include/constants.hpp"
#include "include/pbbKernels.cu.h"
__device__ inline void filltup4(DTYPE ai, DTYPE bi, DTYPE cim1, unsigned int index, volatile typename tuple4op<DTYPE>::RedElTp* shared, unsigned int n)
{
pbbtuple4<DTYPE> tup;
if (threadIdx.x == 0 || threadIdx.x >= n)
{
tup.a = 1;
tup.b = 0;
tup.c = 0;
tup.d = 1;
}
else
{
tup.a = bi;
tup.b = -ai*cim1;
tup.c = 1;
tup.d = 0;
}
shared[threadIdx.x] = tup;
}
__device__ inline void filltup2_1(DTYPE ai, DTYPE* b_tmp, DTYPE di, unsigned int index, volatile typename tuple2op<DTYPE>::RedElTp* shared, unsigned int n)
{
pbbtuple2<DTYPE> tup;
if (threadIdx.x == 0 || threadIdx.x >= n)
{
tup.a = 0;
tup.b = 1;
}
else
{
tup.a = di;
tup.b = -ai/b_tmp[threadIdx.x-1];
}
shared[threadIdx.x] = tup;
}
__device__ inline void filltup2_2(DTYPE* b_shared, DTYPE* c_shared, DTYPE* d_shared, unsigned int datastart, volatile typename tuple2op<DTYPE>::RedElTp* shared, unsigned int n)
{
int newIdx = n - threadIdx.x - 1;
pbbtuple2<DTYPE> tup;
if (threadIdx.x == 0 || threadIdx.x >= n)
{
tup.a = 0;
tup.b = 1;
}
else
{
DTYPE b_tmp = b_shared[newIdx];
tup.a = d_shared[newIdx]/b_tmp;
tup.b = -c_shared[newIdx]/b_tmp; // c[datastart + newIdx]
}
__syncthreads();
shared[threadIdx.x] = tup;
}
__global__ void tridiag_shared(DTYPE* a, DTYPE* b, DTYPE* c, DTYPE* d, DTYPE* out, unsigned int n)
{
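    // One block solves one tridiagonal system of size n (n <= blockDim.x) entirely in
    // shared memory. The three first-order recurrences of the Thomas algorithm are
    // evaluated with inclusive block scans: a 2x2 linear-fractional scan for the
    // modified diagonal, then two affine scans for the forward and backward substitution.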
extern __shared__ DTYPE shared[];
DTYPE* b_shared = shared;
DTYPE* d_shared = shared+blockDim.x;
DTYPE* empty_space = shared+2*blockDim.x;
volatile typename tuple4op<DTYPE>::RedElTp* tuple4ptr = reinterpret_cast<typename tuple4op<DTYPE>::RedElTp*>(shared);
volatile typename tuple2op<DTYPE>::RedElTp* tuple2ptr = reinterpret_cast<typename tuple2op<DTYPE>::RedElTp*>(empty_space);
const unsigned int datastart = blockIdx.x * n;
const unsigned int index = datastart + threadIdx.x;
DTYPE ai;
DTYPE bi;
DTYPE cim1;
DTYPE di;
if (threadIdx.x < n)
{
ai = a[index];
bi = b[index];
di = d[index];
if (threadIdx.x == 0)
{
cim1 = c[datastart+n-1];
}
else
{
cim1 = c[index-1];
}
}
// if (threadIdx.x < n)
filltup4(ai, bi, cim1, index, tuple4ptr, n);
__syncthreads();
typename tuple4op<DTYPE>::RedElTp tup4 = scanIncBlock<tuple4op<DTYPE>>(tuple4ptr, threadIdx.x);
DTYPE b0 = b[datastart];
__syncthreads();
if (threadIdx.x < n)
b_shared[threadIdx.x] = (tup4.a*b0+tup4.b) / (tup4.c*b0 + tup4.d);
__syncthreads();
// if (threadIdx.x < n)
filltup2_1(ai, b_shared, di, index, tuple2ptr, n);
__syncthreads();
typename tuple2op<DTYPE>::RedElTp tup2 = scanIncBlock<tuple2op<DTYPE>>(tuple2ptr, threadIdx.x);
DTYPE d0 = d[datastart];
__syncthreads();
if (threadIdx.x < n)
d_shared[threadIdx.x] = tup2.a + tup2.b*d0;
__syncthreads();
DTYPE d_div_b = d_shared[n-1] / b_shared[n-1];
__syncthreads();
if (threadIdx.x == 0)
{
empty_space[n-1] = cim1;
}
else if (threadIdx.x < n)
{
empty_space[threadIdx.x-1] = cim1;
}
__syncthreads();
filltup2_2(b_shared, empty_space, d_shared, datastart, tuple2ptr, n);
__syncthreads();
tup2 = scanIncBlock<tuple2op<DTYPE>>(tuple2ptr, threadIdx.x);
__syncthreads();
if (threadIdx.x < n)
out[datastart + n - threadIdx.x - 1] = tup2.a + tup2.b * d_div_b;
}
__global__ void recurrence1_no_const(DTYPE* a, DTYPE* b, DTYPE* c, unsigned int num_chunks, unsigned int n)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx>=num_chunks)
return;
const unsigned int chunk_start = idx * n;
for (int i = chunk_start + 1 ; i < chunk_start + n ; i++)
{
b[i] -= a[i]*c[i-1]/b[i-1];
}
}
__global__ void recurrence1(DTYPE* a, DTYPE* b, DTYPE* c, unsigned int num_chunks)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx>=num_chunks)
return;
const unsigned int chunk_start = idx * TRIDIAG_INNER_DIM;
// const unsigned int chunk_end = chunk_start + n;
DTYPE as[TRIDIAG_INNER_DIM-1];
DTYPE bs[TRIDIAG_INNER_DIM];
DTYPE cs[TRIDIAG_INNER_DIM-1];
// #pragma unroll
    for (int i = 0 ; i < TRIDIAG_INNER_DIM ; i++)
    {
        int loc = chunk_start + i;
        bs[i] = b[loc];
        // as and cs only hold TRIDIAG_INNER_DIM-1 entries; guard the last iteration
        // so the local arrays are not written (and a is not read) out of bounds.
        if (i < TRIDIAG_INNER_DIM - 1)
        {
            as[i] = a[loc+1];
            cs[i] = c[loc];
        }
    }
// #pragma unroll
for (int i = 0 ; i < TRIDIAG_INNER_DIM -1 ; i++)
{
bs[i+1] -= as[i]*cs[i]/bs[i];
}
// #pragma unroll
for (int i = 0 ; i < TRIDIAG_INNER_DIM ; i++)
{
b[chunk_start + i] = bs[i];
}
}
__global__ void create_tuple4_r1(DTYPE *a, DTYPE *b, DTYPE *c,
tuple4<DTYPE> *tups, unsigned int total_size,
unsigned int n) {
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= total_size)
return;
  tuple4<DTYPE> t;
  if (idx % n != 0) {
    t.a = b[idx];
    t.b = -(a[idx] * c[idx - 1]);
    t.c = 1;
    t.d = 0;
  } else {
    // The first element of each chunk carries the identity of the linear-fractional
    // composition, matching the segment-start tuple used in tridiag_shared.
    t.a = 1;
    t.b = 0;
    t.c = 0;
    t.d = 1;
  }
tups[idx] = t;
}
__global__
void generate_keys(unsigned int* keys, unsigned int total_size, unsigned int n)
{
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx>=total_size)
return;
keys[idx] = idx / n;
}
__global__ void get_first_elem_in_chunk(DTYPE *in, DTYPE *out,
unsigned int num_chunks,
unsigned int n) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= num_chunks)
return;
out[idx] = in[idx * n];
}
__global__ void combine_tuple4_r1(tuple4<DTYPE> *tups, unsigned int *keys,
DTYPE *b, DTYPE *b0s, unsigned int total_size,
unsigned int n) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= total_size)
return;
tuple4<DTYPE> t = tups[idx];
DTYPE b0 = b0s[keys[idx]];
b[idx] = (t.a * b0 + t.b) / (t.c * b0 + t.d);
}
__global__ void create_tuple2_r2(tuple2<DTYPE> *tups, DTYPE *a, DTYPE *b,
DTYPE *d, unsigned int total_size,
unsigned int n) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= total_size)
return;
tuple2<DTYPE> t;
if (idx % n == 0) {
t.a = 0;
t.b = 1;
} else {
t.a = d[idx];
t.b = -a[idx] / b[idx - 1];
}
tups[idx] = t;
}
__global__ void combine_tuple2_r2(tuple2<DTYPE> *tups, unsigned int *keys,
DTYPE *d, DTYPE *d0s, unsigned int total_size,
unsigned int n) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= total_size)
return;
tuple2<DTYPE> t = tups[idx];
DTYPE d0 = d0s[keys[idx]];
d[idx] = t.a + t.b * d0;
}
__global__ void get_last_yb_div_in_chunk(DTYPE *d, DTYPE *b, DTYPE *lastDiv,
unsigned int num_chunks,
unsigned int n) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= num_chunks)
return;
const int n1 = idx * n + (n - 1);
lastDiv[idx] = d[n1] / b[n1];
}
__global__ void create_tuple2_r3(tuple2<DTYPE> *tups, unsigned int *keys,
DTYPE *b, DTYPE *c, DTYPE *d,
unsigned int total_size, unsigned int n) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= total_size)
return;
const unsigned int revIdx = n * keys[idx] + (n - (idx % n) - 1);
tuple2<DTYPE> t;
if (idx % n == 0) {
t.a = 0;
t.b = 1;
} else {
DTYPE rb = b[revIdx];
t.a = d[revIdx] / rb;
t.b = -c[revIdx] / rb;
}
tups[idx] = t;
}
__global__ void combine_tuple2_and_reverse_r3(tuple2<DTYPE> *tups,
unsigned int *keys,
DTYPE *lastDivs, DTYPE *d,
unsigned int total_size,
unsigned int n) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx >= total_size)
return;
unsigned int k = keys[idx];
const unsigned int revIdx = n * k + (n - (idx % n) - 1);
tuple2<DTYPE> t = tups[idx];
d[revIdx] = t.a + t.b * lastDivs[k];
}
__global__
void execute_no_const(
const DTYPE *a,
const DTYPE *b,
DTYPE *c,
DTYPE *d,
DTYPE *solution,
int total_size,
int n
){
const size_t idx = (blockIdx.x * blockDim.x + threadIdx.x) * n;
if (idx >= total_size) {
return;
}
c[idx] /= b[idx];
d[idx] /= b[idx];
DTYPE norm_factor;
// #pragma unroll
for (ptrdiff_t j = 1; j < n; ++j) {
norm_factor = 1.0 / (b[idx+j] - a[idx+j] * c[idx + j-1]);
c[idx + j] = c[idx+j] * norm_factor;
d[idx + j] = (d[idx+j] - a[idx+j] * d[idx + j-1]) * norm_factor;
}
solution[idx + n-1] = d[idx + n-1];
// #pragma unroll
for (ptrdiff_t j=n-2; j >= 0; --j) {
solution[idx + j] = d[idx + j] - c[idx + j] * solution[idx + j+1];
}
}
__global__
void execute(
const DTYPE *a,
const DTYPE *b,
const DTYPE *c,
const DTYPE *d,
DTYPE *solution,
int total_size
){
const size_t idx = (blockIdx.x * blockDim.x + threadIdx.x) * TRIDIAG_INNER_DIM;
if (idx >= total_size) {
return;
}
DTYPE cp[TRIDIAG_INNER_DIM];
DTYPE dp[TRIDIAG_INNER_DIM];
cp[0] = c[idx] / b[idx];
dp[0] = d[idx] / b[idx];
DTYPE norm_factor;
// #pragma unroll
for (ptrdiff_t j = 1; j < TRIDIAG_INNER_DIM; ++j) {
norm_factor = 1.0 / (b[idx+j] - a[idx+j] * cp[j-1]);
cp[j] = c[idx+j] * norm_factor;
dp[j] = (d[idx+j] - a[idx+j] * dp[j-1]) * norm_factor;
}
solution[idx + TRIDIAG_INNER_DIM-1] = dp[TRIDIAG_INNER_DIM-1];
// #pragma unroll
for (ptrdiff_t j=TRIDIAG_INNER_DIM-2; j >= 0; --j) {
solution[idx + j] = dp[j] - cp[j] * solution[idx + j+1];
}
}
__global__
void transpose4(
const DTYPE* a,
const DTYPE* b,
const DTYPE* c,
const DTYPE* d,
DTYPE* a_t,
DTYPE* b_t,
DTYPE* c_t,
DTYPE* d_t,
int xdim,
int ydim,
int total_size
)
{
__shared__ DTYPE tile[4*TILE_DIM][TILE_DIM+1];
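    // The +1 column of padding keeps the strided accesses of the transposed pass out of
    // a single shared-memory bank, avoiding bank conflicts.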
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
if (x < xdim)
{
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
{
int index = (y+j)*xdim + x;
if (index < total_size)
{
tile[threadIdx.y+j][threadIdx.x] = a[index];
tile[TILE_DIM + threadIdx.y+j][threadIdx.x] = b[index];
tile[2 * TILE_DIM + threadIdx.y+j][threadIdx.x] = c[index];
tile[3 * TILE_DIM + threadIdx.y+j][threadIdx.x] = d[index];
}
}
}
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
if (x < ydim)
{
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
{
int index = (y+j)*ydim + x;
if (index < total_size)
{
a_t[index] = tile[threadIdx.x][threadIdx.y + j];
b_t[index] = tile[TILE_DIM + threadIdx.x][threadIdx.y + j];
c_t[index] = tile[2 * TILE_DIM + threadIdx.x][threadIdx.y + j];
d_t[index] = tile[3 * TILE_DIM + threadIdx.x][threadIdx.y + j];
}
}
}
}
__global__
void transpose(
const DTYPE* m,
DTYPE* m_t,
int xdim,
int ydim,
int total_size
)
{
__shared__ DTYPE tile[TILE_DIM][TILE_DIM+1];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
if (x < xdim)
{
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
{
int index = (y+j)*xdim + x;
if (index < total_size)
tile[threadIdx.y+j][threadIdx.x] = m[index];
}
}
__syncthreads();
x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
y = blockIdx.x * TILE_DIM + threadIdx.y;
if (x < ydim)
{
for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
{
int index = (y+j)*ydim + x;
if (index < total_size)
m_t[index] = tile[threadIdx.x][threadIdx.y + j];
}
}
}
__global__
void execute_coalesced(
const DTYPE *a,
const DTYPE *b,
DTYPE *c,
DTYPE *d,
DTYPE *solution,
int n,
int num_chunks
){
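    // Assumes the coefficients are stored interleaved ("transposed"): element j of
    // system k lives at index j*num_chunks + k, so consecutive threads issue coalesced
    // loads and stores as they march down their systems.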
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= num_chunks) {
return;
}
DTYPE b0 = b[idx];
c[idx] /= b0;
d[idx] /= b0;
DTYPE norm_factor;
unsigned int indj = idx;
DTYPE ai;
DTYPE cm1;
DTYPE dm1;
for (int j = 0; j < n-1; ++j) {
// c and d from last iteration
cm1 = c[indj];
dm1 = d[indj];
// jump to next chunk
indj += num_chunks;
ai = a[indj];
norm_factor = 1.0f / (b[indj] - ai * cm1);
c[indj] = c[indj] * norm_factor;
d[indj] = (d[indj] - ai * dm1) * norm_factor;
}
int lastIndx = idx + num_chunks*(n-1);
solution[lastIndx] = d[lastIndx];
for (int j=0; j < n-1; ++j) {
lastIndx -= num_chunks;
solution[lastIndx] = d[lastIndx] - c[lastIndx] * solution[lastIndx + num_chunks];
}
}
__global__
void execute_coalesced_const(
const DTYPE *a,
const DTYPE *b,
const DTYPE *c,
const DTYPE *d,
DTYPE *solution,
int num_chunks
){
const unsigned int n = TRIDIAG_INNER_DIM;
const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= num_chunks) {
return;
}
DTYPE cp[n];
DTYPE dp[n];
cp[0] = c[idx] / b[idx];
dp[0] = d[idx] / b[idx];
// #pragma unroll
for (int j = 1; j < n; ++j) {
unsigned int indj = idx+(j*num_chunks);
const DTYPE norm_factor = (b[indj] - a[indj] * cp[j-1]);
cp[j] = c[indj] / norm_factor;
dp[j] = (d[indj] - a[indj] * dp[j-1]) / norm_factor;
}
solution[idx + num_chunks*(n-1)] = dp[n-1];
// #pragma unroll
for (int j=n-2; j >= 0; --j) {
solution[idx + num_chunks*j] = dp[j] - cp[j] * solution[idx + num_chunks*(j+1)];
}
} |
d6b6a309ecda143d2239e8c80a8678e0c9a46f68.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <sys/time.h>
double CpuSecond() {
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}
int CpuNormalCal(int* data, const int size) {
int sum = 0;
for (int i = 0; i < size; ++i) {
sum += data[i];
}
return sum;
}
int CpuRecusiveReduce(int* data, const int size) {
//terminal check
if (size == 1) return data[0];
const int stride = size / 2;
for (int i = 0; i < stride; ++i) {
data[i] += data[i + stride];
}
return CpuRecusiveReduce(data, stride);
}
__global__ void GpuReduceNeighbored(int* g_idata, int* g_odata, unsigned int n) {
    // thread id in current block
unsigned int tid = threadIdx.x;
// id of all thread in grid
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// boundary check
if (idx >= n) return;
// subarray in current block
int *indata = g_idata + blockIdx.x * blockDim.x;
// cal sum
for (int stride = 1; stride < blockDim.x; stride *= 2) {
if ((tid % (2 * stride)) == 0) {
indata[tid] += indata[tid + stride];
}
__syncthreads();
}
//write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = indata[0];
}
__global__ void GpuReduceNeighboredV2(int* g_idata, int* g_odata, unsigned int n) {
    // thread id in current block
unsigned int tid = threadIdx.x;
// id of all thread in grid
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// boundary check
if (idx >= n) return;
    // subarray in current block
int *indata = g_idata + blockIdx.x * blockDim.x;
// cal sum
for (int stride = 1; stride < blockDim.x; stride *= 2) {
// convert tid into local array index (in block)
int index = 2 * stride * tid;
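        // Packing the active work into contiguous low thread ids avoids the warp
        // divergence caused by the (tid % (2 * stride)) test in GpuReduceNeighbored.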
if (index < blockDim.x) {
indata[index] += indata[index + stride];
}
__syncthreads();
}
//write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = indata[0];
}
__global__ void GpuReduceInterleaved(int* g_idata, int* g_odata, unsigned int n) {
    // thread id in current block
unsigned int tid = threadIdx.x;
// id of all thread in grid
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// boundary check
if (idx >= n) return;
// subarray in current block
int *indata = g_idata + blockIdx.x * blockDim.x;
// cal sum
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
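        // Interleaved-pair (sequential addressing) reduction: each active thread adds the
        // element stride positions ahead, and the active range halves every pass so
        // whole warps retire early.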
if (tid < stride) {
indata[tid] += indata[tid + stride];
}
__syncthreads();
}
//write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = indata[0];
}
int main(int argc, char** argv) {
int dev = 0;
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
printf("device[%d]: %s\n", dev, deviceProp.name);
int block_size = 512;
if (argc > 1) {
block_size = atoi(argv[1]);
}
int size = 1 << 24;
printf("array size: %d\n", size);
dim3 block(block_size, 1);
dim3 grid((size + block.x - 1) / block.x, 1);
printf("kernal size: grid(%d, %d), block(%d, %d)\n", grid.x, grid.y, block.x, block.y);
// alloc mem
size_t bytes = size * sizeof(int);
int* h_idata = (int*)malloc(bytes);
int* h_odata = (int*)malloc(grid.x * sizeof(int));
int* tmp = (int*)malloc(bytes);
// initialize array
for (int i = 0; i < size; ++i) {
h_idata[i] = (int) (rand() & 0xFF);
}
// alloc hbm
int* d_idata = NULL;
int* d_odata = NULL;
hipMalloc((void**) &d_idata, bytes);
hipMalloc((void**) &d_odata, grid.x * sizeof(int));
int gpu_sum = 0;
// ------ kernal 1 ------
// copy input data from h to d
hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice);
// cuda kernal cal
double t1 = CpuSecond();
hipLaunchKernelGGL(( GpuReduceNeighbored), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size);
double elaps1 = CpuSecond() - t1;
// copy output data from d to h
hipMemcpy(h_odata, d_odata, grid.x * sizeof(int), hipMemcpyDeviceToHost);
// cpu cal
for (int i = 0; i < grid.x; ++i) {
gpu_sum += h_odata[i];
}
printf("GpuReduceNeighbored result: %d, kernal elaps: %f\n", gpu_sum, elaps1);
memset(h_odata, 0, grid.x * sizeof(int));
gpu_sum = 0;
// ------ kernal 2 ------
// copy input data from h to d
hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice);
// cuda kernal cal
double t2 = CpuSecond();
hipLaunchKernelGGL(( GpuReduceNeighboredV2), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size);
double elaps2 = CpuSecond() - t2;
// copy output data from d to h
hipMemcpy(h_odata, d_odata, grid.x * sizeof(int), hipMemcpyDeviceToHost);
// cpu cal
for (int i = 0; i < grid.x; ++i) {
gpu_sum += h_odata[i];
}
printf("GpuReduceNeighboredV2 result: %d, kernal elaps: %f\n", gpu_sum, elaps2);
memset(h_odata, 0, grid.x * sizeof(int));
gpu_sum = 0;
// ------ kernal 3 ------
// copy input data from h to d
hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice);
// cuda kernal cal
double t3 = CpuSecond();
hipLaunchKernelGGL(( GpuReduceInterleaved), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size);
double elaps3 = CpuSecond() - t3;
// copy output data from d to h
hipMemcpy(h_odata, d_odata, grid.x * sizeof(int), hipMemcpyDeviceToHost);
// cpu cal
for (int i = 0; i < grid.x; ++i) {
gpu_sum += h_odata[i];
}
double elaps_all_3 = CpuSecond() - t3;
printf("GpuReduceInterleaved result: %d, kernal elaps: %f, all elaps: %f\n", gpu_sum, elaps3, elaps_all_3);
memset(h_odata, 0, grid.x * sizeof(int));
gpu_sum = 0;
memcpy(tmp, h_idata, bytes);
// ------ cpu 1 ------
double t4 = CpuSecond();
int cpu_sum1 = CpuNormalCal(tmp, size);
double elaps_all_4 = CpuSecond() - t4;
// ------ cpu 2 ------
double t5 = CpuSecond();
int cpu_sum2 = CpuRecusiveReduce(tmp, size);
double elaps_all_5 = CpuSecond() - t5;
printf("cpu normal result: %d, elaps_all: %f\n", cpu_sum1, elaps_all_4);
printf("cpu recusize result: %d elaps_all: %f\n", cpu_sum2, elaps_all_5);
// free host mem
free(h_idata);
free(h_odata);
// free gpu hbm
hipFree(d_idata);
hipFree(d_odata);
// reset device
hipDeviceReset();
}
/*
device[0]: Tesla V100-SXM2-32GB
array size: 16777216
kernal size: grid(32768, 1), block(512, 1)
GpuReduceNeighbored result: 2139353471, kernal elaps: 0.000035
GpuReduceNeighboredV2 result: 2139353471, kernal elaps: 0.000017
GpuReduceInterleaved result: 2139353471, kernal elaps: 0.000011, all elaps: 0.000567
cpu normal result: 2139353471, elaps_all: 0.043164
cpu recusize result: 2139353471 elaps_all: 0.042999
*/ | d6b6a309ecda143d2239e8c80a8678e0c9a46f68.cu | #include <stdio.h>
#include <sys/time.h>
double CpuSecond() {
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}
int CpuNormalCal(int* data, const int size) {
int sum = 0;
for (int i = 0; i < size; ++i) {
sum += data[i];
}
return sum;
}
int CpuRecusiveReduce(int* data, const int size) {
//terminal check
if (size == 1) return data[0];
const int stride = size / 2;
for (int i = 0; i < stride; ++i) {
data[i] += data[i + stride];
}
return CpuRecusiveReduce(data, stride);
}
__global__ void GpuReduceNeighbored(int* g_idata, int* g_odata, unsigned int n) {
    // thread id in current block
unsigned int tid = threadIdx.x;
// id of all thread in grid
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// boundary check
if (idx >= n) return;
// subarray in current block
int *indata = g_idata + blockIdx.x * blockDim.x;
// cal sum
for (int stride = 1; stride < blockDim.x; stride *= 2) {
if ((tid % (2 * stride)) == 0) {
indata[tid] += indata[tid + stride];
}
__syncthreads();
}
//write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = indata[0];
}
__global__ void GpuReduceNeighboredV2(int* g_idata, int* g_odata, unsigned int n) {
    // thread id in current block
unsigned int tid = threadIdx.x;
// id of all thread in grid
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// boundary check
if (idx >= n) return;
    // subarray in current block
int *indata = g_idata + blockIdx.x * blockDim.x;
// cal sum
for (int stride = 1; stride < blockDim.x; stride *= 2) {
// convert tid into local array index (in block)
int index = 2 * stride * tid;
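        // Packing the active work into contiguous low thread ids avoids the warp
        // divergence caused by the (tid % (2 * stride)) test in GpuReduceNeighbored.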
if (index < blockDim.x) {
indata[index] += indata[index + stride];
}
__syncthreads();
}
//write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = indata[0];
}
__global__ void GpuReduceInterleaved(int* g_idata, int* g_odata, unsigned int n) {
    // thread id in current block
unsigned int tid = threadIdx.x;
// id of all thread in grid
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// boundary check
if (idx >= n) return;
// subarray in current block
int *indata = g_idata + blockIdx.x * blockDim.x;
// cal sum
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
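        // Interleaved-pair (sequential addressing) reduction: each active thread adds the
        // element stride positions ahead, and the active range halves every pass so
        // whole warps retire early.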
if (tid < stride) {
indata[tid] += indata[tid + stride];
}
__syncthreads();
}
//write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = indata[0];
}
int main(int argc, char** argv) {
int dev = 0;
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
printf("device[%d]: %s\n", dev, deviceProp.name);
int block_size = 512;
if (argc > 1) {
block_size = atoi(argv[1]);
}
int size = 1 << 24;
printf("array size: %d\n", size);
dim3 block(block_size, 1);
dim3 grid((size + block.x - 1) / block.x, 1);
printf("kernal size: grid(%d, %d), block(%d, %d)\n", grid.x, grid.y, block.x, block.y);
// alloc mem
size_t bytes = size * sizeof(int);
int* h_idata = (int*)malloc(bytes);
int* h_odata = (int*)malloc(grid.x * sizeof(int));
int* tmp = (int*)malloc(bytes);
// initialize array
for (int i = 0; i < size; ++i) {
h_idata[i] = (int) (rand() & 0xFF);
}
// alloc hbm
int* d_idata = NULL;
int* d_odata = NULL;
cudaMalloc((void**) &d_idata, bytes);
cudaMalloc((void**) &d_odata, grid.x * sizeof(int));
int gpu_sum = 0;
// ------ kernal 1 ------
// copy input data from h to d
cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice);
// cuda kernal cal
double t1 = CpuSecond();
GpuReduceNeighbored<<<grid, block>>>(d_idata, d_odata, size);
double elaps1 = CpuSecond() - t1;
// copy output data from d to h
cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost);
// cpu cal
for (int i = 0; i < grid.x; ++i) {
gpu_sum += h_odata[i];
}
printf("GpuReduceNeighbored result: %d, kernal elaps: %f\n", gpu_sum, elaps1);
memset(h_odata, 0, grid.x * sizeof(int));
gpu_sum = 0;
// ------ kernal 2 ------
// copy input data from h to d
cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice);
// cuda kernal cal
double t2 = CpuSecond();
GpuReduceNeighboredV2<<<grid, block>>>(d_idata, d_odata, size);
double elaps2 = CpuSecond() - t2;
// copy output data from d to h
cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost);
// cpu cal
for (int i = 0; i < grid.x; ++i) {
gpu_sum += h_odata[i];
}
printf("GpuReduceNeighboredV2 result: %d, kernal elaps: %f\n", gpu_sum, elaps2);
memset(h_odata, 0, grid.x * sizeof(int));
gpu_sum = 0;
// ------ kernal 3 ------
// copy input data from h to d
cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice);
// cuda kernal cal
double t3 = CpuSecond();
GpuReduceInterleaved<<<grid, block>>>(d_idata, d_odata, size);
double elaps3 = CpuSecond() - t3;
// copy output data from d to h
cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost);
// cpu cal
for (int i = 0; i < grid.x; ++i) {
gpu_sum += h_odata[i];
}
double elaps_all_3 = CpuSecond() - t3;
printf("GpuReduceInterleaved result: %d, kernal elaps: %f, all elaps: %f\n", gpu_sum, elaps3, elaps_all_3);
memset(h_odata, 0, grid.x * sizeof(int));
gpu_sum = 0;
memcpy(tmp, h_idata, bytes);
// ------ cpu 1 ------
double t4 = CpuSecond();
int cpu_sum1 = CpuNormalCal(tmp, size);
double elaps_all_4 = CpuSecond() - t4;
// ------ cpu 2 ------
double t5 = CpuSecond();
int cpu_sum2 = CpuRecusiveReduce(tmp, size);
double elaps_all_5 = CpuSecond() - t5;
printf("cpu normal result: %d, elaps_all: %f\n", cpu_sum1, elaps_all_4);
printf("cpu recusize result: %d, elaps_all: %f\n", cpu_sum2, elaps_all_5);
// free host mem
free(h_idata);
free(h_odata);
// free gpu hbm
cudaFree(d_idata);
cudaFree(d_odata);
// reset device
cudaDeviceReset();
}
/*
device[0]: Tesla V100-SXM2-32GB
array size: 16777216
kernal size: grid(32768, 1), block(512, 1)
GpuReduceNeighbored result: 2139353471, kernal elaps: 0.000035
GpuReduceNeighboredV2 result: 2139353471, kernal elaps: 0.000017
GpuReduceInterleaved result: 2139353471, kernal elaps: 0.000011, all elaps: 0.000567
cpu normal result: 2139353471, elaps_all: 0.043164
cpu recusize result: 2139353471, elaps_all: 0.042999
*/ |
139ac12ec884e6733f2164bb2f2ee2d5d5c6a171.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <limits>
namespace at::native {
CONSTEXPR_EXCEPT_WIN_CUDA char tanh_name[] = "tanh";
void tanh_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
static const auto tanh_string = jiterator_stringify(
template <typename T> T tanh(T a) { return std::tanh(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "tanh_name", [&]() {
jitted_gpu_kernel<
/*name=*/tanh_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, tanh_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "tanh_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::tanh(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"tanh_cuda",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::tanh(a);
});
});
}
}
REGISTER_DISPATCH(tanh_stub, &tanh_kernel_cuda);
} // namespace at::native
| 139ac12ec884e6733f2164bb2f2ee2d5d5c6a171.cu | #define TORCH_ASSERT_NO_OPERATORS
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/OpMathType.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <limits>
namespace at::native {
CONSTEXPR_EXCEPT_WIN_CUDA char tanh_name[] = "tanh";
void tanh_kernel_cuda(TensorIteratorBase& iter) {
auto common_dtype = iter.common_dtype();
if (at::isComplexType(common_dtype)) {
#if AT_USE_JITERATOR
static const auto tanh_string = jiterator_stringify(
template <typename T> T tanh(T a) { return std::tanh(a); });
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "tanh_name", [&]() {
jitted_gpu_kernel<
/*name=*/tanh_name,
/*return_dtype=*/scalar_t,
/*common_dtype=*/scalar_t,
/*arity=*/1>(iter, tanh_string);
});
#else
AT_DISPATCH_COMPLEX_TYPES_AND(
kComplexHalf, common_dtype, "tanh_name", [&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
using opmath_t = at::opmath_type<scalar_t>;
return ::tanh(static_cast<opmath_t>(a));
});
});
#endif
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
common_dtype,
"tanh_cuda",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::tanh(a);
});
});
}
}
REGISTER_DISPATCH(tanh_stub, &tanh_kernel_cuda);
} // namespace at::native
|
3eb332601055d6bc59fd112271b83c0361bea66f.hip | // !!! This is a file automatically generated by hipify!!!
/*************************************************************************
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
*
* See LICENSE.txt for license information
************************************************************************/
#include "hip/hip_runtime.h"
#include "common.h"
void SendRecvGetCollByteCount(size_t *sendcount, size_t *recvcount, size_t *paramcount, size_t *sendInplaceOffset, size_t *recvInplaceOffset, size_t count, int nranks) {
*sendcount = count;
*recvcount = count;
*sendInplaceOffset = 0;
*recvInplaceOffset = 0;
*paramcount = *sendcount;
}
testResult_t SendRecvInitData(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int rep, int in_place) {
size_t sendcount = args->sendBytes / wordSize(type);
size_t recvcount = args->expectedBytes / wordSize(type);
int nranks = args->nProcs*args->nThreads*args->nGpus;
for (int i=0; i<args->nGpus; i++) {
CUDACHECK(hipSetDevice(args->gpus[i]));
int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i);
CUDACHECK(hipMemset(args->recvbuffs[i], 0, args->expectedBytes));
void* data = in_place ? args->recvbuffs[i] : args->sendbuffs[i];
TESTCHECK(InitData(data, sendcount, rank*sendcount, type, ncclSum, rep, 1, 0));
int peer = (rank-1+nranks)%nranks;
TESTCHECK(InitData(args->expected[i], recvcount, peer*recvcount, type, ncclSum, rep, 1, 0));
CUDACHECK(hipDeviceSynchronize());
}
// We don't support in-place sendrecv
args->reportErrors = in_place ? 0 : 1;
return testSuccess;
}
void SendRecvGetBw(size_t count, int typesize, double sec, double* algBw, double* busBw, int nranks) {
double baseBw = (double)(count * typesize) / 1.0E9 / sec;
*algBw = baseBw;
double factor = 1;
*busBw = baseBw * factor;
}
testResult_t SendRecvRunColl(void* sendbuff, void* recvbuff, size_t count, ncclDataType_t type, ncclRedOp_t op, int root, ncclComm_t comm, hipStream_t stream) {
int nRanks;
NCCLCHECK(ncclCommCount(comm, &nRanks));
int rank;
NCCLCHECK(ncclCommUserRank(comm, &rank));
int recvPeer = (rank-1+nRanks) % nRanks;
int sendPeer = (rank+1) % nRanks;
NCCLCHECK(ncclGroupStart());
NCCLCHECK(ncclSend(sendbuff, count, type, sendPeer, comm, stream));
NCCLCHECK(ncclRecv(recvbuff, count, type, recvPeer, comm, stream));
NCCLCHECK(ncclGroupEnd());
return testSuccess;
}
struct testColl sendRecvTest = {
"SendRecv",
SendRecvGetCollByteCount,
SendRecvInitData,
SendRecvGetBw,
SendRecvRunColl
};
void SendRecvGetBuffSize(size_t *sendcount, size_t *recvcount, size_t count, int nranks) {
size_t paramcount, sendInplaceOffset, recvInplaceOffset;
SendRecvGetCollByteCount(sendcount, recvcount, ¶mcount, &sendInplaceOffset, &recvInplaceOffset, count, nranks);
}
testResult_t SendRecvRunTest(struct threadArgs* args, int root, ncclDataType_t type, const char* typeName, ncclRedOp_t op, const char* opName) {
args->collTest = &sendRecvTest;
ncclDataType_t *run_types;
ncclRedOp_t *run_ops;
const char **run_typenames, **run_opnames;
int type_count, op_count;
if ((int)type != -1) {
type_count = 1;
run_types = &type;
run_typenames = &typeName;
} else {
type_count = test_typenum;
run_types = test_types;
run_typenames = test_typenames;
}
if ((int)op != -1) {
op_count = 1;
run_ops = &op;
run_opnames = &opName;
} else {
op_count = test_opnum;
run_ops = test_ops;
run_opnames = test_opnames;
}
for (int i=0; i<type_count; i++) {
for (int j=0; j<op_count; j++) {
TESTCHECK(TimeTest(args, run_types[i], run_typenames[i], run_ops[j], run_opnames[j], -1));
}
}
return testSuccess;
}
struct testEngine sendRecvEngine = {
SendRecvGetBuffSize,
SendRecvRunTest
};
#pragma weak ncclTestEngine=sendRecvEngine
| 3eb332601055d6bc59fd112271b83c0361bea66f.cu | /*************************************************************************
* Copyright (c) 2016-2022, NVIDIA CORPORATION. All rights reserved.
*
* See LICENSE.txt for license information
************************************************************************/
#include "cuda_runtime.h"
#include "common.h"
void SendRecvGetCollByteCount(size_t *sendcount, size_t *recvcount, size_t *paramcount, size_t *sendInplaceOffset, size_t *recvInplaceOffset, size_t count, int nranks) {
*sendcount = count;
*recvcount = count;
*sendInplaceOffset = 0;
*recvInplaceOffset = 0;
*paramcount = *sendcount;
}
testResult_t SendRecvInitData(struct threadArgs* args, ncclDataType_t type, ncclRedOp_t op, int root, int rep, int in_place) {
size_t sendcount = args->sendBytes / wordSize(type);
size_t recvcount = args->expectedBytes / wordSize(type);
int nranks = args->nProcs*args->nThreads*args->nGpus;
for (int i=0; i<args->nGpus; i++) {
CUDACHECK(cudaSetDevice(args->gpus[i]));
int rank = ((args->proc*args->nThreads + args->thread)*args->nGpus + i);
CUDACHECK(cudaMemset(args->recvbuffs[i], 0, args->expectedBytes));
void* data = in_place ? args->recvbuffs[i] : args->sendbuffs[i];
TESTCHECK(InitData(data, sendcount, rank*sendcount, type, ncclSum, rep, 1, 0));
int peer = (rank-1+nranks)%nranks;
TESTCHECK(InitData(args->expected[i], recvcount, peer*recvcount, type, ncclSum, rep, 1, 0));
CUDACHECK(cudaDeviceSynchronize());
}
// We don't support in-place sendrecv
args->reportErrors = in_place ? 0 : 1;
return testSuccess;
}
void SendRecvGetBw(size_t count, int typesize, double sec, double* algBw, double* busBw, int nranks) {
double baseBw = (double)(count * typesize) / 1.0E9 / sec;
*algBw = baseBw;
double factor = 1;
*busBw = baseBw * factor;
}
testResult_t SendRecvRunColl(void* sendbuff, void* recvbuff, size_t count, ncclDataType_t type, ncclRedOp_t op, int root, ncclComm_t comm, cudaStream_t stream) {
int nRanks;
NCCLCHECK(ncclCommCount(comm, &nRanks));
int rank;
NCCLCHECK(ncclCommUserRank(comm, &rank));
int recvPeer = (rank-1+nRanks) % nRanks;
int sendPeer = (rank+1) % nRanks;
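  // Ring exchange: each rank sends to the next rank and receives from the previous one;
  // grouping the send and recv lets both operations progress concurrently without deadlock.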
NCCLCHECK(ncclGroupStart());
NCCLCHECK(ncclSend(sendbuff, count, type, sendPeer, comm, stream));
NCCLCHECK(ncclRecv(recvbuff, count, type, recvPeer, comm, stream));
NCCLCHECK(ncclGroupEnd());
return testSuccess;
}
struct testColl sendRecvTest = {
"SendRecv",
SendRecvGetCollByteCount,
SendRecvInitData,
SendRecvGetBw,
SendRecvRunColl
};
void SendRecvGetBuffSize(size_t *sendcount, size_t *recvcount, size_t count, int nranks) {
size_t paramcount, sendInplaceOffset, recvInplaceOffset;
SendRecvGetCollByteCount(sendcount, recvcount, ¶mcount, &sendInplaceOffset, &recvInplaceOffset, count, nranks);
}
testResult_t SendRecvRunTest(struct threadArgs* args, int root, ncclDataType_t type, const char* typeName, ncclRedOp_t op, const char* opName) {
args->collTest = &sendRecvTest;
ncclDataType_t *run_types;
ncclRedOp_t *run_ops;
const char **run_typenames, **run_opnames;
int type_count, op_count;
if ((int)type != -1) {
type_count = 1;
run_types = &type;
run_typenames = &typeName;
} else {
type_count = test_typenum;
run_types = test_types;
run_typenames = test_typenames;
}
if ((int)op != -1) {
op_count = 1;
run_ops = &op;
run_opnames = &opName;
} else {
op_count = test_opnum;
run_ops = test_ops;
run_opnames = test_opnames;
}
for (int i=0; i<type_count; i++) {
for (int j=0; j<op_count; j++) {
TESTCHECK(TimeTest(args, run_types[i], run_typenames[i], run_ops[j], run_opnames[j], -1));
}
}
return testSuccess;
}
struct testEngine sendRecvEngine = {
SendRecvGetBuffSize,
SendRecvRunTest
};
#pragma weak ncclTestEngine=sendRecvEngine
|
9fb80380437f6f3eaeec9d9006997997ebe44acd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "track_ellipse_kernel.h"
#include "misc_math.h"
// #include <cutil.h>
// Constants used in the MGVF computation
#define ONE_OVER_PI (1.0 / PI)
#define MU 0.5
#define LAMBDA (8.0 * MU + 1.0)
// Host and device arrays to hold device pointers to input matrices
float **host_I_array, **host_IMGVF_array;
float **device_I_array, **device_IMGVF_array;
// Host and device arrays to hold sizes of input matrices
int *host_m_array, *host_n_array;
int *device_m_array, *device_n_array;
// Host array to hold matrices for all cells
// (so we can copy to and from the device in a single transfer)
float *host_I_all;
int total_mem_size;
// The number of threads per thread block
const int threads_per_block = 320;
// next_lowest_power_of_two = 2^(floor(log2(threads_per_block)))
const int next_lowest_power_of_two = 256;
// Regularized version of the Heaviside step function:
// He(x) = (atan(x) / pi) + 0.5
__device__ float heaviside(float x) {
return (atan(x) * ONE_OVER_PI) + 0.5;
// A simpler, faster approximation of the Heaviside function
/* float out = 0.0;
if (x > -0.0001) out = 0.5;
if (x > 0.0001) out = 1.0;
return out; */
}
// Kernel to compute the Motion Gradient Vector Field (MGVF) matrix for multiple
// cells
__global__ void IMGVF_kernel(float **IMGVF_array, float **I_array, int *m_array,
int *n_array, float vx, float vy, float e,
int max_iterations, float cutoff) {
// Shared copy of the matrix being computed
__shared__ float IMGVF[41 * 81];
// Shared buffer used for two purposes:
// 1) To temporarily store newly computed matrix values so that only
// values from the previous iteration are used in the computation.
// 2) To store partial sums during the tree reduction which is performed
// at the end of each iteration to determine if the computation has
// converged.
__shared__ float buffer[threads_per_block];
// Figure out which cell this thread block is working on
int cell_num = blockIdx.x;
    // Get pointers to current cell's input image and initial matrix
float *IMGVF_global = IMGVF_array[cell_num];
float *I = I_array[cell_num];
// Get current cell's matrix dimensions
int m = m_array[cell_num];
int n = n_array[cell_num];
// Compute the number of virtual thread blocks
int max = (m * n + threads_per_block - 1) / threads_per_block;
// Load the initial IMGVF matrix into shared memory
int thread_id = threadIdx.x, thread_block, i, j;
for (thread_block = 0; thread_block < max; thread_block++) {
int offset = thread_block * threads_per_block;
i = (thread_id + offset) / n;
j = (thread_id + offset) % n;
if (i < m)
IMGVF[(i * n) + j] = IMGVF_global[(i * n) + j];
}
__syncthreads();
// Set the converged flag to false
__shared__ int cell_converged;
if (threadIdx.x == 0)
cell_converged = 0;
__syncthreads();
// Constants used to iterate through virtual thread blocks
const float one_nth = 1.f / (float)n;
const int tid_mod = thread_id % n;
const int tbsize_mod = threads_per_block % n;
// Constant used in the computation of Heaviside values
float one_over_e = 1.0 / e;
// Iteratively compute the IMGVF matrix until the computation has
// converged or we have reached the maximum number of iterations
int iterations = 0;
while ((!cell_converged) && (iterations < max_iterations)) {
// The total change to this thread's matrix elements in the current
// iteration
float total_diff = 0.0f;
int old_i = 0, old_j = 0;
j = tid_mod - tbsize_mod;
// Iterate over virtual thread blocks
for (thread_block = 0; thread_block < max; thread_block++) {
// Store the index of this thread's previous matrix element
// (used in the buffering scheme below)
old_i = i;
old_j = j;
// Determine the index of this thread's current matrix element
int offset = thread_block * threads_per_block;
i = (thread_id + offset) * one_nth;
j += tbsize_mod;
if (j >= n)
j -= n;
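            // The column index j is carried incrementally across virtual thread blocks
            // (advance by threads_per_block mod n, wrapping at a row boundary), while the
            // row index i is recomputed from the flattened offset via the precomputed 1/n.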
float new_val = 0.0, old_val = 0.0;
// Make sure the thread has not gone off the end of the matrix
if (i < m) {
// Compute neighboring matrix element indices
int rowU = (i == 0) ? 0 : i - 1;
int rowD = (i == m - 1) ? m - 1 : i + 1;
int colL = (j == 0) ? 0 : j - 1;
int colR = (j == n - 1) ? n - 1 : j + 1;
// Compute the difference between the matrix element and its
// eight neighbors
old_val = IMGVF[(i * n) + j];
float U = IMGVF[(rowU * n) + j] - old_val;
float D = IMGVF[(rowD * n) + j] - old_val;
float L = IMGVF[(i * n) + colL] - old_val;
float R = IMGVF[(i * n) + colR] - old_val;
float UR = IMGVF[(rowU * n) + colR] - old_val;
float DR = IMGVF[(rowD * n) + colR] - old_val;
float UL = IMGVF[(rowU * n) + colL] - old_val;
float DL = IMGVF[(rowD * n) + colL] - old_val;
// Compute the regularized heaviside value for these differences
float UHe = heaviside((U * -vy) * one_over_e);
float DHe = heaviside((D * vy) * one_over_e);
float LHe = heaviside((L * -vx) * one_over_e);
float RHe = heaviside((R * vx) * one_over_e);
float URHe = heaviside((UR * (vx - vy)) * one_over_e);
float DRHe = heaviside((DR * (vx + vy)) * one_over_e);
float ULHe = heaviside((UL * (-vx - vy)) * one_over_e);
float DLHe = heaviside((DL * (-vx + vy)) * one_over_e);
// Update the IMGVF value in two steps:
// 1) Compute IMGVF += (mu / lambda)(UHe .*U + DHe .*D + LHe
// .*L + RHe .*R +
// URHe.*UR + DRHe.*DR +
// ULHe.*UL + DLHe.*DL);
new_val = old_val +
(MU / LAMBDA) *
(UHe * U + DHe * D + LHe * L + RHe * R +
URHe * UR + DRHe * DR + ULHe * UL + DLHe * DL);
// 2) Compute IMGVF -= (1 / lambda)(I .* (IMGVF - I))
float vI = I[(i * n) + j];
new_val -= ((1.0 / LAMBDA) * vI * (new_val - vI));
}
__syncthreads();
// Save the previous virtual thread block's value (if it exists)
if (thread_block > 0) {
offset = (thread_block - 1) * threads_per_block;
if (old_i < m)
IMGVF[(old_i * n) + old_j] = buffer[thread_id];
}
if (thread_block < max - 1) {
// Write the new value to the buffer
buffer[thread_id] = new_val;
} else {
// We've reached the final virtual thread block,
// so write directly to the matrix
if (i < m)
IMGVF[(i * n) + j] = new_val;
}
// Keep track of the total change of this thread's matrix elements
total_diff += fabs(new_val - old_val);
// We need to synchronize between virtual thread blocks to prevent
// threads from writing the values from the buffer to the actual
// IMGVF matrix too early
__syncthreads();
}
// We need to compute the overall sum of the change at each matrix
// element
// by performing a tree reduction across the whole threadblock
buffer[thread_id] = total_diff;
__syncthreads();
// Account for thread block sizes that are not a power of 2
if (thread_id >= next_lowest_power_of_two) {
buffer[thread_id - next_lowest_power_of_two] += buffer[thread_id];
}
__syncthreads();
// Perform the tree reduction
int th;
for (th = next_lowest_power_of_two / 2; th > 0; th /= 2) {
if (thread_id < th) {
buffer[thread_id] += buffer[thread_id + th];
}
__syncthreads();
}
// Figure out if we have converged
if (thread_id == 0) {
float mean = buffer[thread_id] / (float)(m * n);
if (mean < cutoff) {
// We have converged, so set the appropriate flag
cell_converged = 1;
}
}
// We need to synchronize to ensure that all threads
// read the correct value of the convergence flag
__syncthreads();
// Keep track of the number of iterations we have performed
iterations++;
}
// Save the final IMGVF matrix to global memory
for (thread_block = 0; thread_block < max; thread_block++) {
int offset = thread_block * threads_per_block;
i = (thread_id + offset) / n;
j = (thread_id + offset) % n;
if (i < m)
IMGVF_global[(i * n) + j] = IMGVF[(i * n) + j];
}
}
// Host function that launches a CUDA kernel to compute the MGVF matrices for
// the specified cells
void IMGVF_cuda(MAT **I, MAT **IMGVF, double vx, double vy, double e,
int max_iterations, double cutoff, int num_cells) {
// Initialize the data on the GPU
IMGVF_cuda_init(I, num_cells);
// Compute the MGVF on the GPU
hipLaunchKernelGGL(( IMGVF_kernel), dim3(num_cells), dim3(threads_per_block), 0, 0,
device_IMGVF_array, device_I_array, device_m_array, device_n_array,
(float)vx, (float)vy, (float)e, max_iterations, (float)cutoff);
// Check for kernel errors
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if (error != hipSuccess) {
printf("MGVF kernel error: %s\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Copy back the final results from the GPU
IMGVF_cuda_cleanup(IMGVF, num_cells);
}
// Initializes data on the GPU for the MGVF kernel
void IMGVF_cuda_init(MAT **IE, int num_cells) {
// Allocate arrays of pointers to device memory
host_I_array = (float **)malloc(sizeof(float *) * num_cells);
host_IMGVF_array = (float **)malloc(sizeof(float *) * num_cells);
hipMalloc((void **)&device_I_array, num_cells * sizeof(float *));
hipMalloc((void **)&device_IMGVF_array, num_cells * sizeof(float *));
// Allocate arrays of memory dimensions
host_m_array = (int *)malloc(sizeof(int) * num_cells);
host_n_array = (int *)malloc(sizeof(int) * num_cells);
hipMalloc((void **)&device_m_array, num_cells * sizeof(int));
hipMalloc((void **)&device_n_array, num_cells * sizeof(int));
// Figure out the size of all of the matrices combined
int i, j, cell_num;
int total_size = 0;
for (cell_num = 0; cell_num < num_cells; cell_num++) {
MAT *I = IE[cell_num];
int size = I->m * I->n;
total_size += size;
}
total_mem_size = total_size * sizeof(float);
// Allocate host memory just once for all cells
host_I_all = (float *)malloc(total_mem_size);
// Allocate device memory just once for all cells
float *device_I_all, *device_IMGVF_all;
hipMalloc((void **)&device_I_all, total_mem_size);
hipMalloc((void **)&device_IMGVF_all, total_mem_size);
// Copy each initial matrix into the allocated host memory
int offset = 0;
for (cell_num = 0; cell_num < num_cells; cell_num++) {
MAT *I = IE[cell_num];
// Determine the size of the matrix
int m = I->m, n = I->n;
int size = m * n;
// Store memory dimensions
host_m_array[cell_num] = m;
host_n_array[cell_num] = n;
// Store pointers to allocated memory
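        // Note: host_I_array / host_IMGVF_array live in host memory but hold
        // *device* pointers (per-cell slices of device_I_all / device_IMGVF_all);
        // staging them here lets every cell's pointer be copied to
        // device_I_array / device_IMGVF_array in a single transfer.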
float *device_I = &(device_I_all[offset]);
float *device_IMGVF = &(device_IMGVF_all[offset]);
host_I_array[cell_num] = device_I;
host_IMGVF_array[cell_num] = device_IMGVF;
// Copy matrix I (which is also the initial IMGVF matrix) into the
// overall array
for (i = 0; i < m; i++)
for (j = 0; j < n; j++)
host_I_all[offset + (i * n) + j] = (float)m_get_val(I, i, j);
offset += size;
}
// Copy I matrices (which are also the initial IMGVF matrices) to device
hipMemcpy(device_I_all, host_I_all, total_mem_size,
hipMemcpyHostToDevice);
hipMemcpy(device_IMGVF_all, host_I_all, total_mem_size,
hipMemcpyHostToDevice);
// Copy pointer arrays to device
hipMemcpy(device_I_array, host_I_array, num_cells * sizeof(float *),
hipMemcpyHostToDevice);
hipMemcpy(device_IMGVF_array, host_IMGVF_array,
num_cells * sizeof(float *), hipMemcpyHostToDevice);
// Copy memory dimension arrays to device
hipMemcpy(device_m_array, host_m_array, num_cells * sizeof(int),
hipMemcpyHostToDevice);
hipMemcpy(device_n_array, host_n_array, num_cells * sizeof(int),
hipMemcpyHostToDevice);
}
// Copies the results of the MGVF kernel back to the host
void IMGVF_cuda_cleanup(MAT **IMGVF_out_array, int num_cells) {
// Copy the result matrices from the device to the host
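    // host_IMGVF_array[0] is the base device pointer of the single contiguous
    // allocation backing every cell's matrix, so one transfer retrieves all results.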
hipMemcpy(host_I_all, host_IMGVF_array[0], total_mem_size,
hipMemcpyDeviceToHost);
// Copy each result matrix into its appropriate host matrix
int cell_num, offset = 0;
for (cell_num = 0; cell_num < num_cells; cell_num++) {
MAT *IMGVF_out = IMGVF_out_array[cell_num];
// Determine the size of the matrix
int m = IMGVF_out->m, n = IMGVF_out->n, i, j;
// Pack the result into the matrix
for (i = 0; i < m; i++)
for (j = 0; j < n; j++)
m_set_val(IMGVF_out, i, j,
(double)host_I_all[offset + (i * n) + j]);
offset += (m * n);
}
// Free device memory
hipFree(device_m_array);
hipFree(device_n_array);
hipFree(device_IMGVF_array);
hipFree(device_I_array);
hipFree(host_IMGVF_array[0]);
hipFree(host_I_array[0]);
// Free host memory
free(host_m_array);
free(host_n_array);
free(host_IMGVF_array);
free(host_I_array);
free(host_I_all);
}
| 9fb80380437f6f3eaeec9d9006997997ebe44acd.cu | #include "track_ellipse_kernel.h"
#include "misc_math.h"
// #include <cutil.h>
// Constants used in the MGVF computation
#define ONE_OVER_PI (1.0 / PI)
#define MU 0.5
#define LAMBDA (8.0 * MU + 1.0)
// Host and device arrays to hold device pointers to input matrices
float **host_I_array, **host_IMGVF_array;
float **device_I_array, **device_IMGVF_array;
// Host and device arrays to hold sizes of input matrices
int *host_m_array, *host_n_array;
int *device_m_array, *device_n_array;
// Host array to hold matrices for all cells
// (so we can copy to and from the device in a single transfer)
float *host_I_all;
int total_mem_size;
// The number of threads per thread block
const int threads_per_block = 320;
// next_lowest_power_of_two = 2^(floor(log2(threads_per_block)))
const int next_lowest_power_of_two = 256;
// Regularized version of the Heaviside step function:
// He(x) = (atan(x) / pi) + 0.5
__device__ float heaviside(float x) {
return (atan(x) * ONE_OVER_PI) + 0.5;
// A simpler, faster approximation of the Heaviside function
/* float out = 0.0;
if (x > -0.0001) out = 0.5;
if (x > 0.0001) out = 1.0;
return out; */
}
// Kernel to compute the Motion Gradient Vector Field (MGVF) matrix for multiple
// cells
__global__ void IMGVF_kernel(float **IMGVF_array, float **I_array, int *m_array,
int *n_array, float vx, float vy, float e,
int max_iterations, float cutoff) {
// Shared copy of the matrix being computed
__shared__ float IMGVF[41 * 81];
// Shared buffer used for two purposes:
// 1) To temporarily store newly computed matrix values so that only
// values from the previous iteration are used in the computation.
// 2) To store partial sums during the tree reduction which is performed
// at the end of each iteration to determine if the computation has
// converged.
__shared__ float buffer[threads_per_block];
// Figure out which cell this thread block is working on
int cell_num = blockIdx.x;
    // Get pointers to current cell's input image and initial matrix
float *IMGVF_global = IMGVF_array[cell_num];
float *I = I_array[cell_num];
// Get current cell's matrix dimensions
int m = m_array[cell_num];
int n = n_array[cell_num];
// Compute the number of virtual thread blocks
int max = (m * n + threads_per_block - 1) / threads_per_block;
// Load the initial IMGVF matrix into shared memory
int thread_id = threadIdx.x, thread_block, i, j;
for (thread_block = 0; thread_block < max; thread_block++) {
int offset = thread_block * threads_per_block;
i = (thread_id + offset) / n;
j = (thread_id + offset) % n;
if (i < m)
IMGVF[(i * n) + j] = IMGVF_global[(i * n) + j];
}
__syncthreads();
// Set the converged flag to false
__shared__ int cell_converged;
if (threadIdx.x == 0)
cell_converged = 0;
__syncthreads();
// Constants used to iterate through virtual thread blocks
const float one_nth = 1.f / (float)n;
const int tid_mod = thread_id % n;
const int tbsize_mod = threads_per_block % n;
// Constant used in the computation of Heaviside values
float one_over_e = 1.0 / e;
// Iteratively compute the IMGVF matrix until the computation has
// converged or we have reached the maximum number of iterations
int iterations = 0;
while ((!cell_converged) && (iterations < max_iterations)) {
// The total change to this thread's matrix elements in the current
// iteration
float total_diff = 0.0f;
int old_i = 0, old_j = 0;
j = tid_mod - tbsize_mod;
// Iterate over virtual thread blocks
for (thread_block = 0; thread_block < max; thread_block++) {
// Store the index of this thread's previous matrix element
// (used in the buffering scheme below)
old_i = i;
old_j = j;
// Determine the index of this thread's current matrix element
int offset = thread_block * threads_per_block;
i = (thread_id + offset) * one_nth;
j += tbsize_mod;
if (j >= n)
j -= n;
float new_val = 0.0, old_val = 0.0;
// Make sure the thread has not gone off the end of the matrix
if (i < m) {
// Compute neighboring matrix element indices
int rowU = (i == 0) ? 0 : i - 1;
int rowD = (i == m - 1) ? m - 1 : i + 1;
int colL = (j == 0) ? 0 : j - 1;
int colR = (j == n - 1) ? n - 1 : j + 1;
// Compute the difference between the matrix element and its
// eight neighbors
old_val = IMGVF[(i * n) + j];
float U = IMGVF[(rowU * n) + j] - old_val;
float D = IMGVF[(rowD * n) + j] - old_val;
float L = IMGVF[(i * n) + colL] - old_val;
float R = IMGVF[(i * n) + colR] - old_val;
float UR = IMGVF[(rowU * n) + colR] - old_val;
float DR = IMGVF[(rowD * n) + colR] - old_val;
float UL = IMGVF[(rowU * n) + colL] - old_val;
float DL = IMGVF[(rowD * n) + colL] - old_val;
// Compute the regularized heaviside value for these differences
float UHe = heaviside((U * -vy) * one_over_e);
float DHe = heaviside((D * vy) * one_over_e);
float LHe = heaviside((L * -vx) * one_over_e);
float RHe = heaviside((R * vx) * one_over_e);
float URHe = heaviside((UR * (vx - vy)) * one_over_e);
float DRHe = heaviside((DR * (vx + vy)) * one_over_e);
float ULHe = heaviside((UL * (-vx - vy)) * one_over_e);
float DLHe = heaviside((DL * (-vx + vy)) * one_over_e);
// Update the IMGVF value in two steps:
                // 1) Compute IMGVF += (mu / lambda)(UHe.*U + DHe.*D + LHe.*L + RHe.*R +
                //                                   URHe.*UR + DRHe.*DR + ULHe.*UL + DLHe.*DL);
new_val = old_val +
(MU / LAMBDA) *
(UHe * U + DHe * D + LHe * L + RHe * R +
URHe * UR + DRHe * DR + ULHe * UL + DLHe * DL);
// 2) Compute IMGVF -= (1 / lambda)(I .* (IMGVF - I))
float vI = I[(i * n) + j];
new_val -= ((1.0 / LAMBDA) * vI * (new_val - vI));
}
__syncthreads();
// Save the previous virtual thread block's value (if it exists)
if (thread_block > 0) {
offset = (thread_block - 1) * threads_per_block;
if (old_i < m)
IMGVF[(old_i * n) + old_j] = buffer[thread_id];
}
if (thread_block < max - 1) {
// Write the new value to the buffer
buffer[thread_id] = new_val;
} else {
// We've reached the final virtual thread block,
// so write directly to the matrix
if (i < m)
IMGVF[(i * n) + j] = new_val;
}
// Keep track of the total change of this thread's matrix elements
total_diff += fabs(new_val - old_val);
// We need to synchronize between virtual thread blocks to prevent
// threads from writing the values from the buffer to the actual
// IMGVF matrix too early
__syncthreads();
}
// We need to compute the overall sum of the change at each matrix
// element
// by performing a tree reduction across the whole threadblock
buffer[thread_id] = total_diff;
__syncthreads();
// Account for thread block sizes that are not a power of 2
if (thread_id >= next_lowest_power_of_two) {
buffer[thread_id - next_lowest_power_of_two] += buffer[thread_id];
}
__syncthreads();
// Perform the tree reduction
int th;
for (th = next_lowest_power_of_two / 2; th > 0; th /= 2) {
if (thread_id < th) {
buffer[thread_id] += buffer[thread_id + th];
}
__syncthreads();
}
// Figure out if we have converged
if (thread_id == 0) {
float mean = buffer[thread_id] / (float)(m * n);
if (mean < cutoff) {
// We have converged, so set the appropriate flag
cell_converged = 1;
}
}
// We need to synchronize to ensure that all threads
// read the correct value of the convergence flag
__syncthreads();
// Keep track of the number of iterations we have performed
iterations++;
}
// Save the final IMGVF matrix to global memory
for (thread_block = 0; thread_block < max; thread_block++) {
int offset = thread_block * threads_per_block;
i = (thread_id + offset) / n;
j = (thread_id + offset) % n;
if (i < m)
IMGVF_global[(i * n) + j] = IMGVF[(i * n) + j];
}
}
// Host function that launches a CUDA kernel to compute the MGVF matrices for
// the specified cells
void IMGVF_cuda(MAT **I, MAT **IMGVF, double vx, double vy, double e,
int max_iterations, double cutoff, int num_cells) {
// Initialize the data on the GPU
IMGVF_cuda_init(I, num_cells);
// Compute the MGVF on the GPU
IMGVF_kernel<<<num_cells, threads_per_block>>>(
device_IMGVF_array, device_I_array, device_m_array, device_n_array,
(float)vx, (float)vy, (float)e, max_iterations, (float)cutoff);
// Check for kernel errors
cudaThreadSynchronize();
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
printf("MGVF kernel error: %s\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Copy back the final results from the GPU
IMGVF_cuda_cleanup(IMGVF, num_cells);
}
// Initializes data on the GPU for the MGVF kernel
void IMGVF_cuda_init(MAT **IE, int num_cells) {
// Allocate arrays of pointers to device memory
host_I_array = (float **)malloc(sizeof(float *) * num_cells);
host_IMGVF_array = (float **)malloc(sizeof(float *) * num_cells);
cudaMalloc((void **)&device_I_array, num_cells * sizeof(float *));
cudaMalloc((void **)&device_IMGVF_array, num_cells * sizeof(float *));
// Allocate arrays of memory dimensions
host_m_array = (int *)malloc(sizeof(int) * num_cells);
host_n_array = (int *)malloc(sizeof(int) * num_cells);
cudaMalloc((void **)&device_m_array, num_cells * sizeof(int));
cudaMalloc((void **)&device_n_array, num_cells * sizeof(int));
// Figure out the size of all of the matrices combined
int i, j, cell_num;
int total_size = 0;
for (cell_num = 0; cell_num < num_cells; cell_num++) {
MAT *I = IE[cell_num];
int size = I->m * I->n;
total_size += size;
}
total_mem_size = total_size * sizeof(float);
// Allocate host memory just once for all cells
host_I_all = (float *)malloc(total_mem_size);
// Allocate device memory just once for all cells
float *device_I_all, *device_IMGVF_all;
cudaMalloc((void **)&device_I_all, total_mem_size);
cudaMalloc((void **)&device_IMGVF_all, total_mem_size);
// Copy each initial matrix into the allocated host memory
int offset = 0;
for (cell_num = 0; cell_num < num_cells; cell_num++) {
MAT *I = IE[cell_num];
// Determine the size of the matrix
int m = I->m, n = I->n;
int size = m * n;
// Store memory dimensions
host_m_array[cell_num] = m;
host_n_array[cell_num] = n;
// Store pointers to allocated memory
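        // Note: host_I_array / host_IMGVF_array live in host memory but hold
        // *device* pointers (per-cell slices of device_I_all / device_IMGVF_all);
        // staging them here lets every cell's pointer be copied to
        // device_I_array / device_IMGVF_array in a single transfer.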
float *device_I = &(device_I_all[offset]);
float *device_IMGVF = &(device_IMGVF_all[offset]);
host_I_array[cell_num] = device_I;
host_IMGVF_array[cell_num] = device_IMGVF;
// Copy matrix I (which is also the initial IMGVF matrix) into the
// overall array
for (i = 0; i < m; i++)
for (j = 0; j < n; j++)
host_I_all[offset + (i * n) + j] = (float)m_get_val(I, i, j);
offset += size;
}
// Copy I matrices (which are also the initial IMGVF matrices) to device
cudaMemcpy(device_I_all, host_I_all, total_mem_size,
cudaMemcpyHostToDevice);
cudaMemcpy(device_IMGVF_all, host_I_all, total_mem_size,
cudaMemcpyHostToDevice);
// Copy pointer arrays to device
cudaMemcpy(device_I_array, host_I_array, num_cells * sizeof(float *),
cudaMemcpyHostToDevice);
cudaMemcpy(device_IMGVF_array, host_IMGVF_array,
num_cells * sizeof(float *), cudaMemcpyHostToDevice);
// Copy memory dimension arrays to device
cudaMemcpy(device_m_array, host_m_array, num_cells * sizeof(int),
cudaMemcpyHostToDevice);
cudaMemcpy(device_n_array, host_n_array, num_cells * sizeof(int),
cudaMemcpyHostToDevice);
}
// Copies the results of the MGVF kernel back to the host
void IMGVF_cuda_cleanup(MAT **IMGVF_out_array, int num_cells) {
// Copy the result matrices from the device to the host
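    // host_IMGVF_array[0] is the base device pointer of the single contiguous
    // allocation backing every cell's matrix, so one transfer retrieves all results.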
cudaMemcpy(host_I_all, host_IMGVF_array[0], total_mem_size,
cudaMemcpyDeviceToHost);
// Copy each result matrix into its appropriate host matrix
int cell_num, offset = 0;
for (cell_num = 0; cell_num < num_cells; cell_num++) {
MAT *IMGVF_out = IMGVF_out_array[cell_num];
// Determine the size of the matrix
int m = IMGVF_out->m, n = IMGVF_out->n, i, j;
// Pack the result into the matrix
for (i = 0; i < m; i++)
for (j = 0; j < n; j++)
m_set_val(IMGVF_out, i, j,
(double)host_I_all[offset + (i * n) + j]);
offset += (m * n);
}
// Free device memory
cudaFree(device_m_array);
cudaFree(device_n_array);
cudaFree(device_IMGVF_array);
cudaFree(device_I_array);
cudaFree(host_IMGVF_array[0]);
cudaFree(host_I_array[0]);
// Free host memory
free(host_m_array);
free(host_n_array);
free(host_IMGVF_array);
free(host_I_array);
free(host_I_all);
}
|
bb7185d47f439b83f14ce24c1ade3d3efb9ddc2f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "common.h"
__global__ void reduction_neighbor_pair(int *input, int *temp, int size){
int tid = threadIdx.x;
int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= size){
return;
}
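    // Block-level tree reduction in place: the stride starts at blockDim.x / 2 and
    // halves each iteration; threads with tid < offset add input[gid + offset] into
    // input[gid], so the block's partial sum ends up at its first element (tid == 0).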
for(int offset = blockDim.x / 2; offset > 0; offset = offset /2){
if(tid < offset){
input[gid] += input[gid + offset];
}
__syncthreads();
}
if(tid == 0){
temp[blockIdx.x] = input[gid];
}
}
int main(int argc, char **argv){
printf("Running neighboring reduction pair kernel\n");
int size = 1 << 27;
int byte_size = size * sizeof(int);
int block_size = 128;
int *h_input, *h_ref;
h_input = (int *) malloc(byte_size);
initialize(h_input, size, INIT_RANDOM);
// get reduction result from CPU
int cpu_result = reduction_cpu(h_input, size);
dim3 block(block_size);
dim3 grid(size/block.x);
printf("Kernel launch params:\n grid.x: %d, block.x: %d\n", grid.x, block.x);
int temp_array_byte_size = sizeof(int) * grid.x;
h_ref = (int *) malloc(temp_array_byte_size);
int *d_input, *d_temp;
hipMalloc((void **)&d_input, byte_size);
hipMalloc((void **)&d_temp, temp_array_byte_size);
hipMemset(d_temp, 0, temp_array_byte_size);
hipMemcpy(d_input, h_input, byte_size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( reduction_neighbor_pair) , dim3(grid), dim3(block), 0, 0, d_input, d_temp, size);
hipDeviceSynchronize();
hipMemcpy(h_ref, d_temp, temp_array_byte_size, hipMemcpyDeviceToHost);
int gpu_result = 0;
for(int i = 0; i < grid.x; i++) {
gpu_result += h_ref[i];
}
compare_results(gpu_result, cpu_result);
free(h_ref);
free(h_input);
hipFree(d_temp);
hipFree(d_input);
hipDeviceReset();
return 0;
}
| bb7185d47f439b83f14ce24c1ade3d3efb9ddc2f.cu | #include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "common.h"
__global__ void reduction_neighbor_pair(int *input, int *temp, int size){
int tid = threadIdx.x;
int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= size){
return;
}
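    // Block-level tree reduction in place: the stride starts at blockDim.x / 2 and
    // halves each iteration; threads with tid < offset add input[gid + offset] into
    // input[gid], so the block's partial sum ends up at its first element (tid == 0).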
for(int offset = blockDim.x / 2; offset > 0; offset = offset /2){
if(tid < offset){
input[gid] += input[gid + offset];
}
__syncthreads();
}
if(tid == 0){
temp[blockIdx.x] = input[gid];
}
}
int main(int argc, char **argv){
printf("Running neighboring reduction pair kernel\n");
int size = 1 << 27;
int byte_size = size * sizeof(int);
int block_size = 128;
int *h_input, *h_ref;
h_input = (int *) malloc(byte_size);
initialize(h_input, size, INIT_RANDOM);
// get reduction result from CPU
int cpu_result = reduction_cpu(h_input, size);
dim3 block(block_size);
dim3 grid(size/block.x);
printf("Kernel launch params:\n grid.x: %d, block.x: %d\n", grid.x, block.x);
int temp_array_byte_size = sizeof(int) * grid.x;
h_ref = (int *) malloc(temp_array_byte_size);
int *d_input, *d_temp;
cudaMalloc((void **)&d_input, byte_size);
cudaMalloc((void **)&d_temp, temp_array_byte_size);
cudaMemset(d_temp, 0, temp_array_byte_size);
cudaMemcpy(d_input, h_input, byte_size, cudaMemcpyHostToDevice);
reduction_neighbor_pair <<<grid, block>>>(d_input, d_temp, size);
cudaDeviceSynchronize();
cudaMemcpy(h_ref, d_temp, temp_array_byte_size, cudaMemcpyDeviceToHost);
int gpu_result = 0;
for(int i = 0; i < grid.x; i++) {
gpu_result += h_ref[i];
}
compare_results(gpu_result, cpu_result);
free(h_ref);
free(h_input);
cudaFree(d_temp);
cudaFree(d_input);
cudaDeviceReset();
return 0;
}
|
4d80e9b318ea5c49038cfb7b212420e12a65b32c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Template code for convolution. CS6023, IITM */
#include<stdio.h>
#include<cuda.h>
#include<math.h>
#define W 1024 // Input DIM
#define OW (W-4) // Output DIM
#define D 8 // Input and Kernel Depth
#define T 5 // Kernel DIM
#define N 128 // Number of kernels
void fillMatrix(unsigned char *matrix){
unsigned char (*m)[W][D]=(unsigned char (*)[W][D])matrix;
for(int i=0;i<W;i++){
for(int j=0;j<W;j++){
for(int k=0;k<D;k++){
m[i][j][k]=(i*j+j*k+i*k+i*2+j*3+k*4)%255;
}
}
}
}
void fillKernel(float *kernel){
float (*t)[T][T][D]=(float (*)[T][T][D])kernel;
for(int i=0;i<N;i++){
for(int j=0;j<T;j++){
for(int k=0;k<T;k++){
for(int l=0;l<D;l++){
t[i][j][k][l]=fmod(-(i+1)*2.1+(j+1)*3.2-(k+1)*4.8+(l+1)*7.1,1.0);
}
}
}
}
}
void print_matrix_to_file(float *m){
const char *fname = "assignment4_out";
FILE *f = fopen(fname, "w");
float (*mat)[OW][OW]=(float (*)[OW][OW])m;
for(unsigned i=0; i < N; i++) {
for(unsigned j=0; j < OW; j++)
for(unsigned k=0;k<OW;k++)
fprintf(f,"%4f ", mat[i][j][k]);
fprintf(f,"\n");
}
fclose(f);
}
__global__ void conv(unsigned char *matrix,float *tile,float *output){
int filter=blockIdx.x;
int eX=blockIdx.y;
int eY=threadIdx.x;
unsigned char (*m)[W][D]=(unsigned char (*)[W][D])matrix;
float (*t)[T][T][D]=(float (*)[T][T][D])tile;
float (*o)[OW][OW]=(float (*)[OW][OW])output;
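    // Work decomposition: blockIdx.x selects the filter, blockIdx.y the input row (eX)
    // and threadIdx.x the input column (eY). For each of the T kernel rows, one image
    // row (W x D values) is staged in shared memory, every interior thread accumulates
    // its partial dot product over that row, and the partials for output element
    // (eX-2, eY-2) are combined across the T row iterations with atomicAdd.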
__shared__ unsigned char slice[W][D];
float psum;
if(eX<2||eX>W-3) return;
for(int j=0;j<T;j++){
for(int i=0;i<D;i++){
slice[eY][i]=m[(eX+j-2)][eY][i];
}
__syncthreads();
psum=0.0f;
if(!(eY<2||eY>W-3)){
for(int k=0;k<T;k++){
for(int l=0;l<D;l++){
psum+=t[filter][j][k][l]*slice[eY+k-2][l];
}
}
atomicAdd(&o[filter][(eX-2)][eY-2],psum);
}
__syncthreads();
}
}
int main()
{
unsigned char *matrix=(unsigned char*)malloc(sizeof(unsigned char)*W*W*D);
float *kernel=(float*)malloc(sizeof(float)*T*T*D*N);
float *output=(float *)malloc(sizeof(float)*N*OW*OW);
fillMatrix(matrix);
fillKernel(kernel);
unsigned char *Dmatrix;hipMalloc(&Dmatrix,sizeof(unsigned char)*W*W*D);
float *Dkernel;hipMalloc(&Dkernel,sizeof(float)*N*T*T*D);
float *Doutput;hipMalloc(&Doutput,sizeof(float)*N*OW*OW);
hipMemcpy(Dmatrix, matrix, sizeof(unsigned char)*W*W*D,hipMemcpyHostToDevice);
hipMemcpy(Dkernel, kernel, sizeof(float)*T*T*D*N,hipMemcpyHostToDevice);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float milliseconds = 0;
hipEventRecord(start,0);
//Make your cuda kernel call
hipLaunchKernelGGL(( conv), dim3(dim3(N,W)),dim3(W), 0, 0, Dmatrix,Dkernel,Doutput);
hipDeviceSynchronize();
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&milliseconds, start, stop);
printf("%f\n",milliseconds);
hipMemcpy(output, Doutput, sizeof(float)*N*OW*OW,hipMemcpyDeviceToHost);
//Use print_matrix_to_file function only
print_matrix_to_file(output);
}
| 4d80e9b318ea5c49038cfb7b212420e12a65b32c.cu | /*
Template code for convolution. CS6023, IITM */
#include<stdio.h>
#include<cuda.h>
#include<math.h>
#define W 1024 // Input DIM
#define OW (W-4) // Output DIM
#define D 8 // Input and Kernel Depth
#define T 5 // Kernel DIM
#define N 128 // Number of kernels
void fillMatrix(unsigned char *matrix){
unsigned char (*m)[W][D]=(unsigned char (*)[W][D])matrix;
for(int i=0;i<W;i++){
for(int j=0;j<W;j++){
for(int k=0;k<D;k++){
m[i][j][k]=(i*j+j*k+i*k+i*2+j*3+k*4)%255;
}
}
}
}
void fillKernel(float *kernel){
float (*t)[T][T][D]=(float (*)[T][T][D])kernel;
for(int i=0;i<N;i++){
for(int j=0;j<T;j++){
for(int k=0;k<T;k++){
for(int l=0;l<D;l++){
t[i][j][k][l]=fmod(-(i+1)*2.1+(j+1)*3.2-(k+1)*4.8+(l+1)*7.1,1.0);
}
}
}
}
}
void print_matrix_to_file(float *m){
const char *fname = "assignment4_out";
FILE *f = fopen(fname, "w");
float (*mat)[OW][OW]=(float (*)[OW][OW])m;
for(unsigned i=0; i < N; i++) {
for(unsigned j=0; j < OW; j++)
for(unsigned k=0;k<OW;k++)
fprintf(f,"%4f ", mat[i][j][k]);
fprintf(f,"\n");
}
fclose(f);
}
__global__ void conv(unsigned char *matrix,float *tile,float *output){
int filter=blockIdx.x;
int eX=blockIdx.y;
int eY=threadIdx.x;
unsigned char (*m)[W][D]=(unsigned char (*)[W][D])matrix;
float (*t)[T][T][D]=(float (*)[T][T][D])tile;
float (*o)[OW][OW]=(float (*)[OW][OW])output;
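    // Work decomposition: blockIdx.x selects the filter, blockIdx.y the input row (eX)
    // and threadIdx.x the input column (eY). For each of the T kernel rows, one image
    // row (W x D values) is staged in shared memory, every interior thread accumulates
    // its partial dot product over that row, and the partials for output element
    // (eX-2, eY-2) are combined across the T row iterations with atomicAdd.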
__shared__ unsigned char slice[W][D];
float psum;
if(eX<2||eX>W-3) return;
for(int j=0;j<T;j++){
for(int i=0;i<D;i++){
slice[eY][i]=m[(eX+j-2)][eY][i];
}
__syncthreads();
psum=0.0f;
if(!(eY<2||eY>W-3)){
for(int k=0;k<T;k++){
for(int l=0;l<D;l++){
psum+=t[filter][j][k][l]*slice[eY+k-2][l];
}
}
atomicAdd(&o[filter][(eX-2)][eY-2],psum);
}
__syncthreads();
}
}
int main()
{
unsigned char *matrix=(unsigned char*)malloc(sizeof(unsigned char)*W*W*D);
float *kernel=(float*)malloc(sizeof(float)*T*T*D*N);
float *output=(float *)malloc(sizeof(float)*N*OW*OW);
fillMatrix(matrix);
fillKernel(kernel);
unsigned char *Dmatrix;cudaMalloc(&Dmatrix,sizeof(unsigned char)*W*W*D);
float *Dkernel;cudaMalloc(&Dkernel,sizeof(float)*N*T*T*D);
float *Doutput;cudaMalloc(&Doutput,sizeof(float)*N*OW*OW);
cudaMemcpy(Dmatrix, matrix, sizeof(unsigned char)*W*W*D,cudaMemcpyHostToDevice);
cudaMemcpy(Dkernel, kernel, sizeof(float)*T*T*D*N,cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float milliseconds = 0;
cudaEventRecord(start,0);
//Make your cuda kernel call
conv<<<dim3(N,W),W>>>(Dmatrix,Dkernel,Doutput);
cudaDeviceSynchronize();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
printf("%f\n",milliseconds);
cudaMemcpy(output, Doutput, sizeof(float)*N*OW*OW,cudaMemcpyDeviceToHost);
//Use print_matrix_to_file function only
print_matrix_to_file(output);
}
|
7a09241b3113de8f029551c9e86f58f04838af80.hip | // !!! This is a file automatically generated by hipify!!!
// modified from https://github.com/NVIDIA/apex/blob/master/csrc/multi_tensor_adam.cu
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/Exceptions.h>
// Another possibility:
// #include <torch/all.h>
#include <assert.h>
#include "type_shim.h"
#include "multi_tensor_apply.cuh"
#define BLOCK_SIZE 512
#define ILP 4
typedef enum
{
ADAM_MODE_0 = 0, // L2 regularization mode
ADAM_MODE_1 = 1 // Decoupled weight decay mode(AdamW)
} adamMode_t;
using MATH_T = float;
template <typename T>
struct AdamFunctor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int *noop_gmem,
TensorListMetadata<4> &tl,
const float beta1,
const float beta2,
const float beta1_correction,
const float beta2_correction,
const float epsilon,
const float lr,
adamMode_t mode,
const float decay)
{
// I'd like this kernel to propagate infs/nans.
// if(*noop_gmem == 1)
// return;
int tensor_loc = tl.block_to_tensor[blockIdx.x];
// potentially use to pass in list of scalar
// int tensor_num = tl.start_tensor_this_launch + tensor_loc;
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
T *g = (T *)tl.addresses[0][tensor_loc];
g += chunk_idx * chunk_size;
T *p = (T *)tl.addresses[1][tensor_loc];
p += chunk_idx * chunk_size;
T *m = (T *)tl.addresses[2][tensor_loc];
m += chunk_idx * chunk_size;
T *v = (T *)tl.addresses[3][tensor_loc];
v += chunk_idx * chunk_size;
n -= chunk_idx * chunk_size;
// see note in multi_tensor_scale_kernel.cu
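        // Each thread strides through the chunk ILP elements at a time
        // (i = i_start + threadIdx.x + ii * blockDim.x), staging g/p/m/v in registers,
        // applying the Adam update, and writing p/m/v back in place.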
for (int i_start = 0;
i_start < n && i_start < chunk_size;
i_start += blockDim.x * ILP)
{
MATH_T r_g[ILP];
MATH_T r_p[ILP];
MATH_T r_m[ILP];
MATH_T r_v[ILP];
#pragma unroll
for (int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii * blockDim.x;
if (i < n && i < chunk_size)
{
r_g[ii] = g[i];
r_p[ii] = p[i];
r_m[ii] = m[i];
r_v[ii] = v[i];
}
else
{
r_g[ii] = MATH_T(0);
r_p[ii] = MATH_T(0);
r_m[ii] = MATH_T(0);
r_v[ii] = MATH_T(0);
}
}
#pragma unroll
for (int ii = 0; ii < ILP; ii++)
{
if (mode == ADAM_MODE_0)
{ // L2
r_g[ii] = r_g[ii] + (decay * r_p[ii]);
r_m[ii] = beta1 * r_m[ii] + (1 - beta1) * r_g[ii];
r_v[ii] = beta2 * r_v[ii] + (1 - beta2) * r_g[ii] * r_g[ii];
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
MATH_T update = next_m_unbiased / denom;
r_p[ii] = r_p[ii] - (lr * update);
}
else
{ // weight decay
r_m[ii] = beta1 * r_m[ii] + (1 - beta1) * r_g[ii];
r_v[ii] = beta2 * r_v[ii] + (1 - beta2) * r_g[ii] * r_g[ii];
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
MATH_T update = (next_m_unbiased / denom) + (decay * r_p[ii]);
r_p[ii] = r_p[ii] - (lr * update);
}
}
#pragma unroll
for (int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii * blockDim.x;
if (i < n && i < chunk_size)
{
p[i] = r_p[ii];
m[i] = r_m[ii];
v[i] = r_v[ii];
}
}
}
}
};
void multi_tensor_adam_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
const float lr,
const float beta1,
const float beta2,
const float epsilon,
const int step,
const int mode,
const int bias_correction,
const float weight_decay)
{
using namespace at;
// Handle bias correction mode
float bias_correction1 = 1.0f, bias_correction2 = 1.0f;
if (bias_correction == 1)
{
bias_correction1 = 1 - ::pow(beta1, step);
bias_correction2 = 1 - ::pow(beta2, step);
}
// Assume single type across p,g,m1,m2 now
DISPATCH_DOUBLE_FLOAT_AND_HALF(
tensor_lists[0][0].scalar_type(), 0, "adam",
multi_tensor_apply<4>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
AdamFunctor<scalar_t_0>(),
beta1,
beta2,
bias_correction1,
bias_correction2,
epsilon,
lr,
(adamMode_t)mode,
weight_decay);)
AT_CUDA_CHECK(hipGetLastError());
} | 7a09241b3113de8f029551c9e86f58f04838af80.cu | // modified from https://github.com/NVIDIA/apex/blob/master/csrc/multi_tensor_adam.cu
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/Exceptions.h>
// Another possibility:
// #include <torch/all.h>
#include <assert.h>
#include "type_shim.h"
#include "multi_tensor_apply.cuh"
#define BLOCK_SIZE 512
#define ILP 4
typedef enum
{
ADAM_MODE_0 = 0, // L2 regularization mode
ADAM_MODE_1 = 1 // Decoupled weight decay mode(AdamW)
} adamMode_t;
using MATH_T = float;
template <typename T>
struct AdamFunctor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int *noop_gmem,
TensorListMetadata<4> &tl,
const float beta1,
const float beta2,
const float beta1_correction,
const float beta2_correction,
const float epsilon,
const float lr,
adamMode_t mode,
const float decay)
{
// I'd like this kernel to propagate infs/nans.
// if(*noop_gmem == 1)
// return;
int tensor_loc = tl.block_to_tensor[blockIdx.x];
// potentially use to pass in list of scalar
// int tensor_num = tl.start_tensor_this_launch + tensor_loc;
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
T *g = (T *)tl.addresses[0][tensor_loc];
g += chunk_idx * chunk_size;
T *p = (T *)tl.addresses[1][tensor_loc];
p += chunk_idx * chunk_size;
T *m = (T *)tl.addresses[2][tensor_loc];
m += chunk_idx * chunk_size;
T *v = (T *)tl.addresses[3][tensor_loc];
v += chunk_idx * chunk_size;
n -= chunk_idx * chunk_size;
// see note in multi_tensor_scale_kernel.cu
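        // Each thread strides through the chunk ILP elements at a time
        // (i = i_start + threadIdx.x + ii * blockDim.x), staging g/p/m/v in registers,
        // applying the Adam update, and writing p/m/v back in place.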
for (int i_start = 0;
i_start < n && i_start < chunk_size;
i_start += blockDim.x * ILP)
{
MATH_T r_g[ILP];
MATH_T r_p[ILP];
MATH_T r_m[ILP];
MATH_T r_v[ILP];
#pragma unroll
for (int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii * blockDim.x;
if (i < n && i < chunk_size)
{
r_g[ii] = g[i];
r_p[ii] = p[i];
r_m[ii] = m[i];
r_v[ii] = v[i];
}
else
{
r_g[ii] = MATH_T(0);
r_p[ii] = MATH_T(0);
r_m[ii] = MATH_T(0);
r_v[ii] = MATH_T(0);
}
}
#pragma unroll
for (int ii = 0; ii < ILP; ii++)
{
if (mode == ADAM_MODE_0)
{ // L2
r_g[ii] = r_g[ii] + (decay * r_p[ii]);
r_m[ii] = beta1 * r_m[ii] + (1 - beta1) * r_g[ii];
r_v[ii] = beta2 * r_v[ii] + (1 - beta2) * r_g[ii] * r_g[ii];
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
MATH_T update = next_m_unbiased / denom;
r_p[ii] = r_p[ii] - (lr * update);
}
else
{ // weight decay
r_m[ii] = beta1 * r_m[ii] + (1 - beta1) * r_g[ii];
r_v[ii] = beta2 * r_v[ii] + (1 - beta2) * r_g[ii] * r_g[ii];
MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
MATH_T update = (next_m_unbiased / denom) + (decay * r_p[ii]);
r_p[ii] = r_p[ii] - (lr * update);
}
}
#pragma unroll
for (int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii * blockDim.x;
if (i < n && i < chunk_size)
{
p[i] = r_p[ii];
m[i] = r_m[ii];
v[i] = r_v[ii];
}
}
}
}
};
void multi_tensor_adam_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
const float lr,
const float beta1,
const float beta2,
const float epsilon,
const int step,
const int mode,
const int bias_correction,
const float weight_decay)
{
using namespace at;
// Handle bias correction mode
float bias_correction1 = 1.0f, bias_correction2 = 1.0f;
if (bias_correction == 1)
{
bias_correction1 = 1 - std::pow(beta1, step);
bias_correction2 = 1 - std::pow(beta2, step);
}
// Assume single type across p,g,m1,m2 now
DISPATCH_DOUBLE_FLOAT_AND_HALF(
tensor_lists[0][0].scalar_type(), 0, "adam",
multi_tensor_apply<4>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
AdamFunctor<scalar_t_0>(),
beta1,
beta2,
bias_correction1,
bias_correction2,
epsilon,
lr,
(adamMode_t)mode,
weight_decay);)
AT_CUDA_CHECK(cudaGetLastError());
} |
88d26d541a75af64ac290c998c429ff89e8fac78.hip | // !!! This is a file automatically generated by hipify!!!
//#include <helper_cuda.h>
#include <helper_timer.h>
#include <mpfr.h>
#include <qd/dd_real.h>
#include "../../gpuprec/gqd/gqd.cu"
using namespace std;
void qd2gqd(dd_real* dd_data, gdd_real* gdd_data, const unsigned int numElement) {
for (unsigned int i = 0; i < numElement; i++) {
gdd_data[i].x = dd_data[i].x[0];
gdd_data[i].y = dd_data[i].x[1];
}
}
void gqd2qd(gdd_real* gdd_data, dd_real* dd_data, const unsigned int numElement) {
for (unsigned int i = 0; i < numElement; i++) {
dd_data[i].x[0] = gdd_data[i].x;
dd_data[i].x[1] = gdd_data[i].y;
}
}
void qd2gqd2(dd_real dd_data[][5], gdd_real gdd_data[][5], int d1, int d2, int numElement) {
for (unsigned int i = 0; i < d1; i++) {
for (unsigned int j = 0; j < d2; j++) {
gdd_data[i][j].x = dd_data[i][j].x[0];
gdd_data[i][j].y = dd_data[i][j].x[1];
}
}
}
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include <mpfr.h>
#include <math.h>
#include <string.h>
#define NUMBER_PAR_PER_BOX 100
#ifdef RD_WG_SIZE_0_0
#define NUMBER_THREADS RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define NUMBER_THREADS RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define NUMBER_THREADS RD_WG_SIZE
#else
#define NUMBER_THREADS 128
#endif
#define DOT(A,B) ((A.x)*(B.x)+(A.y)*(B.y)+(A.z)*(B.z)) // STABLE
using namespace std;
typedef struct nei_str {
// neighbor box
int x;
int y;
int z;
int number;
long offset;}nei_str;
typedef struct box_str {
// home box
int x;
int y;
int z;
int number;
long offset;
// neighbor boxes
int nn;
::nei_str nei[26];}box_str;
typedef struct dim_str {
// input arguments
int cur_arg;
int arch_arg;
int cores_arg;
int boxes1d_arg;
// system memory
long number_boxes;
long box_mem;
long space_elem;}dim_str;
int isInteger(char *str)
{
if (( *str) == '\0') {
return 0;
}
for (; ( *str) != '\0'; str++) {
if (( *str) < 48 || ( *str) > 57) {
return 0;
}
}
return 1;
}
void checkCUDAError(const char *msg)
{
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
printf("Cuda error: %s: %s.\n",msg,(hipGetErrorString(err)));
fflush(0L);
exit(1);
}
}
__constant__ float dev_par;
__global__ void kernel_gpu_cuda(::dim_str d_dim_gpu,::box_str *d_box_gpu,float *d_rv_v,float *d_rv_x,float *d_rv_y,float *d_rv_z,float *d_qv_gpu,float *d_fv_v,float *d_fv_x,float *d_fv_y,float *d_fv_z)
{
int bx = blockIdx . x;
int tx = threadIdx . x;
int wtx = tx;
if (bx < d_dim_gpu . number_boxes) {
// parameters
float a2 = 2.0 * dev_par * dev_par;
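    // Each thread block handles one "home" box: its NUMBER_PAR_PER_BOX (100) particles
    // are staged in shared memory, then the block loops over the home box plus its
    // (up to 26) neighbor boxes, staging each neighbor's particles and accumulating
    // the exp(-a2 * r2) pairwise contributions into fA_v/fA_x/fA_y/fA_z.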
// home box
int first_i;
float *rA_v;
float *rA_x;
float *rA_y;
float *rA_z;
float *fA_v;
float *fA_x;
float *fA_y;
float *fA_z;
__shared__ float rA_shared_v[100];
__shared__ float rA_shared_x[100];
__shared__ float rA_shared_y[100];
__shared__ float rA_shared_z[100];
// nei box
int pointer;
int k = 0;
int first_j;
float *rB_v;
float *rB_x;
float *rB_y;
float *rB_z;
float *qB;
int j = 0;
__shared__ float rB_shared_v[100];
__shared__ float rB_shared_x[100];
__shared__ float rB_shared_y[100];
__shared__ float rB_shared_z[100];
__shared__ float qb_shared[100];
// common
float r2;
float u2;
float vij;
float fs;
float fxij;
float fyij;
float fzij;
float s_x;
float s_y;
float s_z;
first_i = d_box_gpu[bx] . offset;
rA_v =( &d_rv_v[first_i]);
rA_x =( &d_rv_x[first_i]);
rA_y =( &d_rv_y[first_i]);
rA_z =( &d_rv_z[first_i]);
fA_v =( &d_fv_v[first_i]);
fA_x =( &d_fv_x[first_i]);
fA_y =( &d_fv_y[first_i]);
fA_z =( &d_fv_z[first_i]);
while(wtx < 100){
rA_shared_v[wtx] =( rA_v[wtx]);
rA_shared_x[wtx] =( rA_x[wtx]);
rA_shared_y[wtx] =( rA_y[wtx]);
rA_shared_z[wtx] =( rA_z[wtx]);
wtx = wtx + 640;
}
wtx = tx;
__syncthreads();
for (k = 0; k < 1 + d_box_gpu[bx] . nn; k++) {
if (k == 0) {
pointer = bx;
}
else {
pointer = d_box_gpu[bx] . nei[k - 1] . number;
}
first_j = d_box_gpu[pointer] . offset;
rB_v =( &d_rv_v[first_j]);
rB_x =( &d_rv_x[first_j]);
rB_y =( &d_rv_y[first_j]);
rB_z =( &d_rv_z[first_j]);
qB =( &d_qv_gpu[first_j]);
while(wtx < 100){
rB_shared_v[wtx] =( rB_v[wtx]);
rB_shared_x[wtx] =( rB_x[wtx]);
rB_shared_y[wtx] =( rB_y[wtx]);
rB_shared_z[wtx] =( rB_z[wtx]);
qb_shared[wtx] =( qB[wtx]);
wtx = wtx + 640;
}
wtx = tx;
__syncthreads();
while(wtx < 100){
for (j = 0; j < 100; j++) {
r2 = rA_shared_v[wtx] + rB_shared_v[j] - (rA_shared_x[wtx] * rB_shared_x[j] + rA_shared_y[wtx] * rB_shared_y[j] + rA_shared_z[wtx] * rB_shared_z[j]);
u2 = a2 * r2;
vij = exp(-u2);
fs =( 2 * vij);
s_x = rA_shared_x[wtx] - rB_shared_x[j];
fxij =( fs * s_x);
s_y = rA_shared_y[wtx] - rB_shared_y[j];
fyij =( fs * s_y);
s_z = rA_shared_z[wtx] - rB_shared_z[j];
fzij =( fs * s_z);
fA_v[wtx] = fA_v[wtx] + qb_shared[j] * vij;
fA_x[wtx] = fA_x[wtx] + qb_shared[j] * fxij;
fA_y[wtx] = fA_y[wtx] + qb_shared[j] * fyij;
fA_z[wtx] = fA_z[wtx] + qb_shared[j] * fzij;
}
wtx = wtx + 640;
}
wtx = tx;
__syncthreads();
}
}
}
int main(int argc,char *argv[])
{
printf("thread block size of kernel = %d \n",640);
int i;
int j;
int k;
int l;
int m;
int n;
float par_cpu;
::dim_str dim_cpu;
::box_str *box_cpu;
int nh;
dim_cpu . boxes1d_arg = 1;
for (dim_cpu . cur_arg = 1; dim_cpu . cur_arg < argc; dim_cpu . cur_arg++) {
if (strcmp(argv[dim_cpu . cur_arg],"-boxes1d") == 0) {
if (argc >= dim_cpu . cur_arg + 1) {
if (isInteger(argv[dim_cpu . cur_arg + 1]) == 1) {
dim_cpu . boxes1d_arg = atoi(argv[dim_cpu . cur_arg + 1]);
          if (dim_cpu . boxes1d_arg <= 0) {
printf("ERROR: Wrong value to -boxes1d parameter, cannot be <=0\n");
return 0;
}
dim_cpu . cur_arg = dim_cpu . cur_arg + 1;
}
else {
printf("ERROR: Value to -boxes1d parameter in not a number\n");
return 0;
}
}
else {
printf("ERROR: Missing value to -boxes1d parameter\n");
return 0;
}
}
else {
printf("ERROR: Unknown parameter\n");
return 0;
}
}
printf("Configuration used: boxes1d = %d\n",dim_cpu . boxes1d_arg);
par_cpu = 0.5;
dim_cpu . number_boxes = (dim_cpu . boxes1d_arg * dim_cpu . boxes1d_arg * dim_cpu . boxes1d_arg);
dim_cpu . space_elem = dim_cpu . number_boxes * 100;
dim_cpu . box_mem = (dim_cpu . number_boxes * sizeof(::box_str ));
// allocate boxes
box_cpu = ((::box_str *)(malloc(dim_cpu . box_mem)));
nh = 0;
for (i = 0; i < dim_cpu . boxes1d_arg; i++) {
// home boxes in y direction
for (j = 0; j < dim_cpu . boxes1d_arg; j++) {
// home boxes in x direction
for (k = 0; k < dim_cpu . boxes1d_arg; k++) {
// current home box
box_cpu[nh] . x = k;
box_cpu[nh] . y = j;
box_cpu[nh] . z = i;
box_cpu[nh] . number = nh;
box_cpu[nh] . offset = (nh * 100);
// initialize number of neighbor boxes
box_cpu[nh] . nn = 0;
// neighbor boxes in z direction
for (l = - 1; l < 2; l++) {
// neighbor boxes in y direction
for (m = - 1; m < 2; m++) {
// neighbor boxes in x direction
for (n = - 1; n < 2; n++) {
if ((i + l >= 0 && j + m >= 0 && k + n >= 0) == true && (i + l < dim_cpu . boxes1d_arg && j + m < dim_cpu . boxes1d_arg && k + n < dim_cpu . boxes1d_arg) == true && (l == 0 && m == 0 && n == 0) == false) {
box_cpu[nh] . nei[box_cpu[nh] . nn] . x = k + n;
box_cpu[nh] . nei[box_cpu[nh] . nn] . y = j + m;
box_cpu[nh] . nei[box_cpu[nh] . nn] . z = i + l;
box_cpu[nh] . nei[box_cpu[nh] . nn] . number = box_cpu[nh] . nei[box_cpu[nh] . nn] . z * dim_cpu . boxes1d_arg * dim_cpu . boxes1d_arg + box_cpu[nh] . nei[box_cpu[nh] . nn] . y * dim_cpu . boxes1d_arg + box_cpu[nh] . nei[box_cpu[nh] . nn] . x;
box_cpu[nh] . nei[box_cpu[nh] . nn] . offset = (box_cpu[nh] . nei[box_cpu[nh] . nn] . number * 100);
box_cpu[nh] . nn = box_cpu[nh] . nn + 1;
}
// neighbor boxes in x direction
}
// neighbor boxes in y direction
}
// neighbor boxes in z direction
}
nh = nh + 1;
// home boxes in x direction
}
// home boxes in y direction
}
// home boxes in z direction
}
float *rv_cpu_v = new float [dim_cpu . space_elem];
float *rv_cpu_x = new float [dim_cpu . space_elem];
float *rv_cpu_y = new float [dim_cpu . space_elem];
float *rv_cpu_z = new float [dim_cpu . space_elem];
for (i = 0; i < dim_cpu . space_elem; i = i + 1) {
rv_cpu_v[i] = (rand() % 10 + 1) / 10.0;
rv_cpu_x[i] = (rand() % 10 + 1) / 10.0;
rv_cpu_y[i] = (rand() % 10 + 1) / 10.0;
rv_cpu_z[i] = (rand() % 10 + 1) / 10.0;
}
float *qv_cpu = new float [dim_cpu . space_elem];
for (i = 0; i < dim_cpu . space_elem; i = i + 1) {
qv_cpu[i] = (rand() % 10 + 1) / 10.0;
}
float *fv_cpu_v = new float [dim_cpu . space_elem];
float *fv_cpu_x = new float [dim_cpu . space_elem];
float *fv_cpu_y = new float [dim_cpu . space_elem];
float *fv_cpu_z = new float [dim_cpu . space_elem];
for (i = 0; i < dim_cpu . space_elem; i = i + 1) {
fv_cpu_v[i] = ((float )0.0);
fv_cpu_x[i] = ((float )0.0);
fv_cpu_y[i] = ((float )0.0);
fv_cpu_z[i] = ((float )0.0);
}
::box_str *d_box_gpu;
float *d_rv_v;
float *d_rv_x;
float *d_rv_y;
float *d_rv_z;
float *d_qv_gpu;
float *d_fv_v;
float *d_fv_x;
float *d_fv_y;
float *d_fv_z;
hipDeviceSynchronize();
::dim3 threads;
::dim3 blocks;
blocks . x = dim_cpu . number_boxes;
blocks . y = 1;
threads . x = 640;
threads . y = 1;
hipMemcpyToSymbol((dev_par),(&par_cpu),sizeof(float ));
hipMalloc((void **)(&d_box_gpu),dim_cpu . box_mem);
hipMalloc((void **)(&d_rv_v),dim_cpu . space_elem * sizeof(float ));
hipMalloc((void **)(&d_rv_x),dim_cpu . space_elem * sizeof(float ));
hipMalloc((void **)(&d_rv_y),dim_cpu . space_elem * sizeof(float ));
hipMalloc((void **)(&d_rv_z),dim_cpu . space_elem * sizeof(float ));
hipMalloc((void **)(&d_qv_gpu),dim_cpu . space_elem * sizeof(float ));
hipMalloc((void **)(&d_fv_v),dim_cpu . space_elem * sizeof(float ));
hipMalloc((void **)(&d_fv_x),dim_cpu . space_elem * sizeof(float ));
hipMalloc((void **)(&d_fv_y),dim_cpu . space_elem * sizeof(float ));
hipMalloc((void **)(&d_fv_z),dim_cpu . space_elem * sizeof(float ));
struct timeval start_t;
struct timeval end_t;
struct timeval skt_t;
struct timeval ske_t;
gettimeofday(&start_t,0L);
hipMemcpy(d_box_gpu,box_cpu,dim_cpu . box_mem,hipMemcpyHostToDevice);
hipMemcpy(d_rv_v,rv_cpu_v,dim_cpu . space_elem * sizeof(float ),hipMemcpyHostToDevice);
hipMemcpy(d_rv_x,rv_cpu_x,dim_cpu . space_elem * sizeof(float ),hipMemcpyHostToDevice);
hipMemcpy(d_rv_y,rv_cpu_y,dim_cpu . space_elem * sizeof(float ),hipMemcpyHostToDevice);
hipMemcpy(d_rv_z,rv_cpu_z,dim_cpu . space_elem * sizeof(float ),hipMemcpyHostToDevice);
hipMemcpy(d_qv_gpu,qv_cpu,dim_cpu . space_elem * sizeof(float ),hipMemcpyHostToDevice);
hipMemcpy(d_fv_v,fv_cpu_v,dim_cpu . space_elem * sizeof(float ),hipMemcpyHostToDevice);
hipMemcpy(d_fv_x,fv_cpu_x,dim_cpu . space_elem * sizeof(float ),hipMemcpyHostToDevice);
hipMemcpy(d_fv_y,fv_cpu_y,dim_cpu . space_elem * sizeof(float ),hipMemcpyHostToDevice);
hipMemcpy(d_fv_z,fv_cpu_z,dim_cpu . space_elem * sizeof(float ),hipMemcpyHostToDevice);
gettimeofday(&skt_t,0L);
hipLaunchKernelGGL(( kernel_gpu_cuda), dim3(blocks),dim3(threads), 0, 0, dim_cpu,d_box_gpu,d_rv_v,d_rv_x,d_rv_y,d_rv_z,d_qv_gpu,d_fv_v,d_fv_x,d_fv_y,d_fv_z);
checkCUDAError("Start");
hipDeviceSynchronize();
gettimeofday(&ske_t,0L);
hipMemcpy(fv_cpu_v,d_fv_v,dim_cpu . space_elem * sizeof(float ),hipMemcpyDeviceToHost);
hipMemcpy(fv_cpu_x,d_fv_x,dim_cpu . space_elem * sizeof(float ),hipMemcpyDeviceToHost);
hipMemcpy(fv_cpu_y,d_fv_y,dim_cpu . space_elem * sizeof(float ),hipMemcpyDeviceToHost);
hipMemcpy(fv_cpu_z,d_fv_z,dim_cpu . space_elem * sizeof(float ),hipMemcpyDeviceToHost);
gettimeofday(&end_t,0L);
mpf_t val_x, val_y, val_in, err;
mpf_init2(val_x, 128);
mpf_init2(val_y, 128);
mpf_init2(val_in, 128);
mpf_init2(err, 128);
FILE* infile = fopen("fv_ref.txt", "r");
for(int i = 0; i < dim_cpu.space_elem; i++) {
gmp_fscanf(infile, "%Fe\n", val_in);
mpf_set_d(val_x, fv_cpu_v[i]);
mpf_sub(val_y, val_x, val_in);
mpf_abs(val_x, val_y);
mpf_div(val_x, val_x, val_in);
if (i==0)
mpf_set(err, val_x);
else
mpf_add(err, err, val_x);
gmp_fscanf(infile, "%Fe\n", val_in);
mpf_set_d(val_x, fv_cpu_x[i]);
mpf_sub(val_y, val_x, val_in);
mpf_abs(val_x, val_y);
mpf_div(val_x, val_x, val_in);
mpf_add(err, err, val_x);
gmp_fscanf(infile, "%Fe\n", val_in);
mpf_set_d(val_x, fv_cpu_y[i]);
mpf_sub(val_y, val_x, val_in);
mpf_abs(val_x, val_y);
mpf_div(val_x, val_x, val_in);
mpf_add(err, err, val_x);
gmp_fscanf(infile, "%Fe\n", val_in);
mpf_set_d(val_x, fv_cpu_z[i]);
mpf_sub(val_y, val_x, val_in);
mpf_abs(val_x, val_y);
mpf_div(val_x, val_x, val_in);
mpf_add(err, err, val_x);
}
mpf_div_ui(err, err, 4*dim_cpu.space_elem);
fclose(infile);
gmp_printf("error: %10.5Fe\n", err);
int blockSize;
int minGridSize = dim_cpu.number_boxes;
hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, kernel_gpu_cuda, 0, 0);
printf("block: %d\n", blockSize);
((std::cout<<"time: ") << (end_t . tv_sec + end_t . tv_usec * 1e-6 - (start_t . tv_sec + start_t . tv_usec * 1e-6)))<<"\n";
((std::cout<<"kernel: ") << ((ske_t . tv_sec - skt_t . tv_sec) + (ske_t . tv_usec - skt_t . tv_usec) * 1e-6)) << endl;
hipFree(d_rv_v);
hipFree(d_rv_x);
hipFree(d_rv_y);
hipFree(d_rv_z);
hipFree(d_qv_gpu);
hipFree(d_fv_v);
hipFree(d_fv_x);
hipFree(d_fv_y);
hipFree(d_fv_z);
hipFree(d_box_gpu);
  // rv_cpu_*, qv_cpu and fv_cpu_* were allocated with new[], so release them with delete[]
  delete[] rv_cpu_v;
  delete[] rv_cpu_x;
  delete[] rv_cpu_y;
  delete[] rv_cpu_z;
  delete[] qv_cpu;
  delete[] fv_cpu_v;
  delete[] fv_cpu_x;
  delete[] fv_cpu_y;
  delete[] fv_cpu_z;
free(box_cpu);
return 0;
}
| 88d26d541a75af64ac290c998c429ff89e8fac78.cu | //#include <helper_cuda.h>
#include <helper_timer.h>
#include <mpfr.h>
#include <qd/dd_real.h>
#include "../../gpuprec/gqd/gqd.cu"
using namespace std;
void qd2gqd(dd_real* dd_data, gdd_real* gdd_data, const unsigned int numElement) {
for (unsigned int i = 0; i < numElement; i++) {
gdd_data[i].x = dd_data[i].x[0];
gdd_data[i].y = dd_data[i].x[1];
}
}
void gqd2qd(gdd_real* gdd_data, dd_real* dd_data, const unsigned int numElement) {
for (unsigned int i = 0; i < numElement; i++) {
dd_data[i].x[0] = gdd_data[i].x;
dd_data[i].x[1] = gdd_data[i].y;
}
}
void qd2gqd2(dd_real dd_data[][5], gdd_real gdd_data[][5], int d1, int d2, int numElement) {
for (unsigned int i = 0; i < d1; i++) {
for (unsigned int j = 0; j < d2; j++) {
gdd_data[i][j].x = dd_data[i][j].x[0];
gdd_data[i][j].y = dd_data[i][j].x[1];
}
}
}
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <sys/time.h>
#include <mpfr.h>
#include <math.h>
#include <string.h>
#define NUMBER_PAR_PER_BOX 100
#ifdef RD_WG_SIZE_0_0
#define NUMBER_THREADS RD_WG_SIZE_0_0
#elif defined(RD_WG_SIZE_0)
#define NUMBER_THREADS RD_WG_SIZE_0
#elif defined(RD_WG_SIZE)
#define NUMBER_THREADS RD_WG_SIZE
#else
#define NUMBER_THREADS 128
#endif
#define DOT(A,B) ((A.x)*(B.x)+(A.y)*(B.y)+(A.z)*(B.z)) // STABLE
using namespace std;
typedef struct nei_str {
// neighbor box
int x;
int y;
int z;
int number;
long offset;}nei_str;
typedef struct box_str {
// home box
int x;
int y;
int z;
int number;
long offset;
// neighbor boxes
int nn;
::nei_str nei[26];}box_str;
typedef struct dim_str {
// input arguments
int cur_arg;
int arch_arg;
int cores_arg;
int boxes1d_arg;
// system memory
long number_boxes;
long box_mem;
long space_elem;}dim_str;
int isInteger(char *str)
{
if (( *str) == '\0') {
return 0;
}
for (; ( *str) != '\0'; str++) {
if (( *str) < 48 || ( *str) > 57) {
return 0;
}
}
return 1;
}
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
printf("Cuda error: %s: %s.\n",msg,(cudaGetErrorString(err)));
fflush(0L);
exit(1);
}
}
__constant__ float dev_par;
__global__ void kernel_gpu_cuda(::dim_str d_dim_gpu,::box_str *d_box_gpu,float *d_rv_v,float *d_rv_x,float *d_rv_y,float *d_rv_z,float *d_qv_gpu,float *d_fv_v,float *d_fv_x,float *d_fv_y,float *d_fv_z)
{
int bx = blockIdx . x;
int tx = threadIdx . x;
int wtx = tx;
if (bx < d_dim_gpu . number_boxes) {
// parameters
float a2 = 2.0 * dev_par * dev_par;
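    // Each thread block handles one "home" box: its NUMBER_PAR_PER_BOX (100) particles
    // are staged in shared memory, then the block loops over the home box plus its
    // (up to 26) neighbor boxes, staging each neighbor's particles and accumulating
    // the exp(-a2 * r2) pairwise contributions into fA_v/fA_x/fA_y/fA_z.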
// home box
int first_i;
float *rA_v;
float *rA_x;
float *rA_y;
float *rA_z;
float *fA_v;
float *fA_x;
float *fA_y;
float *fA_z;
__shared__ float rA_shared_v[100];
__shared__ float rA_shared_x[100];
__shared__ float rA_shared_y[100];
__shared__ float rA_shared_z[100];
// nei box
int pointer;
int k = 0;
int first_j;
float *rB_v;
float *rB_x;
float *rB_y;
float *rB_z;
float *qB;
int j = 0;
__shared__ float rB_shared_v[100];
__shared__ float rB_shared_x[100];
__shared__ float rB_shared_y[100];
__shared__ float rB_shared_z[100];
__shared__ float qb_shared[100];
// common
float r2;
float u2;
float vij;
float fs;
float fxij;
float fyij;
float fzij;
float s_x;
float s_y;
float s_z;
first_i = d_box_gpu[bx] . offset;
rA_v =( &d_rv_v[first_i]);
rA_x =( &d_rv_x[first_i]);
rA_y =( &d_rv_y[first_i]);
rA_z =( &d_rv_z[first_i]);
fA_v =( &d_fv_v[first_i]);
fA_x =( &d_fv_x[first_i]);
fA_y =( &d_fv_y[first_i]);
fA_z =( &d_fv_z[first_i]);
while(wtx < 100){
rA_shared_v[wtx] =( rA_v[wtx]);
rA_shared_x[wtx] =( rA_x[wtx]);
rA_shared_y[wtx] =( rA_y[wtx]);
rA_shared_z[wtx] =( rA_z[wtx]);
wtx = wtx + 640;
}
wtx = tx;
__syncthreads();
for (k = 0; k < 1 + d_box_gpu[bx] . nn; k++) {
if (k == 0) {
pointer = bx;
}
else {
pointer = d_box_gpu[bx] . nei[k - 1] . number;
}
first_j = d_box_gpu[pointer] . offset;
rB_v =( &d_rv_v[first_j]);
rB_x =( &d_rv_x[first_j]);
rB_y =( &d_rv_y[first_j]);
rB_z =( &d_rv_z[first_j]);
qB =( &d_qv_gpu[first_j]);
while(wtx < 100){
rB_shared_v[wtx] =( rB_v[wtx]);
rB_shared_x[wtx] =( rB_x[wtx]);
rB_shared_y[wtx] =( rB_y[wtx]);
rB_shared_z[wtx] =( rB_z[wtx]);
qb_shared[wtx] =( qB[wtx]);
wtx = wtx + 640;
}
wtx = tx;
__syncthreads();
while(wtx < 100){
for (j = 0; j < 100; j++) {
r2 = rA_shared_v[wtx] + rB_shared_v[j] - (rA_shared_x[wtx] * rB_shared_x[j] + rA_shared_y[wtx] * rB_shared_y[j] + rA_shared_z[wtx] * rB_shared_z[j]);
u2 = a2 * r2;
vij = exp(-u2);
fs =( 2 * vij);
s_x = rA_shared_x[wtx] - rB_shared_x[j];
fxij =( fs * s_x);
s_y = rA_shared_y[wtx] - rB_shared_y[j];
fyij =( fs * s_y);
s_z = rA_shared_z[wtx] - rB_shared_z[j];
fzij =( fs * s_z);
fA_v[wtx] = fA_v[wtx] + qb_shared[j] * vij;
fA_x[wtx] = fA_x[wtx] + qb_shared[j] * fxij;
fA_y[wtx] = fA_y[wtx] + qb_shared[j] * fyij;
fA_z[wtx] = fA_z[wtx] + qb_shared[j] * fzij;
}
wtx = wtx + 640;
}
wtx = tx;
__syncthreads();
}
}
}
int main(int argc,char *argv[])
{
printf("thread block size of kernel = %d \n",640);
int i;
int j;
int k;
int l;
int m;
int n;
float par_cpu;
::dim_str dim_cpu;
::box_str *box_cpu;
int nh;
dim_cpu . boxes1d_arg = 1;
for (dim_cpu . cur_arg = 1; dim_cpu . cur_arg < argc; dim_cpu . cur_arg++) {
if (strcmp(argv[dim_cpu . cur_arg],"-boxes1d") == 0) {
if (argc >= dim_cpu . cur_arg + 1) {
if (isInteger(argv[dim_cpu . cur_arg + 1]) == 1) {
dim_cpu . boxes1d_arg = atoi(argv[dim_cpu . cur_arg + 1]);
          if (dim_cpu . boxes1d_arg <= 0) {
printf("ERROR: Wrong value to -boxes1d parameter, cannot be <=0\n");
return 0;
}
dim_cpu . cur_arg = dim_cpu . cur_arg + 1;
}
else {
printf("ERROR: Value to -boxes1d parameter in not a number\n");
return 0;
}
}
else {
printf("ERROR: Missing value to -boxes1d parameter\n");
return 0;
}
}
else {
printf("ERROR: Unknown parameter\n");
return 0;
}
}
printf("Configuration used: boxes1d = %d\n",dim_cpu . boxes1d_arg);
par_cpu = 0.5;
dim_cpu . number_boxes = (dim_cpu . boxes1d_arg * dim_cpu . boxes1d_arg * dim_cpu . boxes1d_arg);
dim_cpu . space_elem = dim_cpu . number_boxes * 100;
dim_cpu . box_mem = (dim_cpu . number_boxes * sizeof(::box_str ));
// allocate boxes
box_cpu = ((::box_str *)(malloc(dim_cpu . box_mem)));
nh = 0;
for (i = 0; i < dim_cpu . boxes1d_arg; i++) {
// home boxes in y direction
for (j = 0; j < dim_cpu . boxes1d_arg; j++) {
// home boxes in x direction
for (k = 0; k < dim_cpu . boxes1d_arg; k++) {
// current home box
box_cpu[nh] . x = k;
box_cpu[nh] . y = j;
box_cpu[nh] . z = i;
box_cpu[nh] . number = nh;
box_cpu[nh] . offset = (nh * 100);
// initialize number of neighbor boxes
box_cpu[nh] . nn = 0;
// neighbor boxes in z direction
for (l = - 1; l < 2; l++) {
// neighbor boxes in y direction
for (m = - 1; m < 2; m++) {
// neighbor boxes in x direction
for (n = - 1; n < 2; n++) {
if ((i + l >= 0 && j + m >= 0 && k + n >= 0) == true && (i + l < dim_cpu . boxes1d_arg && j + m < dim_cpu . boxes1d_arg && k + n < dim_cpu . boxes1d_arg) == true && (l == 0 && m == 0 && n == 0) == false) {
box_cpu[nh] . nei[box_cpu[nh] . nn] . x = k + n;
box_cpu[nh] . nei[box_cpu[nh] . nn] . y = j + m;
box_cpu[nh] . nei[box_cpu[nh] . nn] . z = i + l;
box_cpu[nh] . nei[box_cpu[nh] . nn] . number = box_cpu[nh] . nei[box_cpu[nh] . nn] . z * dim_cpu . boxes1d_arg * dim_cpu . boxes1d_arg + box_cpu[nh] . nei[box_cpu[nh] . nn] . y * dim_cpu . boxes1d_arg + box_cpu[nh] . nei[box_cpu[nh] . nn] . x;
box_cpu[nh] . nei[box_cpu[nh] . nn] . offset = (box_cpu[nh] . nei[box_cpu[nh] . nn] . number * 100);
box_cpu[nh] . nn = box_cpu[nh] . nn + 1;
}
// neighbor boxes in x direction
}
// neighbor boxes in y direction
}
// neighbor boxes in z direction
}
nh = nh + 1;
// home boxes in x direction
}
// home boxes in y direction
}
// home boxes in z direction
}
float *rv_cpu_v = new float [dim_cpu . space_elem];
float *rv_cpu_x = new float [dim_cpu . space_elem];
float *rv_cpu_y = new float [dim_cpu . space_elem];
float *rv_cpu_z = new float [dim_cpu . space_elem];
for (i = 0; i < dim_cpu . space_elem; i = i + 1) {
rv_cpu_v[i] = (rand() % 10 + 1) / 10.0;
rv_cpu_x[i] = (rand() % 10 + 1) / 10.0;
rv_cpu_y[i] = (rand() % 10 + 1) / 10.0;
rv_cpu_z[i] = (rand() % 10 + 1) / 10.0;
}
float *qv_cpu = new float [dim_cpu . space_elem];
for (i = 0; i < dim_cpu . space_elem; i = i + 1) {
qv_cpu[i] = (rand() % 10 + 1) / 10.0;
}
float *fv_cpu_v = new float [dim_cpu . space_elem];
float *fv_cpu_x = new float [dim_cpu . space_elem];
float *fv_cpu_y = new float [dim_cpu . space_elem];
float *fv_cpu_z = new float [dim_cpu . space_elem];
for (i = 0; i < dim_cpu . space_elem; i = i + 1) {
fv_cpu_v[i] = ((float )0.0);
fv_cpu_x[i] = ((float )0.0);
fv_cpu_y[i] = ((float )0.0);
fv_cpu_z[i] = ((float )0.0);
}
::box_str *d_box_gpu;
float *d_rv_v;
float *d_rv_x;
float *d_rv_y;
float *d_rv_z;
float *d_qv_gpu;
float *d_fv_v;
float *d_fv_x;
float *d_fv_y;
float *d_fv_z;
cudaThreadSynchronize();
::dim3 threads;
::dim3 blocks;
blocks . x = dim_cpu . number_boxes;
blocks . y = 1;
threads . x = 640;
threads . y = 1;
cudaMemcpyToSymbol((dev_par),(&par_cpu),sizeof(float ));
cudaMalloc((void **)(&d_box_gpu),dim_cpu . box_mem);
cudaMalloc((void **)(&d_rv_v),dim_cpu . space_elem * sizeof(float ));
cudaMalloc((void **)(&d_rv_x),dim_cpu . space_elem * sizeof(float ));
cudaMalloc((void **)(&d_rv_y),dim_cpu . space_elem * sizeof(float ));
cudaMalloc((void **)(&d_rv_z),dim_cpu . space_elem * sizeof(float ));
cudaMalloc((void **)(&d_qv_gpu),dim_cpu . space_elem * sizeof(float ));
cudaMalloc((void **)(&d_fv_v),dim_cpu . space_elem * sizeof(float ));
cudaMalloc((void **)(&d_fv_x),dim_cpu . space_elem * sizeof(float ));
cudaMalloc((void **)(&d_fv_y),dim_cpu . space_elem * sizeof(float ));
cudaMalloc((void **)(&d_fv_z),dim_cpu . space_elem * sizeof(float ));
struct timeval start_t;
struct timeval end_t;
struct timeval skt_t;
struct timeval ske_t;
gettimeofday(&start_t,0L);
cudaMemcpy(d_box_gpu,box_cpu,dim_cpu . box_mem,cudaMemcpyHostToDevice);
cudaMemcpy(d_rv_v,rv_cpu_v,dim_cpu . space_elem * sizeof(float ),cudaMemcpyHostToDevice);
cudaMemcpy(d_rv_x,rv_cpu_x,dim_cpu . space_elem * sizeof(float ),cudaMemcpyHostToDevice);
cudaMemcpy(d_rv_y,rv_cpu_y,dim_cpu . space_elem * sizeof(float ),cudaMemcpyHostToDevice);
cudaMemcpy(d_rv_z,rv_cpu_z,dim_cpu . space_elem * sizeof(float ),cudaMemcpyHostToDevice);
cudaMemcpy(d_qv_gpu,qv_cpu,dim_cpu . space_elem * sizeof(float ),cudaMemcpyHostToDevice);
cudaMemcpy(d_fv_v,fv_cpu_v,dim_cpu . space_elem * sizeof(float ),cudaMemcpyHostToDevice);
cudaMemcpy(d_fv_x,fv_cpu_x,dim_cpu . space_elem * sizeof(float ),cudaMemcpyHostToDevice);
cudaMemcpy(d_fv_y,fv_cpu_y,dim_cpu . space_elem * sizeof(float ),cudaMemcpyHostToDevice);
cudaMemcpy(d_fv_z,fv_cpu_z,dim_cpu . space_elem * sizeof(float ),cudaMemcpyHostToDevice);
gettimeofday(&skt_t,0L);
kernel_gpu_cuda<<<blocks,threads>>>(dim_cpu,d_box_gpu,d_rv_v,d_rv_x,d_rv_y,d_rv_z,d_qv_gpu,d_fv_v,d_fv_x,d_fv_y,d_fv_z);
checkCUDAError("Start");
cudaDeviceSynchronize();
gettimeofday(&ske_t,0L);
cudaMemcpy(fv_cpu_v, d_fv_v, dim_cpu.space_elem * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(fv_cpu_x, d_fv_x, dim_cpu.space_elem * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(fv_cpu_y, d_fv_y, dim_cpu.space_elem * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(fv_cpu_z, d_fv_z, dim_cpu.space_elem * sizeof(float), cudaMemcpyDeviceToHost);
gettimeofday(&end_t,0L);
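// Accuracy check: accumulate the relative error |gpu - ref| / |ref| of every force component
// against fv_ref.txt using 128-bit GMP floats, then report the mean over 4 * space_elem values.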
mpf_t val_x, val_y, val_in, err;
mpf_init2(val_x, 128);
mpf_init2(val_y, 128);
mpf_init2(val_in, 128);
mpf_init2(err, 128);
FILE* infile = fopen("fv_ref.txt", "r");
if (infile == NULL) {
  fprintf(stderr, "error: cannot open fv_ref.txt\n");
  return 1;
}
for(int i = 0; i < dim_cpu.space_elem; i++) {
gmp_fscanf(infile, "%Fe\n", val_in);
mpf_set_d(val_x, fv_cpu_v[i]);
mpf_sub(val_y, val_x, val_in);
mpf_abs(val_x, val_y);
mpf_div(val_x, val_x, val_in);
if (i==0)
mpf_set(err, val_x);
else
mpf_add(err, err, val_x);
gmp_fscanf(infile, "%Fe\n", val_in);
mpf_set_d(val_x, fv_cpu_x[i]);
mpf_sub(val_y, val_x, val_in);
mpf_abs(val_x, val_y);
mpf_div(val_x, val_x, val_in);
mpf_add(err, err, val_x);
gmp_fscanf(infile, "%Fe\n", val_in);
mpf_set_d(val_x, fv_cpu_y[i]);
mpf_sub(val_y, val_x, val_in);
mpf_abs(val_x, val_y);
mpf_div(val_x, val_x, val_in);
mpf_add(err, err, val_x);
gmp_fscanf(infile, "%Fe\n", val_in);
mpf_set_d(val_x, fv_cpu_z[i]);
mpf_sub(val_y, val_x, val_in);
mpf_abs(val_x, val_y);
mpf_div(val_x, val_x, val_in);
mpf_add(err, err, val_x);
}
mpf_div_ui(err, err, 4*dim_cpu.space_elem);
fclose(infile);
gmp_printf("error: %10.5Fe\n", err);
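// Report the occupancy-based block size suggested by the CUDA runtime for this kernel
// (informational only; the launch above used 640 threads per block).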
int blockSize;
int minGridSize = dim_cpu.number_boxes;
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, kernel_gpu_cuda, 0, 0);
printf("block: %d\n", blockSize);
((std::cout<<"time: ") << (end_t . tv_sec + end_t . tv_usec * 1e-6 - (start_t . tv_sec + start_t . tv_usec * 1e-6)))<<"\n";
((std::cout<<"kernel: ") << ((ske_t . tv_sec - skt_t . tv_sec) + (ske_t . tv_usec - skt_t . tv_usec) * 1e-6)) << endl;
cudaFree(d_rv_v);
cudaFree(d_rv_x);
cudaFree(d_rv_y);
cudaFree(d_rv_z);
cudaFree(d_qv_gpu);
cudaFree(d_fv_v);
cudaFree(d_fv_x);
cudaFree(d_fv_y);
cudaFree(d_fv_z);
cudaFree(d_box_gpu);
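// The host arrays created with new[] above must be released with delete[]; box_cpu keeps its original free().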
delete [] rv_cpu_v;
delete [] rv_cpu_x;
delete [] rv_cpu_y;
delete [] rv_cpu_z;
delete [] qv_cpu;
delete [] fv_cpu_v;
delete [] fv_cpu_x;
delete [] fv_cpu_y;
delete [] fv_cpu_z;
free(box_cpu);
return 0;
}
|
03ff575f7e2344405a5c99da6acc721f6d620b33.hip | // !!! This is a file automatically generated by hipify!!!
//#pragma comment (lib, "cublas.lib")
//#include "stdio.h"
//#include <hip/hip_runtime.h>
//using namespace std;
//#include <ctime>
//#include "hip/hip_runtime.h"
//#include "hiprand/hiprand_kernel.h"
//#include "device_launch_parameters.h"
//#include <stdio.h>
//#include <stdlib.h>
//
//#include <string>
//#include <iomanip>
//#include <time.h>
//#include <iostream>
//#include <cmath>
//#include <math.h>
//
//__device__ int _correct_cnt;
//
//__device__ float _res;
//__device__ float _arr[10][10];
//float arr[10][10];
//
//__global__ void test() {
// int ib = blockIdx.x;
// int ix = threadIdx.x;
// int iy = threadIdx.y;
// //for (int l = 0;l < 10;l++)
// // for (int m = 0;m < 10;m++)
// // _res += _arr[l][m];
// __shared__ float data[1024];
// int tid = threadIdx.y*blockDim.x+threadIdx.x;
// data[tid] = 0;
// if(ix<10&&iy<10)
// data[tid] = _arr[ix][iy];
// __syncthreads();
// for (int s = 1024 / 2; s > 0; s >>= 1) {
// if (tid < s)
// data[tid] += data[tid + s];
// __syncthreads();
// }
// if (tid == 0) {
// _res= data[0];
// }
//}
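// (The commented-out kernel above sums a 10x10 array with a shared-memory tree reduction: each of the
//  1024 threads stages one value, then the active half is folded in each step until data[0] holds the total.)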
//int main() {
// for (int i = 0;i < 10; i++) {
// for (int j = 0;j < 10;j++) {
// arr[i][j] = rand() % 5;
// }
// }
// hipMemcpyToSymbol(_arr, &arr, 10 * 10 * sizeof(float));
// float sum = 0;
// for (int i = 0;i < 10; i++) {
// for (int j = 0;j < 10;j++) {
// sum += arr[i][j];
// }
// }
// cout << "CPU sum: " <<sum << endl;
// test << <1, dim3(32,32)>> > ();
// float res=0;
// hipMemcpyFromSymbol(&res, _res, sizeof(float));
// cout << "GPU sum: " << res << endl;
// return 0;
//} | 03ff575f7e2344405a5c99da6acc721f6d620b33.cu | //#pragma comment (lib, "cublas.lib")
//#include "stdio.h"
//#include <cuda.h>
//using namespace std;
//#include <ctime>
//#include "cuda_runtime.h"
//#include "curand_kernel.h"
//#include "device_launch_parameters.h"
//#include <stdio.h>
//#include <stdlib.h>
//
//#include <string>
//#include <iomanip>
//#include <time.h>
//#include <iostream>
//#include <cmath>
//#include <math.h>
//
//__device__ int _correct_cnt;
//
//__device__ float _res;
//__device__ float _arr[10][10];
//float arr[10][10];
//
//__global__ void test() {
// int ib = blockIdx.x;
// int ix = threadIdx.x;
// int iy = threadIdx.y;
// //for (int l = 0;l < 10;l++)
// // for (int m = 0;m < 10;m++)
// // _res += _arr[l][m];
// __shared__ float data[1024];
// int tid = threadIdx.y*blockDim.x+threadIdx.x;
// data[tid] = 0;
// if(ix<10&&iy<10)
// data[tid] = _arr[ix][iy];
// __syncthreads();
// for (int s = 1024 / 2; s > 0; s >>= 1) {
// if (tid < s)
// data[tid] += data[tid + s];
// __syncthreads();
// }
// if (tid == 0) {
// _res= data[0];
// }
//}
//int main() {
// for (int i = 0;i < 10; i++) {
// for (int j = 0;j < 10;j++) {
// arr[i][j] = rand() % 5;
// }
// }
// cudaMemcpyToSymbol(_arr, &arr, 10 * 10 * sizeof(float));
// float sum = 0;
// for (int i = 0;i < 10; i++) {
// for (int j = 0;j < 10;j++) {
// sum += arr[i][j];
// }
// }
// cout << "CPU sum: " <<sum << endl;
// test << <1, dim3(32,32)>> > ();
// float res=0;
// cudaMemcpyFromSymbol(&res, _res, sizeof(float));
// cout << "GPU sum: " << res << endl;
// return 0;
//} |
e3d8d2b450f9e27e7903a0fb52b85817c57e6a84.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
#include <c10/util/Half.h>
#include <torch/types.h>
#include "pytorch_cuda_helper.hpp"
struct upfirdn2d_kernel_params {
const void *x;
const float *f;
void *y;
int2 up;
int2 down;
int2 pad0;
int flip;
float gain;
int4 inSize; // [width, height, channel, batch]
int4 inStride;
int2 filterSize; // [width, height]
int2 filterStride;
int4 outSize; // [width, height, channel, batch]
int4 outStride;
int sizeMinor;
int sizeMajor;
int loopMinor;
int loopMajor;
int loopX;
int launchMinor;
int launchMajor;
};
//------------------------------------------------------------------------
// CUDA kernel specialization.
struct upfirdn2d_kernel_spec {
void *kernel;
int tileOutW;
int tileOutH;
int loopMinor;
int loopX;
};
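// A negative tileOutW/tileOutH selects the generic "large" kernel; positive values describe the
// shared-memory tile used by the specialized "small" kernels.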
//------------------------------------------------------------------------
// CUDA kernel selection.
template <class T>
upfirdn2d_kernel_spec choose_upfirdn2d_kernel(const upfirdn2d_kernel_params &p);
//------------------------------------------------------------------------
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//------------------------------------------------------------------------
// Helpers.
template <class T>
struct InternalType;
template <>
struct InternalType<double> {
typedef double scalar_t;
};
template <>
struct InternalType<float> {
typedef float scalar_t;
};
template <>
struct InternalType<c10::Half> {
typedef float scalar_t;
};
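// Integer division that rounds toward negative infinity (unlike C's truncating division),
// needed when padding makes the receptive-field offsets negative.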
static __device__ __forceinline__ int floor_div(int a, int b) {
int t = 1 - a / b;
return (a + t * b) / b - t;
}
//------------------------------------------------------------------------
// Generic CUDA implementation for large filters.
template <class T>
static __global__ void upfirdn2d_kernel_large(upfirdn2d_kernel_params p) {
typedef typename InternalType<T>::scalar_t scalar_t;
// Calculate thread index.
int minorBase = blockIdx.x * blockDim.x + threadIdx.x;
int outY = minorBase / p.launchMinor;
minorBase -= outY * p.launchMinor;
int outXBase = blockIdx.y * p.loopX * blockDim.y + threadIdx.y;
int majorBase = blockIdx.z * p.loopMajor;
if (outXBase >= p.outSize.x | outY >= p.outSize.y | majorBase >= p.sizeMajor)
return;
// Setup Y receptive field.
int midY = outY * p.down.y + p.up.y - 1 - p.pad0.y;
int inY = min(max(floor_div(midY, p.up.y), 0), p.inSize.y);
int h =
min(max(floor_div(midY + p.filterSize.y, p.up.y), 0), p.inSize.y) - inY;
int filterY = midY + p.filterSize.y - (inY + 1) * p.up.y;
if (p.flip) filterY = p.filterSize.y - 1 - filterY;
// Loop over major, minor, and X.
for (int majorIdx = 0, major = majorBase;
majorIdx < p.loopMajor & major < p.sizeMajor; majorIdx++, major++)
for (int minorIdx = 0, minor = minorBase;
minorIdx < p.loopMinor & minor < p.sizeMinor;
minorIdx++, minor += p.launchMinor) {
int nc = major * p.sizeMinor + minor;
int n = nc / p.inSize.z;
int c = nc - n * p.inSize.z;
for (int loopX = 0, outX = outXBase; loopX < p.loopX & outX < p.outSize.x;
loopX++, outX += blockDim.y) {
// Setup X receptive field.
int midX = outX * p.down.x + p.up.x - 1 - p.pad0.x;
int inX = min(max(floor_div(midX, p.up.x), 0), p.inSize.x);
int w =
min(max(floor_div(midX + p.filterSize.x, p.up.x), 0), p.inSize.x) -
inX;
int filterX = midX + p.filterSize.x - (inX + 1) * p.up.x;
if (p.flip) filterX = p.filterSize.x - 1 - filterX;
// Initialize pointers.
const T *xp =
&((const T *)p.x)[inX * p.inStride.x + inY * p.inStride.y +
c * p.inStride.z + n * p.inStride.w];
const float *fp =
&p.f[filterX * p.filterStride.x + filterY * p.filterStride.y];
int filterStepX = ((p.flip) ? p.up.x : -p.up.x) * p.filterStride.x;
int filterStepY = ((p.flip) ? p.up.y : -p.up.y) * p.filterStride.y;
// Inner loop.
scalar_t v = 0;
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
v += (scalar_t)(*xp) * (scalar_t)(*fp);
xp += p.inStride.x;
fp += filterStepX;
}
xp += p.inStride.y - w * p.inStride.x;
fp += filterStepY - w * filterStepX;
}
// Store result.
v *= p.gain;
((T *)p.y)[outX * p.outStride.x + outY * p.outStride.y +
c * p.outStride.z + n * p.outStride.w] = (T)v;
}
}
}
//------------------------------------------------------------------------
// Specialized CUDA implementation for small filters.
template <class T, int upx, int upy, int downx, int downy, int filterW,
int filterH, int tileOutW, int tileOutH, int loopMinor>
static __global__ void upfirdn2d_kernel_small(upfirdn2d_kernel_params p) {
typedef typename InternalType<T>::scalar_t scalar_t;
const int tileInW = ((tileOutW - 1) * downx + filterW - 1) / upx + 1;
const int tileInH = ((tileOutH - 1) * downy + filterH - 1) / upy + 1;
__shared__ volatile scalar_t sf[filterH][filterW];
__shared__ volatile scalar_t sx[tileInH][tileInW][loopMinor];
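// sf caches the (flipped) filter taps and sx caches the input tile for loopMinor channels;
// both live in shared memory and are reused by every output pixel of the tile.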
// Calculate tile index.
int minorBase = blockIdx.x;
int tileOutY = minorBase / p.launchMinor;
minorBase -= tileOutY * p.launchMinor;
minorBase *= loopMinor;
tileOutY *= tileOutH;
int tileOutXBase = blockIdx.y * p.loopX * tileOutW;
int majorBase = blockIdx.z * p.loopMajor;
if (tileOutXBase >= p.outSize.x | tileOutY >= p.outSize.y |
majorBase >= p.sizeMajor)
return;
// Load filter (flipped).
for (int tapIdx = threadIdx.x; tapIdx < filterH * filterW;
tapIdx += blockDim.x) {
int fy = tapIdx / filterW;
int fx = tapIdx - fy * filterW;
scalar_t v = 0;
if (fx < p.filterSize.x & fy < p.filterSize.y) {
int ffx = (p.flip) ? fx : p.filterSize.x - 1 - fx;
int ffy = (p.flip) ? fy : p.filterSize.y - 1 - fy;
v = (scalar_t)p.f[ffx * p.filterStride.x + ffy * p.filterStride.y];
}
sf[fy][fx] = v;
}
// Loop over major and X.
for (int majorIdx = 0, major = majorBase;
majorIdx < p.loopMajor & major < p.sizeMajor; majorIdx++, major++) {
int baseNC = major * p.sizeMinor + minorBase;
int n = baseNC / p.inSize.z;
int baseC = baseNC - n * p.inSize.z;
for (int loopX = 0, tileOutX = tileOutXBase;
loopX < p.loopX & tileOutX < p.outSize.x;
loopX++, tileOutX += tileOutW) {
// Load input pixels.
int tileMidX = tileOutX * downx + upx - 1 - p.pad0.x;
int tileMidY = tileOutY * downy + upy - 1 - p.pad0.y;
int tileInX = floor_div(tileMidX, upx);
int tileInY = floor_div(tileMidY, upy);
__syncthreads();
for (int inIdx = threadIdx.x; inIdx < tileInH * tileInW * loopMinor;
inIdx += blockDim.x) {
int relC = inIdx;
int relInX = relC / loopMinor;
int relInY = relInX / tileInW;
relC -= relInX * loopMinor;
relInX -= relInY * tileInW;
int c = baseC + relC;
int inX = tileInX + relInX;
int inY = tileInY + relInY;
scalar_t v = 0;
if (inX >= 0 & inY >= 0 & inX < p.inSize.x & inY < p.inSize.y &
c < p.inSize.z)
v = (scalar_t)(
(const T *)p.x)[inX * p.inStride.x + inY * p.inStride.y +
c * p.inStride.z + n * p.inStride.w];
sx[relInY][relInX][relC] = v;
}
// Loop over output pixels.
__syncthreads();
for (int outIdx = threadIdx.x; outIdx < tileOutH * tileOutW * loopMinor;
outIdx += blockDim.x) {
int relC = outIdx;
int relOutX = relC / loopMinor;
int relOutY = relOutX / tileOutW;
relC -= relOutX * loopMinor;
relOutX -= relOutY * tileOutW;
int c = baseC + relC;
int outX = tileOutX + relOutX;
int outY = tileOutY + relOutY;
// Setup receptive field.
int midX = tileMidX + relOutX * downx;
int midY = tileMidY + relOutY * downy;
int inX = floor_div(midX, upx);
int inY = floor_div(midY, upy);
int relInX = inX - tileInX;
int relInY = inY - tileInY;
int filterX = (inX + 1) * upx - midX - 1; // flipped
int filterY = (inY + 1) * upy - midY - 1; // flipped
// Inner loop.
if (outX < p.outSize.x & outY < p.outSize.y & c < p.outSize.z) {
scalar_t v = 0;
#pragma unroll
for (int y = 0; y < filterH / upy; y++)
#pragma unroll
for (int x = 0; x < filterW / upx; x++)
v += sx[relInY + y][relInX + x][relC] *
sf[filterY + y * upy][filterX + x * upx];
v *= p.gain;
((T *)p.y)[outX * p.outStride.x + outY * p.outStride.y +
c * p.outStride.z + n * p.outStride.w] = (T)v;
}
}
}
}
}
//------------------------------------------------------------------------
// CUDA kernel selection.
template <class T>
upfirdn2d_kernel_spec choose_upfirdn2d_kernel(
const upfirdn2d_kernel_params &p) {
int s = p.inStride.z, fx = p.filterSize.x, fy = p.filterSize.y;
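// s == 1 means the channel stride is 1 (channels_last layout); fx/fy are the filter extents used
// to pick a specialization below.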
upfirdn2d_kernel_spec spec = {(void *)upfirdn2d_kernel_large<T>, -1, -1, 1,
4}; // contiguous
if (s == 1)
spec = {(void *)upfirdn2d_kernel_large<T>, -1, -1, 4, 1}; // channels_last
// No up/downsampling.
if (p.up.x == 1 && p.up.y == 1 && p.down.x == 1 && p.down.y == 1) {
// contiguous
if (s != 1 && fx <= 24 && fy <= 24)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 24, 24, 64, 32, 1>,
64, 32, 1, 1};
if (s != 1 && fx <= 16 && fy <= 16)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 16, 16, 64, 32, 1>,
64, 32, 1, 1};
if (s != 1 && fx <= 7 && fy <= 7)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 7, 7, 64, 16, 1>,
64, 16, 1, 1};
if (s != 1 && fx <= 6 && fy <= 6)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 6, 6, 64, 16, 1>,
64, 16, 1, 1};
if (s != 1 && fx <= 5 && fy <= 5)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 5, 5, 64, 16, 1>,
64, 16, 1, 1};
if (s != 1 && fx <= 4 && fy <= 4)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 4, 4, 64, 16, 1>,
64, 16, 1, 1};
if (s != 1 && fx <= 3 && fy <= 3)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 3, 3, 64, 16, 1>,
64, 16, 1, 1};
if (s != 1 && fx <= 24 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 24, 1, 128, 8, 1>,
128, 8, 1, 1};
if (s != 1 && fx <= 16 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 16, 1, 128, 8, 1>,
128, 8, 1, 1};
if (s != 1 && fx <= 8 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 8, 1, 128, 8, 1>,
128, 8, 1, 1};
if (s != 1 && fx <= 1 && fy <= 24)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 1, 24, 32, 32, 1>,
32, 32, 1, 1};
if (s != 1 && fx <= 1 && fy <= 16)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 1, 16, 32, 32, 1>,
32, 32, 1, 1};
if (s != 1 && fx <= 1 && fy <= 8)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 1, 8, 32, 32, 1>,
32, 32, 1, 1};
// channels_last
if (s == 1 && fx <= 24 && fy <= 24)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 24, 24, 32, 32, 1>,
32, 32, 1, 1};
if (s == 1 && fx <= 16 && fy <= 16)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 16, 16, 32, 32, 1>,
32, 32, 1, 1};
if (s == 1 && fx <= 7 && fy <= 7)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 7, 7, 16, 16, 8>,
16, 16, 8, 1};
if (s == 1 && fx <= 6 && fy <= 6)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 6, 6, 16, 16, 8>,
16, 16, 8, 1};
if (s == 1 && fx <= 5 && fy <= 5)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 5, 5, 16, 16, 8>,
16, 16, 8, 1};
if (s == 1 && fx <= 4 && fy <= 4)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 4, 4, 16, 16, 8>,
16, 16, 8, 1};
if (s == 1 && fx <= 3 && fy <= 3)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 3, 3, 16, 16, 8>,
16, 16, 8, 1};
if (s == 1 && fx <= 24 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 24, 1, 128, 1, 16>,
128, 1, 16, 1};
if (s == 1 && fx <= 16 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 16, 1, 128, 1, 16>,
128, 1, 16, 1};
if (s == 1 && fx <= 8 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 8, 1, 128, 1, 16>,
128, 1, 16, 1};
if (s == 1 && fx <= 1 && fy <= 24)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 1, 24, 1, 128, 16>,
1, 128, 16, 1};
if (s == 1 && fx <= 1 && fy <= 16)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 1, 16, 1, 128, 16>,
1, 128, 16, 1};
if (s == 1 && fx <= 1 && fy <= 8)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 1, 8, 1, 128, 16>,
1, 128, 16, 1};
}
// 2x upsampling.
if (p.up.x == 2 && p.up.y == 2 && p.down.x == 1 && p.down.y == 1) {
// contiguous
if (s != 1 && fx <= 24 && fy <= 24)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 2, 1, 1, 24, 24, 64, 32, 1>,
64, 32, 1, 1};
if (s != 1 && fx <= 16 && fy <= 16)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 2, 1, 1, 16, 16, 64, 32, 1>,
64, 32, 1, 1};
if (s != 1 && fx <= 8 && fy <= 8)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 2, 1, 1, 8, 8, 64, 16, 1>,
64, 16, 1, 1};
if (s != 1 && fx <= 6 && fy <= 6)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 2, 1, 1, 6, 6, 64, 16, 1>,
64, 16, 1, 1};
if (s != 1 && fx <= 4 && fy <= 4)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 2, 1, 1, 4, 4, 64, 16, 1>,
64, 16, 1, 1};
if (s != 1 && fx <= 2 && fy <= 2)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 2, 1, 1, 2, 2, 64, 16, 1>,
64, 16, 1, 1};
// channels_last
if (s == 1 && fx <= 24 && fy <= 24)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 2, 1, 1, 24, 24, 32, 32, 1>,
32, 32, 1, 1};
if (s == 1 && fx <= 16 && fy <= 16)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 2, 1, 1, 16, 16, 32, 32, 1>,
32, 32, 1, 1};
if (s == 1 && fx <= 8 && fy <= 8)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 2, 1, 1, 8, 8, 16, 16, 8>,
16, 16, 8, 1};
if (s == 1 && fx <= 6 && fy <= 6)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 2, 1, 1, 6, 6, 16, 16, 8>,
16, 16, 8, 1};
if (s == 1 && fx <= 4 && fy <= 4)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 2, 1, 1, 4, 4, 16, 16, 8>,
16, 16, 8, 1};
if (s == 1 && fx <= 2 && fy <= 2)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 2, 1, 1, 2, 2, 16, 16, 8>,
16, 16, 8, 1};
}
if (p.up.x == 2 && p.up.y == 1 && p.down.x == 1 && p.down.y == 1) {
// contiguous
if (s != 1 && fx <= 24 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 1, 1, 1, 24, 1, 128, 8, 1>,
128, 8, 1, 1};
if (s != 1 && fx <= 16 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 1, 1, 1, 16, 1, 128, 8, 1>,
128, 8, 1, 1};
if (s != 1 && fx <= 8 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 1, 1, 1, 8, 1, 128, 8, 1>,
128, 8, 1, 1};
// channels_last
if (s == 1 && fx <= 24 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 1, 1, 1, 24, 1, 128, 1, 16>,
128, 1, 16, 1};
if (s == 1 && fx <= 16 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 1, 1, 1, 16, 1, 128, 1, 16>,
128, 1, 16, 1};
if (s == 1 && fx <= 8 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 1, 1, 1, 8, 1, 128, 1, 16>,
128, 1, 16, 1};
}
if (p.up.x == 1 && p.up.y == 2 && p.down.x == 1 && p.down.y == 1) {
// contiguous
if (s != 1 && fx <= 1 && fy <= 24)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 2, 1, 1, 1, 24, 32, 32, 1>,
32, 32, 1, 1};
if (s != 1 && fx <= 1 && fy <= 16)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 2, 1, 1, 1, 16, 32, 32, 1>,
32, 32, 1, 1};
if (s != 1 && fx <= 1 && fy <= 8)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 2, 1, 1, 1, 8, 32, 32, 1>,
32, 32, 1, 1};
// channels_last
if (s == 1 && fx <= 1 && fy <= 24)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 2, 1, 1, 1, 24, 1, 128, 16>,
1, 128, 16, 1};
if (s == 1 && fx <= 1 && fy <= 16)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 2, 1, 1, 1, 16, 1, 128, 16>,
1, 128, 16, 1};
if (s == 1 && fx <= 1 && fy <= 8)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 2, 1, 1, 1, 8, 1, 128, 16>,
1, 128, 16, 1};
}
// 2x downsampling.
if (p.up.x == 1 && p.up.y == 1 && p.down.x == 2 && p.down.y == 2) {
// contiguous
if (s != 1 && fx <= 24 && fy <= 24)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 2, 24, 24, 32, 16, 1>,
32, 16, 1, 1};
if (s != 1 && fx <= 16 && fy <= 16)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 2, 16, 16, 32, 16, 1>,
32, 16, 1, 1};
if (s != 1 && fx <= 8 && fy <= 8)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 2, 8, 8, 32, 8, 1>, 32,
8, 1, 1};
if (s != 1 && fx <= 6 && fy <= 6)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 2, 6, 6, 32, 8, 1>, 32,
8, 1, 1};
if (s != 1 && fx <= 4 && fy <= 4)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 2, 4, 4, 32, 8, 1>, 32,
8, 1, 1};
if (s != 1 && fx <= 2 && fy <= 2)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 2, 2, 2, 32, 8, 1>, 32,
8, 1, 1};
// channels_last
if (s == 1 && fx <= 24 && fy <= 24)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 2, 24, 24, 16, 16, 1>,
16, 16, 1, 1};
if (s == 1 && fx <= 16 && fy <= 16)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 2, 16, 16, 16, 16, 1>,
16, 16, 1, 1};
if (s == 1 && fx <= 8 && fy <= 8)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 2, 8, 8, 8, 8, 8>, 8,
8, 8, 1};
if (s == 1 && fx <= 6 && fy <= 6)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 2, 6, 6, 8, 8, 8>, 8,
8, 8, 1};
if (s == 1 && fx <= 4 && fy <= 4)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 2, 4, 4, 8, 8, 8>, 8,
8, 8, 1};
if (s == 1 && fx <= 2 && fy <= 2)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 2, 2, 2, 8, 8, 8>, 8,
8, 8, 1};
}
if (p.up.x == 1 && p.up.y == 1 && p.down.x == 2 && p.down.y == 1) {
// contiguous
if (s != 1 && fx <= 24 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 1, 24, 1, 64, 8, 1>,
64, 8, 1, 1};
if (s != 1 && fx <= 16 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 1, 16, 1, 64, 8, 1>,
64, 8, 1, 1};
if (s != 1 && fx <= 8 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 1, 8, 1, 64, 8, 1>, 64,
8, 1, 1};
// channels_last
if (s == 1 && fx <= 24 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 1, 24, 1, 64, 1, 8>,
64, 1, 8, 1};
if (s == 1 && fx <= 16 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 1, 16, 1, 64, 1, 8>,
64, 1, 8, 1};
if (s == 1 && fx <= 8 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 1, 8, 1, 64, 1, 8>, 64,
1, 8, 1};
}
if (p.up.x == 1 && p.up.y == 1 && p.down.x == 1 && p.down.y == 2) {
// contiguous
if (s != 1 && fx <= 1 && fy <= 24)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 2, 1, 24, 32, 16, 1>,
32, 16, 1, 1};
if (s != 1 && fx <= 1 && fy <= 16)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 2, 1, 16, 32, 16, 1>,
32, 16, 1, 1};
if (s != 1 && fx <= 1 && fy <= 8)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 2, 1, 8, 32, 16, 1>,
32, 16, 1, 1};
// channels_last
if (s == 1 && fx <= 1 && fy <= 24)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 2, 1, 24, 1, 64, 8>, 1,
64, 8, 1};
if (s == 1 && fx <= 1 && fy <= 16)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 2, 1, 16, 1, 64, 8>, 1,
64, 8, 1};
if (s == 1 && fx <= 1 && fy <= 8)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 2, 1, 8, 1, 64, 8>, 1,
64, 8, 1};
}
// 4x upsampling.
if (p.up.x == 4 && p.up.y == 4 && p.down.x == 1 && p.down.y == 1) {
// contiguous
if (s != 1 && fx <= 48 && fy <= 48)
spec = {(void *)upfirdn2d_kernel_small<T, 4, 4, 1, 1, 48, 48, 64, 32, 1>,
64, 32, 1, 1};
if (s != 1 && fx <= 32 && fy <= 32)
spec = {(void *)upfirdn2d_kernel_small<T, 4, 4, 1, 1, 32, 32, 64, 32, 1>,
64, 32, 1, 1};
// channels_last
if (s == 1 && fx <= 48 && fy <= 48)
spec = {(void *)upfirdn2d_kernel_small<T, 4, 4, 1, 1, 48, 48, 32, 32, 1>,
32, 32, 1, 1};
if (s == 1 && fx <= 32 && fy <= 32)
spec = {(void *)upfirdn2d_kernel_small<T, 4, 4, 1, 1, 32, 32, 32, 32, 1>,
32, 32, 1, 1};
}
if (p.up.x == 4 && p.up.y == 1 && p.down.x == 1 && p.down.y == 1) {
// contiguous
if (s != 1 && fx <= 48 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 4, 1, 1, 1, 48, 1, 128, 8, 1>,
128, 8, 1, 1};
if (s != 1 && fx <= 32 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 4, 1, 1, 1, 32, 1, 128, 8, 1>,
128, 8, 1, 1};
// channels_last
if (s == 1 && fx <= 48 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 4, 1, 1, 1, 48, 1, 128, 1, 16>,
128, 1, 16, 1};
if (s == 1 && fx <= 32 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 4, 1, 1, 1, 32, 1, 128, 1, 16>,
128, 1, 16, 1};
}
if (p.up.x == 1 && p.up.y == 4 && p.down.x == 1 && p.down.y == 1) {
// contiguous
if (s != 1 && fx <= 1 && fy <= 48)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 4, 1, 1, 1, 48, 32, 32, 1>,
32, 32, 1, 1};
if (s != 1 && fx <= 1 && fy <= 32)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 4, 1, 1, 1, 32, 32, 32, 1>,
32, 32, 1, 1};
// channels_last
if (s == 1 && fx <= 1 && fy <= 48)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 4, 1, 1, 1, 48, 1, 128, 16>,
1, 128, 16, 1};
if (s == 1 && fx <= 1 && fy <= 32)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 4, 1, 1, 1, 32, 1, 128, 16>,
1, 128, 16, 1};
}
// 4x downsampling (inefficient).
if (p.up.x == 1 && p.up.y == 1 && p.down.x == 4 && p.down.y == 1) {
// contiguous
if (s != 1 && fx <= 48 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 4, 1, 48, 1, 32, 8, 1>,
32, 8, 1, 1};
if (s != 1 && fx <= 32 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 4, 1, 32, 1, 32, 8, 1>,
32, 8, 1, 1};
// channels_last
if (s == 1 && fx <= 48 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 4, 1, 48, 1, 32, 1, 8>,
32, 1, 8, 1};
if (s == 1 && fx <= 32 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 4, 1, 32, 1, 32, 1, 8>,
32, 1, 8, 1};
}
if (p.up.x == 1 && p.up.y == 1 && p.down.x == 1 && p.down.y == 4) {
// contiguous
if (s != 1 && fx <= 1 && fy <= 48)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 4, 1, 48, 32, 8, 1>,
32, 8, 1, 1};
if (s != 1 && fx <= 1 && fy <= 32)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 4, 1, 32, 32, 8, 1>,
32, 8, 1, 1};
// channels_last
if (s == 1 && fx <= 1 && fy <= 48)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 4, 1, 48, 1, 32, 8>, 1,
32, 8, 1};
if (s == 1 && fx <= 1 && fy <= 32)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 4, 1, 32, 1, 32, 8>, 1,
32, 8, 1};
}
return spec;
}
//------------------------------------------------------------------------
// Template specializations.
template upfirdn2d_kernel_spec choose_upfirdn2d_kernel<double>(
const upfirdn2d_kernel_params &p);
template upfirdn2d_kernel_spec choose_upfirdn2d_kernel<float>(
const upfirdn2d_kernel_params &p);
template upfirdn2d_kernel_spec choose_upfirdn2d_kernel<c10::Half>(
const upfirdn2d_kernel_params &p);
//------------------------------------------------------------------------
//------------------------------------------------------------------------
torch::Tensor upfirdn2d_op(torch::Tensor x, torch::Tensor f, int upx, int upy,
int downx, int downy, int padx0, int padx1,
int pady0, int pady1, bool flip, float gain) {
// Validate arguments.
TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device");
TORCH_CHECK(f.device() == x.device(),
"f must reside on the same device as x");
TORCH_CHECK(f.dtype() == torch::kFloat, "f must be float32");
TORCH_CHECK(x.numel() <= INT_MAX, "x is too large");
TORCH_CHECK(f.numel() <= INT_MAX, "f is too large");
TORCH_CHECK(x.numel() > 0, "x has zero size");
TORCH_CHECK(f.numel() > 0, "f has zero size");
TORCH_CHECK(x.dim() == 4, "x must be rank 4");
TORCH_CHECK(f.dim() == 2, "f must be rank 2");
TORCH_CHECK((x.size(0) - 1) * x.stride(0) + (x.size(1) - 1) * x.stride(1) +
(x.size(2) - 1) * x.stride(2) +
(x.size(3) - 1) * x.stride(3) <=
INT_MAX,
"x memory footprint is too large");
TORCH_CHECK(f.size(0) >= 1 && f.size(1) >= 1, "f must be at least 1x1");
TORCH_CHECK(upx >= 1 && upy >= 1, "upsampling factor must be at least 1");
TORCH_CHECK(downx >= 1 && downy >= 1,
"downsampling factor must be at least 1");
// Create output tensor.
const at::hip::OptionalHIPGuardMasqueradingAsCUDA device_guard(device_of(x));
int outW =
((int)x.size(3) * upx + padx0 + padx1 - (int)f.size(1) + downx) / downx;
int outH =
((int)x.size(2) * upy + pady0 + pady1 - (int)f.size(0) + downy) / downy;
TORCH_CHECK(outW >= 1 && outH >= 1, "output must be at least 1x1");
torch::Tensor y = torch::empty({x.size(0), x.size(1), outH, outW},
x.options(), x.suggest_memory_format());
TORCH_CHECK(y.numel() <= INT_MAX, "output is too large");
TORCH_CHECK((y.size(0) - 1) * y.stride(0) + (y.size(1) - 1) * y.stride(1) +
(y.size(2) - 1) * y.stride(2) +
(y.size(3) - 1) * y.stride(3) <=
INT_MAX,
"output memory footprint is too large");
// Initialize CUDA kernel parameters.
upfirdn2d_kernel_params p;
p.x = x.data_ptr();
p.f = f.data_ptr<float>();
p.y = y.data_ptr();
p.up = make_int2(upx, upy);
p.down = make_int2(downx, downy);
p.pad0 = make_int2(padx0, pady0);
p.flip = (flip) ? 1 : 0;
p.gain = gain;
p.inSize =
make_int4((int)x.size(3), (int)x.size(2), (int)x.size(1), (int)x.size(0));
p.inStride = make_int4((int)x.stride(3), (int)x.stride(2), (int)x.stride(1),
(int)x.stride(0));
p.filterSize = make_int2((int)f.size(1), (int)f.size(0));
p.filterStride = make_int2((int)f.stride(1), (int)f.stride(0));
p.outSize =
make_int4((int)y.size(3), (int)y.size(2), (int)y.size(1), (int)y.size(0));
p.outStride = make_int4((int)y.stride(3), (int)y.stride(2), (int)y.stride(1),
(int)y.stride(0));
p.sizeMajor = (p.inStride.z == 1) ? p.inSize.w : p.inSize.w * p.inSize.z;
p.sizeMinor = (p.inStride.z == 1) ? p.inSize.z : 1;
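// For contiguous tensors the batch*channel product is iterated as the "major" dimension;
// for channels_last the channel dimension becomes the fast "minor" dimension instead.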
// Choose CUDA kernel.
upfirdn2d_kernel_spec spec;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] {
spec = choose_upfirdn2d_kernel<scalar_t>(p);
});
// Set looping options.
p.loopMajor = (p.sizeMajor - 1) / 16384 + 1;
p.loopMinor = spec.loopMinor;
p.loopX = spec.loopX;
p.launchMinor = (p.sizeMinor - 1) / p.loopMinor + 1;
p.launchMajor = (p.sizeMajor - 1) / p.loopMajor + 1;
// Compute grid size.
dim3 blockSize, gridSize;
if (spec.tileOutW < 0) // large
{
blockSize = dim3(4, 32, 1);
gridSize =
dim3(((p.outSize.y - 1) / blockSize.x + 1) * p.launchMinor,
(p.outSize.x - 1) / (blockSize.y * p.loopX) + 1, p.launchMajor);
} else // small
{
blockSize = dim3(256, 1, 1);
gridSize =
dim3(((p.outSize.y - 1) / spec.tileOutH + 1) * p.launchMinor,
(p.outSize.x - 1) / (spec.tileOutW * p.loopX) + 1, p.launchMajor);
}
// Launch CUDA kernel.
void *args[] = {&p};
#ifdef MMCV_WITH_HIP
AT_CUDA_CHECK(hipLaunchKernel(spec.kernel, gridSize, blockSize, args, 0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA()));
#else
AT_CUDA_CHECK(cudaLaunchKernel(spec.kernel, gridSize, blockSize, args, 0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA()));
#endif
return y;
}
| e3d8d2b450f9e27e7903a0fb52b85817c57e6a84.cu | // Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
#include <c10/util/Half.h>
#include <torch/types.h>
#include "pytorch_cuda_helper.hpp"
struct upfirdn2d_kernel_params {
const void *x;
const float *f;
void *y;
int2 up;
int2 down;
int2 pad0;
int flip;
float gain;
int4 inSize; // [width, height, channel, batch]
int4 inStride;
int2 filterSize; // [width, height]
int2 filterStride;
int4 outSize; // [width, height, channel, batch]
int4 outStride;
int sizeMinor;
int sizeMajor;
int loopMinor;
int loopMajor;
int loopX;
int launchMinor;
int launchMajor;
};
//------------------------------------------------------------------------
// CUDA kernel specialization.
struct upfirdn2d_kernel_spec {
void *kernel;
int tileOutW;
int tileOutH;
int loopMinor;
int loopX;
};
//------------------------------------------------------------------------
// CUDA kernel selection.
template <class T>
upfirdn2d_kernel_spec choose_upfirdn2d_kernel(const upfirdn2d_kernel_params &p);
//------------------------------------------------------------------------
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
//
// NVIDIA CORPORATION and its licensors retain all intellectual property
// and proprietary rights in and to this software, related documentation
// and any modifications thereto. Any use, reproduction, disclosure or
// distribution of this software and related documentation without an express
// license agreement from NVIDIA CORPORATION is strictly prohibited.
//------------------------------------------------------------------------
// Helpers.
template <class T>
struct InternalType;
template <>
struct InternalType<double> {
typedef double scalar_t;
};
template <>
struct InternalType<float> {
typedef float scalar_t;
};
template <>
struct InternalType<c10::Half> {
typedef float scalar_t;
};
static __device__ __forceinline__ int floor_div(int a, int b) {
int t = 1 - a / b;
return (a + t * b) / b - t;
}
//------------------------------------------------------------------------
// Generic CUDA implementation for large filters.
template <class T>
static __global__ void upfirdn2d_kernel_large(upfirdn2d_kernel_params p) {
typedef typename InternalType<T>::scalar_t scalar_t;
// Calculate thread index.
int minorBase = blockIdx.x * blockDim.x + threadIdx.x;
int outY = minorBase / p.launchMinor;
minorBase -= outY * p.launchMinor;
int outXBase = blockIdx.y * p.loopX * blockDim.y + threadIdx.y;
int majorBase = blockIdx.z * p.loopMajor;
if (outXBase >= p.outSize.x | outY >= p.outSize.y | majorBase >= p.sizeMajor)
return;
// Setup Y receptive field.
int midY = outY * p.down.y + p.up.y - 1 - p.pad0.y;
int inY = min(max(floor_div(midY, p.up.y), 0), p.inSize.y);
int h =
min(max(floor_div(midY + p.filterSize.y, p.up.y), 0), p.inSize.y) - inY;
int filterY = midY + p.filterSize.y - (inY + 1) * p.up.y;
if (p.flip) filterY = p.filterSize.y - 1 - filterY;
// Loop over major, minor, and X.
for (int majorIdx = 0, major = majorBase;
majorIdx < p.loopMajor & major < p.sizeMajor; majorIdx++, major++)
for (int minorIdx = 0, minor = minorBase;
minorIdx < p.loopMinor & minor < p.sizeMinor;
minorIdx++, minor += p.launchMinor) {
int nc = major * p.sizeMinor + minor;
int n = nc / p.inSize.z;
int c = nc - n * p.inSize.z;
for (int loopX = 0, outX = outXBase; loopX < p.loopX & outX < p.outSize.x;
loopX++, outX += blockDim.y) {
// Setup X receptive field.
int midX = outX * p.down.x + p.up.x - 1 - p.pad0.x;
int inX = min(max(floor_div(midX, p.up.x), 0), p.inSize.x);
int w =
min(max(floor_div(midX + p.filterSize.x, p.up.x), 0), p.inSize.x) -
inX;
int filterX = midX + p.filterSize.x - (inX + 1) * p.up.x;
if (p.flip) filterX = p.filterSize.x - 1 - filterX;
// Initialize pointers.
const T *xp =
&((const T *)p.x)[inX * p.inStride.x + inY * p.inStride.y +
c * p.inStride.z + n * p.inStride.w];
const float *fp =
&p.f[filterX * p.filterStride.x + filterY * p.filterStride.y];
int filterStepX = ((p.flip) ? p.up.x : -p.up.x) * p.filterStride.x;
int filterStepY = ((p.flip) ? p.up.y : -p.up.y) * p.filterStride.y;
// Inner loop.
scalar_t v = 0;
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
v += (scalar_t)(*xp) * (scalar_t)(*fp);
xp += p.inStride.x;
fp += filterStepX;
}
xp += p.inStride.y - w * p.inStride.x;
fp += filterStepY - w * filterStepX;
}
// Store result.
v *= p.gain;
((T *)p.y)[outX * p.outStride.x + outY * p.outStride.y +
c * p.outStride.z + n * p.outStride.w] = (T)v;
}
}
}
//------------------------------------------------------------------------
// Specialized CUDA implementation for small filters.
template <class T, int upx, int upy, int downx, int downy, int filterW,
int filterH, int tileOutW, int tileOutH, int loopMinor>
static __global__ void upfirdn2d_kernel_small(upfirdn2d_kernel_params p) {
typedef typename InternalType<T>::scalar_t scalar_t;
const int tileInW = ((tileOutW - 1) * downx + filterW - 1) / upx + 1;
const int tileInH = ((tileOutH - 1) * downy + filterH - 1) / upy + 1;
__shared__ volatile scalar_t sf[filterH][filterW];
__shared__ volatile scalar_t sx[tileInH][tileInW][loopMinor];
// Calculate tile index.
int minorBase = blockIdx.x;
int tileOutY = minorBase / p.launchMinor;
minorBase -= tileOutY * p.launchMinor;
minorBase *= loopMinor;
tileOutY *= tileOutH;
int tileOutXBase = blockIdx.y * p.loopX * tileOutW;
int majorBase = blockIdx.z * p.loopMajor;
if (tileOutXBase >= p.outSize.x | tileOutY >= p.outSize.y |
majorBase >= p.sizeMajor)
return;
// Load filter (flipped).
for (int tapIdx = threadIdx.x; tapIdx < filterH * filterW;
tapIdx += blockDim.x) {
int fy = tapIdx / filterW;
int fx = tapIdx - fy * filterW;
scalar_t v = 0;
if (fx < p.filterSize.x & fy < p.filterSize.y) {
int ffx = (p.flip) ? fx : p.filterSize.x - 1 - fx;
int ffy = (p.flip) ? fy : p.filterSize.y - 1 - fy;
v = (scalar_t)p.f[ffx * p.filterStride.x + ffy * p.filterStride.y];
}
sf[fy][fx] = v;
}
// Loop over major and X.
for (int majorIdx = 0, major = majorBase;
majorIdx < p.loopMajor & major < p.sizeMajor; majorIdx++, major++) {
int baseNC = major * p.sizeMinor + minorBase;
int n = baseNC / p.inSize.z;
int baseC = baseNC - n * p.inSize.z;
for (int loopX = 0, tileOutX = tileOutXBase;
loopX < p.loopX & tileOutX < p.outSize.x;
loopX++, tileOutX += tileOutW) {
// Load input pixels.
int tileMidX = tileOutX * downx + upx - 1 - p.pad0.x;
int tileMidY = tileOutY * downy + upy - 1 - p.pad0.y;
int tileInX = floor_div(tileMidX, upx);
int tileInY = floor_div(tileMidY, upy);
__syncthreads();
for (int inIdx = threadIdx.x; inIdx < tileInH * tileInW * loopMinor;
inIdx += blockDim.x) {
int relC = inIdx;
int relInX = relC / loopMinor;
int relInY = relInX / tileInW;
relC -= relInX * loopMinor;
relInX -= relInY * tileInW;
int c = baseC + relC;
int inX = tileInX + relInX;
int inY = tileInY + relInY;
scalar_t v = 0;
if (inX >= 0 & inY >= 0 & inX < p.inSize.x & inY < p.inSize.y &
c < p.inSize.z)
v = (scalar_t)(
(const T *)p.x)[inX * p.inStride.x + inY * p.inStride.y +
c * p.inStride.z + n * p.inStride.w];
sx[relInY][relInX][relC] = v;
}
// Loop over output pixels.
__syncthreads();
for (int outIdx = threadIdx.x; outIdx < tileOutH * tileOutW * loopMinor;
outIdx += blockDim.x) {
int relC = outIdx;
int relOutX = relC / loopMinor;
int relOutY = relOutX / tileOutW;
relC -= relOutX * loopMinor;
relOutX -= relOutY * tileOutW;
int c = baseC + relC;
int outX = tileOutX + relOutX;
int outY = tileOutY + relOutY;
// Setup receptive field.
int midX = tileMidX + relOutX * downx;
int midY = tileMidY + relOutY * downy;
int inX = floor_div(midX, upx);
int inY = floor_div(midY, upy);
int relInX = inX - tileInX;
int relInY = inY - tileInY;
int filterX = (inX + 1) * upx - midX - 1; // flipped
int filterY = (inY + 1) * upy - midY - 1; // flipped
// Inner loop.
if (outX < p.outSize.x & outY < p.outSize.y & c < p.outSize.z) {
scalar_t v = 0;
#pragma unroll
for (int y = 0; y < filterH / upy; y++)
#pragma unroll
for (int x = 0; x < filterW / upx; x++)
v += sx[relInY + y][relInX + x][relC] *
sf[filterY + y * upy][filterX + x * upx];
v *= p.gain;
((T *)p.y)[outX * p.outStride.x + outY * p.outStride.y +
c * p.outStride.z + n * p.outStride.w] = (T)v;
}
}
}
}
}
//------------------------------------------------------------------------
// CUDA kernel selection.
template <class T>
upfirdn2d_kernel_spec choose_upfirdn2d_kernel(
const upfirdn2d_kernel_params &p) {
int s = p.inStride.z, fx = p.filterSize.x, fy = p.filterSize.y;
upfirdn2d_kernel_spec spec = {(void *)upfirdn2d_kernel_large<T>, -1, -1, 1,
4}; // contiguous
if (s == 1)
spec = {(void *)upfirdn2d_kernel_large<T>, -1, -1, 4, 1}; // channels_last
// No up/downsampling.
if (p.up.x == 1 && p.up.y == 1 && p.down.x == 1 && p.down.y == 1) {
// contiguous
if (s != 1 && fx <= 24 && fy <= 24)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 24, 24, 64, 32, 1>,
64, 32, 1, 1};
if (s != 1 && fx <= 16 && fy <= 16)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 16, 16, 64, 32, 1>,
64, 32, 1, 1};
if (s != 1 && fx <= 7 && fy <= 7)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 7, 7, 64, 16, 1>,
64, 16, 1, 1};
if (s != 1 && fx <= 6 && fy <= 6)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 6, 6, 64, 16, 1>,
64, 16, 1, 1};
if (s != 1 && fx <= 5 && fy <= 5)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 5, 5, 64, 16, 1>,
64, 16, 1, 1};
if (s != 1 && fx <= 4 && fy <= 4)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 4, 4, 64, 16, 1>,
64, 16, 1, 1};
if (s != 1 && fx <= 3 && fy <= 3)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 3, 3, 64, 16, 1>,
64, 16, 1, 1};
if (s != 1 && fx <= 24 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 24, 1, 128, 8, 1>,
128, 8, 1, 1};
if (s != 1 && fx <= 16 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 16, 1, 128, 8, 1>,
128, 8, 1, 1};
if (s != 1 && fx <= 8 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 8, 1, 128, 8, 1>,
128, 8, 1, 1};
if (s != 1 && fx <= 1 && fy <= 24)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 1, 24, 32, 32, 1>,
32, 32, 1, 1};
if (s != 1 && fx <= 1 && fy <= 16)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 1, 16, 32, 32, 1>,
32, 32, 1, 1};
if (s != 1 && fx <= 1 && fy <= 8)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 1, 8, 32, 32, 1>,
32, 32, 1, 1};
// channels_last
if (s == 1 && fx <= 24 && fy <= 24)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 24, 24, 32, 32, 1>,
32, 32, 1, 1};
if (s == 1 && fx <= 16 && fy <= 16)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 16, 16, 32, 32, 1>,
32, 32, 1, 1};
if (s == 1 && fx <= 7 && fy <= 7)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 7, 7, 16, 16, 8>,
16, 16, 8, 1};
if (s == 1 && fx <= 6 && fy <= 6)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 6, 6, 16, 16, 8>,
16, 16, 8, 1};
if (s == 1 && fx <= 5 && fy <= 5)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 5, 5, 16, 16, 8>,
16, 16, 8, 1};
if (s == 1 && fx <= 4 && fy <= 4)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 4, 4, 16, 16, 8>,
16, 16, 8, 1};
if (s == 1 && fx <= 3 && fy <= 3)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 3, 3, 16, 16, 8>,
16, 16, 8, 1};
if (s == 1 && fx <= 24 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 24, 1, 128, 1, 16>,
128, 1, 16, 1};
if (s == 1 && fx <= 16 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 16, 1, 128, 1, 16>,
128, 1, 16, 1};
if (s == 1 && fx <= 8 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 8, 1, 128, 1, 16>,
128, 1, 16, 1};
if (s == 1 && fx <= 1 && fy <= 24)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 1, 24, 1, 128, 16>,
1, 128, 16, 1};
if (s == 1 && fx <= 1 && fy <= 16)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 1, 16, 1, 128, 16>,
1, 128, 16, 1};
if (s == 1 && fx <= 1 && fy <= 8)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 1, 1, 8, 1, 128, 16>,
1, 128, 16, 1};
}
// 2x upsampling.
if (p.up.x == 2 && p.up.y == 2 && p.down.x == 1 && p.down.y == 1) {
// contiguous
if (s != 1 && fx <= 24 && fy <= 24)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 2, 1, 1, 24, 24, 64, 32, 1>,
64, 32, 1, 1};
if (s != 1 && fx <= 16 && fy <= 16)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 2, 1, 1, 16, 16, 64, 32, 1>,
64, 32, 1, 1};
if (s != 1 && fx <= 8 && fy <= 8)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 2, 1, 1, 8, 8, 64, 16, 1>,
64, 16, 1, 1};
if (s != 1 && fx <= 6 && fy <= 6)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 2, 1, 1, 6, 6, 64, 16, 1>,
64, 16, 1, 1};
if (s != 1 && fx <= 4 && fy <= 4)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 2, 1, 1, 4, 4, 64, 16, 1>,
64, 16, 1, 1};
if (s != 1 && fx <= 2 && fy <= 2)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 2, 1, 1, 2, 2, 64, 16, 1>,
64, 16, 1, 1};
// channels_last
if (s == 1 && fx <= 24 && fy <= 24)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 2, 1, 1, 24, 24, 32, 32, 1>,
32, 32, 1, 1};
if (s == 1 && fx <= 16 && fy <= 16)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 2, 1, 1, 16, 16, 32, 32, 1>,
32, 32, 1, 1};
if (s == 1 && fx <= 8 && fy <= 8)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 2, 1, 1, 8, 8, 16, 16, 8>,
16, 16, 8, 1};
if (s == 1 && fx <= 6 && fy <= 6)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 2, 1, 1, 6, 6, 16, 16, 8>,
16, 16, 8, 1};
if (s == 1 && fx <= 4 && fy <= 4)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 2, 1, 1, 4, 4, 16, 16, 8>,
16, 16, 8, 1};
if (s == 1 && fx <= 2 && fy <= 2)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 2, 1, 1, 2, 2, 16, 16, 8>,
16, 16, 8, 1};
}
if (p.up.x == 2 && p.up.y == 1 && p.down.x == 1 && p.down.y == 1) {
// contiguous
if (s != 1 && fx <= 24 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 1, 1, 1, 24, 1, 128, 8, 1>,
128, 8, 1, 1};
if (s != 1 && fx <= 16 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 1, 1, 1, 16, 1, 128, 8, 1>,
128, 8, 1, 1};
if (s != 1 && fx <= 8 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 1, 1, 1, 8, 1, 128, 8, 1>,
128, 8, 1, 1};
// channels_last
if (s == 1 && fx <= 24 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 1, 1, 1, 24, 1, 128, 1, 16>,
128, 1, 16, 1};
if (s == 1 && fx <= 16 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 1, 1, 1, 16, 1, 128, 1, 16>,
128, 1, 16, 1};
if (s == 1 && fx <= 8 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 2, 1, 1, 1, 8, 1, 128, 1, 16>,
128, 1, 16, 1};
}
if (p.up.x == 1 && p.up.y == 2 && p.down.x == 1 && p.down.y == 1) {
// contiguous
if (s != 1 && fx <= 1 && fy <= 24)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 2, 1, 1, 1, 24, 32, 32, 1>,
32, 32, 1, 1};
if (s != 1 && fx <= 1 && fy <= 16)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 2, 1, 1, 1, 16, 32, 32, 1>,
32, 32, 1, 1};
if (s != 1 && fx <= 1 && fy <= 8)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 2, 1, 1, 1, 8, 32, 32, 1>,
32, 32, 1, 1};
// channels_last
if (s == 1 && fx <= 1 && fy <= 24)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 2, 1, 1, 1, 24, 1, 128, 16>,
1, 128, 16, 1};
if (s == 1 && fx <= 1 && fy <= 16)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 2, 1, 1, 1, 16, 1, 128, 16>,
1, 128, 16, 1};
if (s == 1 && fx <= 1 && fy <= 8)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 2, 1, 1, 1, 8, 1, 128, 16>,
1, 128, 16, 1};
}
// 2x downsampling.
if (p.up.x == 1 && p.up.y == 1 && p.down.x == 2 && p.down.y == 2) {
// contiguous
if (s != 1 && fx <= 24 && fy <= 24)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 2, 24, 24, 32, 16, 1>,
32, 16, 1, 1};
if (s != 1 && fx <= 16 && fy <= 16)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 2, 16, 16, 32, 16, 1>,
32, 16, 1, 1};
if (s != 1 && fx <= 8 && fy <= 8)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 2, 8, 8, 32, 8, 1>, 32,
8, 1, 1};
if (s != 1 && fx <= 6 && fy <= 6)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 2, 6, 6, 32, 8, 1>, 32,
8, 1, 1};
if (s != 1 && fx <= 4 && fy <= 4)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 2, 4, 4, 32, 8, 1>, 32,
8, 1, 1};
if (s != 1 && fx <= 2 && fy <= 2)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 2, 2, 2, 32, 8, 1>, 32,
8, 1, 1};
// channels_last
if (s == 1 && fx <= 24 && fy <= 24)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 2, 24, 24, 16, 16, 1>,
16, 16, 1, 1};
if (s == 1 && fx <= 16 && fy <= 16)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 2, 16, 16, 16, 16, 1>,
16, 16, 1, 1};
if (s == 1 && fx <= 8 && fy <= 8)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 2, 8, 8, 8, 8, 8>, 8,
8, 8, 1};
if (s == 1 && fx <= 6 && fy <= 6)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 2, 6, 6, 8, 8, 8>, 8,
8, 8, 1};
if (s == 1 && fx <= 4 && fy <= 4)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 2, 4, 4, 8, 8, 8>, 8,
8, 8, 1};
if (s == 1 && fx <= 2 && fy <= 2)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 2, 2, 2, 8, 8, 8>, 8,
8, 8, 1};
}
if (p.up.x == 1 && p.up.y == 1 && p.down.x == 2 && p.down.y == 1) {
// contiguous
if (s != 1 && fx <= 24 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 1, 24, 1, 64, 8, 1>,
64, 8, 1, 1};
if (s != 1 && fx <= 16 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 1, 16, 1, 64, 8, 1>,
64, 8, 1, 1};
if (s != 1 && fx <= 8 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 1, 8, 1, 64, 8, 1>, 64,
8, 1, 1};
// channels_last
if (s == 1 && fx <= 24 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 1, 24, 1, 64, 1, 8>,
64, 1, 8, 1};
if (s == 1 && fx <= 16 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 1, 16, 1, 64, 1, 8>,
64, 1, 8, 1};
if (s == 1 && fx <= 8 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 2, 1, 8, 1, 64, 1, 8>, 64,
1, 8, 1};
}
if (p.up.x == 1 && p.up.y == 1 && p.down.x == 1 && p.down.y == 2) {
// contiguous
if (s != 1 && fx <= 1 && fy <= 24)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 2, 1, 24, 32, 16, 1>,
32, 16, 1, 1};
if (s != 1 && fx <= 1 && fy <= 16)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 2, 1, 16, 32, 16, 1>,
32, 16, 1, 1};
if (s != 1 && fx <= 1 && fy <= 8)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 2, 1, 8, 32, 16, 1>,
32, 16, 1, 1};
// channels_last
if (s == 1 && fx <= 1 && fy <= 24)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 2, 1, 24, 1, 64, 8>, 1,
64, 8, 1};
if (s == 1 && fx <= 1 && fy <= 16)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 2, 1, 16, 1, 64, 8>, 1,
64, 8, 1};
if (s == 1 && fx <= 1 && fy <= 8)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 2, 1, 8, 1, 64, 8>, 1,
64, 8, 1};
}
// 4x upsampling.
if (p.up.x == 4 && p.up.y == 4 && p.down.x == 1 && p.down.y == 1) {
// contiguous
if (s != 1 && fx <= 48 && fy <= 48)
spec = {(void *)upfirdn2d_kernel_small<T, 4, 4, 1, 1, 48, 48, 64, 32, 1>,
64, 32, 1, 1};
if (s != 1 && fx <= 32 && fy <= 32)
spec = {(void *)upfirdn2d_kernel_small<T, 4, 4, 1, 1, 32, 32, 64, 32, 1>,
64, 32, 1, 1};
// channels_last
if (s == 1 && fx <= 48 && fy <= 48)
spec = {(void *)upfirdn2d_kernel_small<T, 4, 4, 1, 1, 48, 48, 32, 32, 1>,
32, 32, 1, 1};
if (s == 1 && fx <= 32 && fy <= 32)
spec = {(void *)upfirdn2d_kernel_small<T, 4, 4, 1, 1, 32, 32, 32, 32, 1>,
32, 32, 1, 1};
}
if (p.up.x == 4 && p.up.y == 1 && p.down.x == 1 && p.down.y == 1) {
// contiguous
if (s != 1 && fx <= 48 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 4, 1, 1, 1, 48, 1, 128, 8, 1>,
128, 8, 1, 1};
if (s != 1 && fx <= 32 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 4, 1, 1, 1, 32, 1, 128, 8, 1>,
128, 8, 1, 1};
// channels_last
if (s == 1 && fx <= 48 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 4, 1, 1, 1, 48, 1, 128, 1, 16>,
128, 1, 16, 1};
if (s == 1 && fx <= 32 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 4, 1, 1, 1, 32, 1, 128, 1, 16>,
128, 1, 16, 1};
}
if (p.up.x == 1 && p.up.y == 4 && p.down.x == 1 && p.down.y == 1) {
// contiguous
if (s != 1 && fx <= 1 && fy <= 48)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 4, 1, 1, 1, 48, 32, 32, 1>,
32, 32, 1, 1};
if (s != 1 && fx <= 1 && fy <= 32)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 4, 1, 1, 1, 32, 32, 32, 1>,
32, 32, 1, 1};
// channels_last
if (s == 1 && fx <= 1 && fy <= 48)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 4, 1, 1, 1, 48, 1, 128, 16>,
1, 128, 16, 1};
if (s == 1 && fx <= 1 && fy <= 32)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 4, 1, 1, 1, 32, 1, 128, 16>,
1, 128, 16, 1};
}
// 4x downsampling (inefficient).
if (p.up.x == 1 && p.up.y == 1 && p.down.x == 4 && p.down.y == 1) {
// contiguous
if (s != 1 && fx <= 48 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 4, 1, 48, 1, 32, 8, 1>,
32, 8, 1, 1};
if (s != 1 && fx <= 32 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 4, 1, 32, 1, 32, 8, 1>,
32, 8, 1, 1};
// channels_last
if (s == 1 && fx <= 48 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 4, 1, 48, 1, 32, 1, 8>,
32, 1, 8, 1};
if (s == 1 && fx <= 32 && fy <= 1)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 4, 1, 32, 1, 32, 1, 8>,
32, 1, 8, 1};
}
if (p.up.x == 1 && p.up.y == 1 && p.down.x == 1 && p.down.y == 4) {
// contiguous
if (s != 1 && fx <= 1 && fy <= 48)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 4, 1, 48, 32, 8, 1>,
32, 8, 1, 1};
if (s != 1 && fx <= 1 && fy <= 32)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 4, 1, 32, 32, 8, 1>,
32, 8, 1, 1};
// channels_last
if (s == 1 && fx <= 1 && fy <= 48)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 4, 1, 48, 1, 32, 8>, 1,
32, 8, 1};
if (s == 1 && fx <= 1 && fy <= 32)
spec = {(void *)upfirdn2d_kernel_small<T, 1, 1, 1, 4, 1, 32, 1, 32, 8>, 1,
32, 8, 1};
}
return spec;
}
//------------------------------------------------------------------------
// Template specializations.
template upfirdn2d_kernel_spec choose_upfirdn2d_kernel<double>(
const upfirdn2d_kernel_params &p);
template upfirdn2d_kernel_spec choose_upfirdn2d_kernel<float>(
const upfirdn2d_kernel_params &p);
template upfirdn2d_kernel_spec choose_upfirdn2d_kernel<c10::Half>(
const upfirdn2d_kernel_params &p);
//------------------------------------------------------------------------
//------------------------------------------------------------------------
torch::Tensor upfirdn2d_op(torch::Tensor x, torch::Tensor f, int upx, int upy,
int downx, int downy, int padx0, int padx1,
int pady0, int pady1, bool flip, float gain) {
// Validate arguments.
TORCH_CHECK(x.is_cuda(), "x must reside on CUDA device");
TORCH_CHECK(f.device() == x.device(),
"f must reside on the same device as x");
TORCH_CHECK(f.dtype() == torch::kFloat, "f must be float32");
TORCH_CHECK(x.numel() <= INT_MAX, "x is too large");
TORCH_CHECK(f.numel() <= INT_MAX, "f is too large");
TORCH_CHECK(x.numel() > 0, "x has zero size");
TORCH_CHECK(f.numel() > 0, "f has zero size");
TORCH_CHECK(x.dim() == 4, "x must be rank 4");
TORCH_CHECK(f.dim() == 2, "f must be rank 2");
TORCH_CHECK((x.size(0) - 1) * x.stride(0) + (x.size(1) - 1) * x.stride(1) +
(x.size(2) - 1) * x.stride(2) +
(x.size(3) - 1) * x.stride(3) <=
INT_MAX,
"x memory footprint is too large");
TORCH_CHECK(f.size(0) >= 1 && f.size(1) >= 1, "f must be at least 1x1");
TORCH_CHECK(upx >= 1 && upy >= 1, "upsampling factor must be at least 1");
TORCH_CHECK(downx >= 1 && downy >= 1,
"downsampling factor must be at least 1");
// Create output tensor.
const at::cuda::OptionalCUDAGuard device_guard(device_of(x));
int outW =
((int)x.size(3) * upx + padx0 + padx1 - (int)f.size(1) + downx) / downx;
int outH =
((int)x.size(2) * upy + pady0 + pady1 - (int)f.size(0) + downy) / downy;
TORCH_CHECK(outW >= 1 && outH >= 1, "output must be at least 1x1");
torch::Tensor y = torch::empty({x.size(0), x.size(1), outH, outW},
x.options(), x.suggest_memory_format());
TORCH_CHECK(y.numel() <= INT_MAX, "output is too large");
TORCH_CHECK((y.size(0) - 1) * y.stride(0) + (y.size(1) - 1) * y.stride(1) +
(y.size(2) - 1) * y.stride(2) +
(y.size(3) - 1) * y.stride(3) <=
INT_MAX,
"output memory footprint is too large");
// Initialize CUDA kernel parameters.
upfirdn2d_kernel_params p;
p.x = x.data_ptr();
p.f = f.data_ptr<float>();
p.y = y.data_ptr();
p.up = make_int2(upx, upy);
p.down = make_int2(downx, downy);
p.pad0 = make_int2(padx0, pady0);
p.flip = (flip) ? 1 : 0;
p.gain = gain;
p.inSize =
make_int4((int)x.size(3), (int)x.size(2), (int)x.size(1), (int)x.size(0));
p.inStride = make_int4((int)x.stride(3), (int)x.stride(2), (int)x.stride(1),
(int)x.stride(0));
p.filterSize = make_int2((int)f.size(1), (int)f.size(0));
p.filterStride = make_int2((int)f.stride(1), (int)f.stride(0));
p.outSize =
make_int4((int)y.size(3), (int)y.size(2), (int)y.size(1), (int)y.size(0));
p.outStride = make_int4((int)y.stride(3), (int)y.stride(2), (int)y.stride(1),
(int)y.stride(0));
p.sizeMajor = (p.inStride.z == 1) ? p.inSize.w : p.inSize.w * p.inSize.z;
p.sizeMinor = (p.inStride.z == 1) ? p.inSize.z : 1;
// Choose CUDA kernel.
upfirdn2d_kernel_spec spec;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(x.scalar_type(), "upfirdn2d_cuda", [&] {
spec = choose_upfirdn2d_kernel<scalar_t>(p);
});
// Set looping options.
p.loopMajor = (p.sizeMajor - 1) / 16384 + 1;
p.loopMinor = spec.loopMinor;
p.loopX = spec.loopX;
p.launchMinor = (p.sizeMinor - 1) / p.loopMinor + 1;
p.launchMajor = (p.sizeMajor - 1) / p.loopMajor + 1;
// Compute grid size.
dim3 blockSize, gridSize;
if (spec.tileOutW < 0) // large
{
blockSize = dim3(4, 32, 1);
gridSize =
dim3(((p.outSize.y - 1) / blockSize.x + 1) * p.launchMinor,
(p.outSize.x - 1) / (blockSize.y * p.loopX) + 1, p.launchMajor);
} else // small
{
blockSize = dim3(256, 1, 1);
gridSize =
dim3(((p.outSize.y - 1) / spec.tileOutH + 1) * p.launchMinor,
(p.outSize.x - 1) / (spec.tileOutW * p.loopX) + 1, p.launchMajor);
}
// Launch CUDA kernel.
void *args[] = {&p};
#ifdef MMCV_WITH_HIP
AT_CUDA_CHECK(hipLaunchKernel(spec.kernel, gridSize, blockSize, args, 0,
at::cuda::getCurrentCUDAStream()));
#else
AT_CUDA_CHECK(cudaLaunchKernel(spec.kernel, gridSize, blockSize, args, 0,
at::cuda::getCurrentCUDAStream()));
#endif
return y;
}
|
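// ---------------------------------------------------------------------------
// For reference: a minimal, self-contained sketch of the launch pattern used
// by upfirdn2d_op above, where a kernel chosen at runtime is started through
// cudaLaunchKernel() with a single params struct passed via a void* array.
// This is illustrative only, not the library code; DemoParams, demo_kernel
// and demo_launch are invented names.
#include <cuda_runtime.h>
#include <cstdio>

struct DemoParams { const float *in; float *out; int n; };

__global__ void demo_kernel(DemoParams p) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < p.n) p.out[i] = 2.0f * p.in[i];  // trivial stand-in for the filter math
}

cudaError_t demo_launch(DemoParams p, cudaStream_t stream) {
    // The kernel is held as a plain function pointer, like spec.kernel above,
    // so one call site can serve many template specializations.
    void *kernel = (void *)demo_kernel;
    dim3 blockSize(256, 1, 1);
    dim3 gridSize((p.n + blockSize.x - 1) / blockSize.x, 1, 1);
    void *args[] = {&p};  // one struct argument, exactly as in upfirdn2d_op
    return cudaLaunchKernel(kernel, gridSize, blockSize, args, 0, stream);
}

int main() {
    const int n = 1 << 16;
    float *in = nullptr, *out = nullptr;
    cudaMalloc(&in, n * sizeof(float));
    cudaMalloc(&out, n * sizeof(float));
    cudaMemset(in, 0, n * sizeof(float));
    DemoParams p{in, out, n};
    cudaError_t err = demo_launch(p, 0);
    cudaDeviceSynchronize();
    printf("launch: %s\n", cudaGetErrorString(err));
    cudaFree(in);
    cudaFree(out);
    return 0;
}
// ---------------------------------------------------------------------------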
9851bb2d1b22d061e05015ae4b6e7f9e156fa748.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <string>
#include "paddle/fluid/operators/fused/fused_seqpool_cvm_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h"
#include "paddle/phi/core/mixed_vector.h"
namespace paddle {
namespace operators {
template <typename T>
using Vector = phi::Vector<T>;
#define CUDA_KERNEL_LOOP(i, n) \
for (auto i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
// normal
template <typename T>
__global__ void FusedSeqpoolKernelNormal(const size_t N,
T **input_values,
T **seqpool_output_values,
size_t **lods_values,
const int batch_size,
const int embedding_size,
const float pad_value) {
CUDA_KERNEL_LOOP(i, N) {
int key = i / embedding_size;
int offset = i % embedding_size;
int x = key / batch_size; // slot id
int y = key % batch_size; // ins id
auto &start = *(lods_values[x] + y);
auto &end = *(lods_values[x] + y + 1);
T val = static_cast<T>(pad_value);
for (auto k = start; k < end; ++k) {
val += *(input_values[x] + k * embedding_size + offset);
}
*(seqpool_output_values[x] + y * embedding_size + offset) = val;
}
}
// join need show click input
template <typename T>
__global__ void FusedCVMKernelWithCVM(const size_t N,
T **output_values,
T **seqpool_output_values,
const int batch_size,
const int embedding_size,
const int cvm_offset) {
CUDA_KERNEL_LOOP(i, N) {
int key = i / embedding_size;
int offset = i % embedding_size;
int x = key / batch_size; // slot id
int y = key % batch_size; // ins id
if (offset == 0) { // show
*(output_values[x] + y * embedding_size) =
log(*(seqpool_output_values[x] + y * embedding_size) + 1);
} else if (offset == 1) { // click
*(output_values[x] + y * embedding_size + offset) =
log(*(seqpool_output_values[x] + y * embedding_size + 1) + 1) -
log(*(seqpool_output_values[x] + y * embedding_size) + 1);
} else {
*(output_values[x] + y * embedding_size + offset) =
*(seqpool_output_values[x] + y * embedding_size + offset);
}
}
}
// update not need show click input
template <typename T>
__global__ void FusedCVMKernelNoCVM(const size_t N,
T **output_values,
T **seqpool_output_values,
const int batch_size,
const int no_cvm_embedding_size,
const int cvm_offset) {
CUDA_KERNEL_LOOP(i, N) {
int key = i / no_cvm_embedding_size;
int offset = i % no_cvm_embedding_size;
int x = key / batch_size; // slot id
int y = key % batch_size; // ins id
// no cvm
*(output_values[x] + y * no_cvm_embedding_size + offset) =
*(seqpool_output_values[x] + y * (no_cvm_embedding_size + cvm_offset) +
offset + cvm_offset);
}
}
template <typename T>
void FusedSeqpoolCVM(const framework::ExecutionContext
&ctx, // const paddle::platform::Place &place,
const std::vector<const T *> &input_data,
const std::vector<T *> &output_data,
const std::vector<T *> &seqpool_output_data,
std::vector<const size_t *> lods,
const int batch_size,
const int slot_num,
const int embedding_size,
const float padding_value,
const bool use_cvm,
const int cvm_offset) {
auto stream = ctx.template device_context<phi::GPUContext>().stream();
auto &dev_ctx = ctx.template device_context<phi::GPUContext>();
size_t total_ptr_len = input_data.size() + output_data.size() +
seqpool_output_data.size() + lods.size();
auto temp_ptr =
memory::AllocShared(ctx.GetPlace(), total_ptr_len * sizeof(void *));
void *ptr = temp_ptr->ptr();
#ifdef PADDLE_WITH_HIP
T **gpu_input_values = reinterpret_cast<T **>(temp_ptr->ptr());
platform::GpuMemcpyAsync(gpu_input_values,
input_data.data(),
input_data.size() * sizeof(T *),
hipMemcpyHostToDevice,
stream);
T **gpu_output_values =
reinterpret_cast<T **>(&gpu_input_values[input_data.size()]);
platform::GpuMemcpyAsync(gpu_output_values,
output_data.data(),
output_data.size() * sizeof(T *),
hipMemcpyHostToDevice,
stream);
T **gpu_seqpool_output_values =
reinterpret_cast<T **>(&gpu_output_values[output_data.size()]);
platform::GpuMemcpyAsync(gpu_seqpool_output_values,
seqpool_output_data.data(),
seqpool_output_data.size() * sizeof(T *),
hipMemcpyHostToDevice,
stream);
size_t **lods_values = reinterpret_cast<size_t **>(
&gpu_seqpool_output_values[seqpool_output_data.size()]);
platform::GpuMemcpyAsync(lods_values,
lods.data(),
lods.size() * sizeof(size_t *),
hipMemcpyHostToDevice,
stream);
#else
T **gpu_input_values = reinterpret_cast<T **>(temp_ptr->ptr());
platform::GpuMemcpyAsync(gpu_input_values,
input_data.data(),
input_data.size() * sizeof(T *),
hipMemcpyHostToDevice,
stream);
T **gpu_output_values =
reinterpret_cast<T **>(&gpu_input_values[input_data.size()]);
platform::GpuMemcpyAsync(gpu_output_values,
output_data.data(),
output_data.size() * sizeof(T *),
hipMemcpyHostToDevice,
stream);
T **gpu_seqpool_output_values =
reinterpret_cast<T **>(&gpu_output_values[output_data.size()]);
platform::GpuMemcpyAsync(gpu_seqpool_output_values,
seqpool_output_data.data(),
seqpool_output_data.size() * sizeof(T *),
hipMemcpyHostToDevice,
stream);
size_t **lods_values = reinterpret_cast<size_t **>(
&gpu_seqpool_output_values[seqpool_output_data.size()]);
platform::GpuMemcpyAsync(lods_values,
lods.data(),
lods.size() * sizeof(size_t *),
hipMemcpyHostToDevice,
stream);
#endif
size_t N = static_cast<size_t>(batch_size * slot_num * embedding_size);
platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(dev_ctx, N);
// first sum pool
hipLaunchKernelGGL(( FusedSeqpoolKernelNormal), dim3(config.block_per_grid.x),
dim3(config.thread_per_block.x),
0,
stream, N,
gpu_input_values,
gpu_seqpool_output_values,
lods_values,
batch_size,
embedding_size,
padding_value);
// second log
if (use_cvm) {
hipLaunchKernelGGL(( FusedCVMKernelWithCVM), dim3(config.block_per_grid.x),
dim3(config.thread_per_block.x),
0,
stream, N,
gpu_output_values,
gpu_seqpool_output_values,
batch_size,
embedding_size,
cvm_offset);
} else {
// not need show click input
N = static_cast<size_t>(batch_size * slot_num *
(embedding_size - cvm_offset));
platform::GpuLaunchConfig config =
platform::GetGpuLaunchConfig1D(dev_ctx, N);
hipLaunchKernelGGL(( FusedCVMKernelNoCVM), dim3(config.block_per_grid.x),
dim3(config.thread_per_block.x),
0,
stream, N,
gpu_output_values,
gpu_seqpool_output_values,
batch_size,
(embedding_size - cvm_offset),
cvm_offset);
}
}
// join grad
template <typename T>
__global__ void FusedSeqpoolCVMGradKernelWithCVM(const size_t N,
T **out_grads_values,
T **in_grads_values,
T **cvm_values,
size_t **lods_values,
const int batch_size,
const int embedding_size,
const int cvm_offset) {
CUDA_KERNEL_LOOP(i, N) {
int key = i / embedding_size;
int offset = i % embedding_size; // embedx offset
int x = key / batch_size; // slot id
int y = key % batch_size; // ins id
T &val = (offset < cvm_offset)
? *(cvm_values[x] + y * cvm_offset + offset)
: *(out_grads_values[x] + y * embedding_size + offset);
auto &start = *(lods_values[x] + y);
auto &end = *(lods_values[x] + y + 1);
for (auto k = start; k < end; ++k) {
*(in_grads_values[x] + k * embedding_size + offset) = val;
}
}
}
// join only show not has click
template <typename T>
__global__ void FusedSeqpoolCVMGradKernelWithShow(const size_t N,
T **out_grads_values,
T **in_grads_values,
T **cvm_values,
size_t **lods_values,
const int batch_size,
const int embedding_size,
const int cvm_offset) {
CUDA_KERNEL_LOOP(i, N) {
int key = i / embedding_size;
int offset = i % embedding_size; // embedx offset
int x = key / batch_size; // slot id
int y = key % batch_size; // ins id
T &val =
(offset < cvm_offset)
? *(cvm_values[x] + y * cvm_offset + offset)
: *(out_grads_values[x] + y * (embedding_size - 1) + offset - 1);
auto &start = *(lods_values[x] + y);
auto &end = *(lods_values[x] + y + 1);
for (auto k = start; k < end; ++k) {
*(in_grads_values[x] + k * embedding_size + offset) = val;
}
}
}
// update grad
template <typename T>
__global__ void FusedSeqpoolCVMGradKernelNoCVM(const size_t N,
T **out_grads_values,
T **in_grads_values,
T **cvm_values,
size_t **lods_values,
const int batch_size,
const int embedding_size,
const int cvm_offset) {
CUDA_KERNEL_LOOP(i, N) {
int key = i / embedding_size;
int offset = i % embedding_size; // embedx offset
int x = key / batch_size; // slot id
int y = key % batch_size; // ins id
T &val = (offset < cvm_offset)
? *(cvm_values[x] + y * cvm_offset + offset)
: *(out_grads_values[x] + y * (embedding_size - cvm_offset) +
offset - cvm_offset);
auto &start = *(lods_values[x] + y);
auto &end = *(lods_values[x] + y + 1);
for (auto k = start; k < end; ++k) {
*(in_grads_values[x] + k * embedding_size + offset) = val;
}
}
}
template <typename T>
void FusedSeqpoolCVMGrad(const framework::ExecutionContext &ctx,
const std::vector<const T *> &out_grads_data,
const std::vector<T *> &in_grads_data,
const std::vector<const T *> &cvm_data,
const std::vector<const size_t *> &lods,
const int batch_size,
const int slot_num,
const int embedding_size,
const bool use_cvm,
const int cvm_offset) {
auto stream = ctx.template device_context<phi::GPUContext>().stream();
auto &dev_ctx = ctx.template device_context<phi::GPUContext>();
size_t total_ptr_len = out_grads_data.size() + in_grads_data.size() +
cvm_data.size() + lods.size();
auto temp_ptr =
memory::AllocShared(ctx.GetPlace(), total_ptr_len * sizeof(void *));
#ifdef PADDLE_WITH_HIP
T **gpu_out_grads_values = reinterpret_cast<T **>(temp_ptr->ptr());
platform::GpuMemcpyAsync(gpu_out_grads_values,
out_grads_data.data(),
out_grads_data.size() * sizeof(T *),
hipMemcpyHostToDevice,
stream);
T **gpu_in_grads_values =
reinterpret_cast<T **>(&gpu_out_grads_values[out_grads_data.size()]);
platform::GpuMemcpyAsync(gpu_in_grads_values,
in_grads_data.data(),
in_grads_data.size() * sizeof(T *),
hipMemcpyHostToDevice,
stream);
T **gpu_cvm_values =
reinterpret_cast<T **>(&gpu_in_grads_values[in_grads_data.size()]);
platform::GpuMemcpyAsync(gpu_cvm_values,
cvm_data.data(),
cvm_data.size() * sizeof(T *),
hipMemcpyHostToDevice,
stream);
size_t **lods_values =
reinterpret_cast<size_t **>(&gpu_cvm_values[cvm_data.size()]);
platform::GpuMemcpyAsync(lods_values,
lods.data(),
lods.size() * sizeof(size_t *),
hipMemcpyHostToDevice,
stream);
#else
T **gpu_out_grads_values = reinterpret_cast<T **>(temp_ptr->ptr());
platform::GpuMemcpyAsync(gpu_out_grads_values,
out_grads_data.data(),
out_grads_data.size() * sizeof(T *),
hipMemcpyHostToDevice,
stream);
T **gpu_in_grads_values =
reinterpret_cast<T **>(&gpu_out_grads_values[out_grads_data.size()]);
platform::GpuMemcpyAsync(gpu_in_grads_values,
in_grads_data.data(),
in_grads_data.size() * sizeof(T *),
hipMemcpyHostToDevice,
stream);
T **gpu_cvm_values =
reinterpret_cast<T **>(&gpu_in_grads_values[in_grads_data.size()]);
platform::GpuMemcpyAsync(gpu_cvm_values,
cvm_data.data(),
cvm_data.size() * sizeof(T *),
hipMemcpyHostToDevice,
stream);
size_t **lods_values =
reinterpret_cast<size_t **>(&gpu_cvm_values[cvm_data.size()]);
platform::GpuMemcpyAsync(lods_values,
lods.data(),
lods.size() * sizeof(size_t *),
hipMemcpyHostToDevice,
stream);
#endif
size_t N = static_cast<size_t>(batch_size * slot_num * embedding_size);
auto config = platform::GetGpuLaunchConfig1D(dev_ctx, N);
if (use_cvm) {
// join grad
hipLaunchKernelGGL(( FusedSeqpoolCVMGradKernelWithCVM), dim3(config.block_per_grid.x),
dim3(config.thread_per_block.x),
0,
stream, N,
gpu_out_grads_values,
gpu_in_grads_values,
gpu_cvm_values,
lods_values,
batch_size,
embedding_size,
cvm_offset);
} else {
// update grad
hipLaunchKernelGGL(( FusedSeqpoolCVMGradKernelNoCVM), dim3(config.block_per_grid.x),
dim3(config.thread_per_block.x),
0,
stream, N,
gpu_out_grads_values,
gpu_in_grads_values,
gpu_cvm_values,
lods_values,
batch_size,
embedding_size,
cvm_offset);
}
}
template <typename T, typename DeviceContext>
class FusedSeqpoolCVMCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
auto inputs = ctx.MultiInput<phi::DenseTensor>("X");
auto outputs = ctx.MultiOutput<phi::DenseTensor>("Out");
auto &dev_ctx = ctx.template device_context<phi::GPUContext>();
const auto slot_size = inputs.size();
std::vector<const float *> input_data(slot_size);
std::vector<const size_t *> lods_data(slot_size);
std::vector<T *> output_data(slot_size);
std::vector<phi::DenseTensor> seqpool_outputs(slot_size);
std::vector<T *> seqpool_output_data(slot_size);
auto padding_value = ctx.Attr<float>("pad_value");
auto use_cvm = ctx.Attr<bool>("use_cvm");
const int cvm_offset = ctx.Attr<int>("cvm_offset");
int embedding_size = inputs[0]->numel() / inputs[0]->dims()[0];
int batch_size = -1;
std::vector<phi::MixVector<size_t> *> mix_lods_v(slot_size);
for (size_t i = 0; i < slot_size; ++i) {
const auto *input = inputs[i];
Vector<size_t> lods;
if (input->lod().size() != 0) {
auto lod = input->lod();
lods = lod[0];
} else {
lods.push_back(0);
for (int i = 0; i < input->dims()[0]; i++) {
lods.push_back(i + 1);
}
}
int cur_batch_size =
input->lod().size() ? input->lod()[0].size() - 1 : input->dims()[0];
if (batch_size == -1) {
batch_size = cur_batch_size;
} else {
PADDLE_ENFORCE_EQ(batch_size,
cur_batch_size,
platform::errors::PreconditionNotMet(
"The batch size of all input should be same, "
"please cheack, last batchsize is %d, current "
"batchsize is %d",
batch_size,
cur_batch_size));
}
input_data[i] = reinterpret_cast<const T *>(input->data<T>());
auto *output = outputs[i];
if (use_cvm) {
output->Resize({batch_size, embedding_size});
} else {
output->Resize({batch_size, embedding_size - cvm_offset});
}
output_data[i] = reinterpret_cast<T *>(
dev_ctx.Alloc<T>(output, output->numel() * sizeof(T)));
mix_lods_v[i] = new phi::MixVector<size_t>(&lods);
lods_data[i] = mix_lods_v[i]->CUDAData(ctx.GetPlace());
seqpool_outputs[i].Resize({batch_size, embedding_size});
seqpool_output_data[i] = reinterpret_cast<T *>(dev_ctx.Alloc<T>(
&seqpool_outputs[i], seqpool_outputs[i].numel() * sizeof(T)));
}
FusedSeqpoolCVM(ctx,
input_data,
output_data,
seqpool_output_data,
lods_data,
batch_size,
slot_size,
embedding_size,
padding_value,
use_cvm,
cvm_offset);
for (int i = 0; i < slot_size; i++) {
delete mix_lods_v[i];
}
}
};
template <typename T, typename DeviceContext>
class FusedSeqpoolCVMGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
auto out_grads =
ctx.MultiInput<phi::DenseTensor>(framework::GradVarName("Out"));
auto in_grads =
ctx.MultiOutput<phi::DenseTensor>(framework::GradVarName("X"));
auto *cvm = ctx.Input<phi::DenseTensor>("CVM");
auto &dev_ctx = ctx.template device_context<phi::GPUContext>();
std::string pooltype = ctx.Attr<std::string>("pooltype");
auto use_cvm = ctx.Attr<bool>("use_cvm");
const int cvm_offset = ctx.Attr<int>("cvm_offset");
const auto slot_size = in_grads.size();
std::vector<const T *> out_grads_data(slot_size);
std::vector<T *> in_grads_data(slot_size);
std::vector<const T *> cvm_data(slot_size);
std::vector<const size_t *> lods_data(slot_size);
int embedding_size = in_grads[0]->numel() / in_grads[0]->dims()[0];
int batch_size = -1;
std::vector<phi::MixVector<size_t> *> mix_lods_v(slot_size);
for (size_t i = 0; i < slot_size; ++i) {
auto *in_grad = in_grads[i];
Vector<size_t> lods;
if (in_grad->lod().size() != 0) {
auto lod = in_grad->lod();
lods = lod[0];
} else {
lods.push_back(0);
for (int i = 0; i < in_grad->dims()[0]; i++) {
lods.push_back(i + 1);
}
}
int cur_batch_size = in_grad->lod().size() ? in_grad->lod()[0].size() - 1
: in_grad->dims()[0];
if (batch_size == -1) {
batch_size = cur_batch_size;
} else {
PADDLE_ENFORCE_EQ(batch_size,
cur_batch_size,
platform::errors::PreconditionNotMet(
"The batch size of all input should be same, "
"please cheack, last batchsize is %d, current "
"batchsize is %d",
batch_size,
cur_batch_size));
}
auto *out_grad = out_grads[i];
out_grads_data[i] = reinterpret_cast<const T *>(out_grad->data<T>());
in_grads_data[i] = reinterpret_cast<T *>(
dev_ctx.Alloc<T>(in_grad, in_grad->numel() * sizeof(T)));
mix_lods_v[i] = new phi::MixVector<size_t>(&lods);
lods_data[i] = mix_lods_v[i]->CUDAData(ctx.GetPlace());
cvm_data[i] = reinterpret_cast<const T *>(cvm->data<T>());
}
FusedSeqpoolCVMGrad(ctx,
out_grads_data,
in_grads_data,
cvm_data,
lods_data,
batch_size,
slot_size,
embedding_size,
use_cvm,
cvm_offset);
for (int i = 0; i < slot_size; i++) {
delete mix_lods_v[i];
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
PD_REGISTER_STRUCT_KERNEL(
fused_seqpool_cvm, GPU, ALL_LAYOUT, ops::FusedSeqpoolCVMCUDAKernel, float) {
}
PD_REGISTER_STRUCT_KERNEL(fused_seqpool_cvm_grad,
GPU,
ALL_LAYOUT,
ops::FusedSeqpoolCVMGradCUDAKernel,
float) {}
| 9851bb2d1b22d061e05015ae4b6e7f9e156fa748.cu | // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <string>
#include "paddle/fluid/operators/fused/fused_seqpool_cvm_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/device/gpu/gpu_launch_config.h"
#include "paddle/phi/core/mixed_vector.h"
namespace paddle {
namespace operators {
template <typename T>
using Vector = phi::Vector<T>;
#define CUDA_KERNEL_LOOP(i, n) \
for (auto i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \
i += blockDim.x * gridDim.x)
// normal
template <typename T>
__global__ void FusedSeqpoolKernelNormal(const size_t N,
T **input_values,
T **seqpool_output_values,
size_t **lods_values,
const int batch_size,
const int embedding_size,
const float pad_value) {
CUDA_KERNEL_LOOP(i, N) {
int key = i / embedding_size;
int offset = i % embedding_size;
int x = key / batch_size; // slot id
int y = key % batch_size; // ins id
auto &start = *(lods_values[x] + y);
auto &end = *(lods_values[x] + y + 1);
T val = static_cast<T>(pad_value);
for (auto k = start; k < end; ++k) {
val += *(input_values[x] + k * embedding_size + offset);
}
*(seqpool_output_values[x] + y * embedding_size + offset) = val;
}
}
// join need show click input
template <typename T>
__global__ void FusedCVMKernelWithCVM(const size_t N,
T **output_values,
T **seqpool_output_values,
const int batch_size,
const int embedding_size,
const int cvm_offset) {
CUDA_KERNEL_LOOP(i, N) {
int key = i / embedding_size;
int offset = i % embedding_size;
int x = key / batch_size; // slot id
int y = key % batch_size; // ins id
if (offset == 0) { // show
*(output_values[x] + y * embedding_size) =
log(*(seqpool_output_values[x] + y * embedding_size) + 1);
} else if (offset == 1) { // click
*(output_values[x] + y * embedding_size + offset) =
log(*(seqpool_output_values[x] + y * embedding_size + 1) + 1) -
log(*(seqpool_output_values[x] + y * embedding_size) + 1);
} else {
*(output_values[x] + y * embedding_size + offset) =
*(seqpool_output_values[x] + y * embedding_size + offset);
}
}
}
// update not need show click input
template <typename T>
__global__ void FusedCVMKernelNoCVM(const size_t N,
T **output_values,
T **seqpool_output_values,
const int batch_size,
const int no_cvm_embedding_size,
const int cvm_offset) {
CUDA_KERNEL_LOOP(i, N) {
int key = i / no_cvm_embedding_size;
int offset = i % no_cvm_embedding_size;
int x = key / batch_size; // slot id
int y = key % batch_size; // ins id
// no cvm
*(output_values[x] + y * no_cvm_embedding_size + offset) =
*(seqpool_output_values[x] + y * (no_cvm_embedding_size + cvm_offset) +
offset + cvm_offset);
}
}
template <typename T>
void FusedSeqpoolCVM(const framework::ExecutionContext
&ctx, // const paddle::platform::Place &place,
const std::vector<const T *> &input_data,
const std::vector<T *> &output_data,
const std::vector<T *> &seqpool_output_data,
std::vector<const size_t *> lods,
const int batch_size,
const int slot_num,
const int embedding_size,
const float padding_value,
const bool use_cvm,
const int cvm_offset) {
auto stream = ctx.template device_context<phi::GPUContext>().stream();
auto &dev_ctx = ctx.template device_context<phi::GPUContext>();
size_t total_ptr_len = input_data.size() + output_data.size() +
seqpool_output_data.size() + lods.size();
auto temp_ptr =
memory::AllocShared(ctx.GetPlace(), total_ptr_len * sizeof(void *));
void *ptr = temp_ptr->ptr();
#ifdef PADDLE_WITH_HIP
T **gpu_input_values = reinterpret_cast<T **>(temp_ptr->ptr());
platform::GpuMemcpyAsync(gpu_input_values,
input_data.data(),
input_data.size() * sizeof(T *),
hipMemcpyHostToDevice,
stream);
T **gpu_output_values =
reinterpret_cast<T **>(&gpu_input_values[input_data.size()]);
platform::GpuMemcpyAsync(gpu_output_values,
output_data.data(),
output_data.size() * sizeof(T *),
hipMemcpyHostToDevice,
stream);
T **gpu_seqpool_output_values =
reinterpret_cast<T **>(&gpu_output_values[output_data.size()]);
platform::GpuMemcpyAsync(gpu_seqpool_output_values,
seqpool_output_data.data(),
seqpool_output_data.size() * sizeof(T *),
hipMemcpyHostToDevice,
stream);
size_t **lods_values = reinterpret_cast<size_t **>(
&gpu_seqpool_output_values[seqpool_output_data.size()]);
platform::GpuMemcpyAsync(lods_values,
lods.data(),
lods.size() * sizeof(size_t *),
hipMemcpyHostToDevice,
stream);
#else
T **gpu_input_values = reinterpret_cast<T **>(temp_ptr->ptr());
platform::GpuMemcpyAsync(gpu_input_values,
input_data.data(),
input_data.size() * sizeof(T *),
cudaMemcpyHostToDevice,
stream);
T **gpu_output_values =
reinterpret_cast<T **>(&gpu_input_values[input_data.size()]);
platform::GpuMemcpyAsync(gpu_output_values,
output_data.data(),
output_data.size() * sizeof(T *),
cudaMemcpyHostToDevice,
stream);
T **gpu_seqpool_output_values =
reinterpret_cast<T **>(&gpu_output_values[output_data.size()]);
platform::GpuMemcpyAsync(gpu_seqpool_output_values,
seqpool_output_data.data(),
seqpool_output_data.size() * sizeof(T *),
cudaMemcpyHostToDevice,
stream);
size_t **lods_values = reinterpret_cast<size_t **>(
&gpu_seqpool_output_values[seqpool_output_data.size()]);
platform::GpuMemcpyAsync(lods_values,
lods.data(),
lods.size() * sizeof(size_t *),
cudaMemcpyHostToDevice,
stream);
#endif
size_t N = static_cast<size_t>(batch_size * slot_num * embedding_size);
platform::GpuLaunchConfig config = platform::GetGpuLaunchConfig1D(dev_ctx, N);
// first sum pool
FusedSeqpoolKernelNormal<<<config.block_per_grid.x,
config.thread_per_block.x,
0,
stream>>>(N,
gpu_input_values,
gpu_seqpool_output_values,
lods_values,
batch_size,
embedding_size,
padding_value);
// second log
if (use_cvm) {
FusedCVMKernelWithCVM<<<config.block_per_grid.x,
config.thread_per_block.x,
0,
stream>>>(N,
gpu_output_values,
gpu_seqpool_output_values,
batch_size,
embedding_size,
cvm_offset);
} else {
// not need show click input
N = static_cast<size_t>(batch_size * slot_num *
(embedding_size - cvm_offset));
platform::GpuLaunchConfig config =
platform::GetGpuLaunchConfig1D(dev_ctx, N);
FusedCVMKernelNoCVM<<<config.block_per_grid.x,
config.thread_per_block.x,
0,
stream>>>(N,
gpu_output_values,
gpu_seqpool_output_values,
batch_size,
(embedding_size - cvm_offset),
cvm_offset);
}
}
// join grad
template <typename T>
__global__ void FusedSeqpoolCVMGradKernelWithCVM(const size_t N,
T **out_grads_values,
T **in_grads_values,
T **cvm_values,
size_t **lods_values,
const int batch_size,
const int embedding_size,
const int cvm_offset) {
CUDA_KERNEL_LOOP(i, N) {
int key = i / embedding_size;
int offset = i % embedding_size; // embedx offset
int x = key / batch_size; // slot id
int y = key % batch_size; // ins id
T &val = (offset < cvm_offset)
? *(cvm_values[x] + y * cvm_offset + offset)
: *(out_grads_values[x] + y * embedding_size + offset);
auto &start = *(lods_values[x] + y);
auto &end = *(lods_values[x] + y + 1);
for (auto k = start; k < end; ++k) {
*(in_grads_values[x] + k * embedding_size + offset) = val;
}
}
}
// join only show not has click
template <typename T>
__global__ void FusedSeqpoolCVMGradKernelWithShow(const size_t N,
T **out_grads_values,
T **in_grads_values,
T **cvm_values,
size_t **lods_values,
const int batch_size,
const int embedding_size,
const int cvm_offset) {
CUDA_KERNEL_LOOP(i, N) {
int key = i / embedding_size;
int offset = i % embedding_size; // embedx offset
int x = key / batch_size; // slot id
int y = key % batch_size; // ins id
T &val =
(offset < cvm_offset)
? *(cvm_values[x] + y * cvm_offset + offset)
: *(out_grads_values[x] + y * (embedding_size - 1) + offset - 1);
auto &start = *(lods_values[x] + y);
auto &end = *(lods_values[x] + y + 1);
for (auto k = start; k < end; ++k) {
*(in_grads_values[x] + k * embedding_size + offset) = val;
}
}
}
// update grad
template <typename T>
__global__ void FusedSeqpoolCVMGradKernelNoCVM(const size_t N,
T **out_grads_values,
T **in_grads_values,
T **cvm_values,
size_t **lods_values,
const int batch_size,
const int embedding_size,
const int cvm_offset) {
CUDA_KERNEL_LOOP(i, N) {
int key = i / embedding_size;
int offset = i % embedding_size; // embedx offset
int x = key / batch_size; // slot id
int y = key % batch_size; // ins id
T &val = (offset < cvm_offset)
? *(cvm_values[x] + y * cvm_offset + offset)
: *(out_grads_values[x] + y * (embedding_size - cvm_offset) +
offset - cvm_offset);
auto &start = *(lods_values[x] + y);
auto &end = *(lods_values[x] + y + 1);
for (auto k = start; k < end; ++k) {
*(in_grads_values[x] + k * embedding_size + offset) = val;
}
}
}
template <typename T>
void FusedSeqpoolCVMGrad(const framework::ExecutionContext &ctx,
const std::vector<const T *> &out_grads_data,
const std::vector<T *> &in_grads_data,
const std::vector<const T *> &cvm_data,
const std::vector<const size_t *> &lods,
const int batch_size,
const int slot_num,
const int embedding_size,
const bool use_cvm,
const int cvm_offset) {
auto stream = ctx.template device_context<phi::GPUContext>().stream();
auto &dev_ctx = ctx.template device_context<phi::GPUContext>();
size_t total_ptr_len = out_grads_data.size() + in_grads_data.size() +
cvm_data.size() + lods.size();
auto temp_ptr =
memory::AllocShared(ctx.GetPlace(), total_ptr_len * sizeof(void *));
#ifdef PADDLE_WITH_HIP
T **gpu_out_grads_values = reinterpret_cast<T **>(temp_ptr->ptr());
platform::GpuMemcpyAsync(gpu_out_grads_values,
out_grads_data.data(),
out_grads_data.size() * sizeof(T *),
hipMemcpyHostToDevice,
stream);
T **gpu_in_grads_values =
reinterpret_cast<T **>(&gpu_out_grads_values[out_grads_data.size()]);
platform::GpuMemcpyAsync(gpu_in_grads_values,
in_grads_data.data(),
in_grads_data.size() * sizeof(T *),
hipMemcpyHostToDevice,
stream);
T **gpu_cvm_values =
reinterpret_cast<T **>(&gpu_in_grads_values[in_grads_data.size()]);
platform::GpuMemcpyAsync(gpu_cvm_values,
cvm_data.data(),
cvm_data.size() * sizeof(T *),
hipMemcpyHostToDevice,
stream);
size_t **lods_values =
reinterpret_cast<size_t **>(&gpu_cvm_values[cvm_data.size()]);
platform::GpuMemcpyAsync(lods_values,
lods.data(),
lods.size() * sizeof(size_t *),
hipMemcpyHostToDevice,
stream);
#else
T **gpu_out_grads_values = reinterpret_cast<T **>(temp_ptr->ptr());
platform::GpuMemcpyAsync(gpu_out_grads_values,
out_grads_data.data(),
out_grads_data.size() * sizeof(T *),
cudaMemcpyHostToDevice,
stream);
T **gpu_in_grads_values =
reinterpret_cast<T **>(&gpu_out_grads_values[out_grads_data.size()]);
platform::GpuMemcpyAsync(gpu_in_grads_values,
in_grads_data.data(),
in_grads_data.size() * sizeof(T *),
cudaMemcpyHostToDevice,
stream);
T **gpu_cvm_values =
reinterpret_cast<T **>(&gpu_in_grads_values[in_grads_data.size()]);
platform::GpuMemcpyAsync(gpu_cvm_values,
cvm_data.data(),
cvm_data.size() * sizeof(T *),
cudaMemcpyHostToDevice,
stream);
size_t **lods_values =
reinterpret_cast<size_t **>(&gpu_cvm_values[cvm_data.size()]);
platform::GpuMemcpyAsync(lods_values,
lods.data(),
lods.size() * sizeof(size_t *),
cudaMemcpyHostToDevice,
stream);
#endif
size_t N = static_cast<size_t>(batch_size * slot_num * embedding_size);
auto config = platform::GetGpuLaunchConfig1D(dev_ctx, N);
if (use_cvm) {
// join grad
FusedSeqpoolCVMGradKernelWithCVM<<<config.block_per_grid.x,
config.thread_per_block.x,
0,
stream>>>(N,
gpu_out_grads_values,
gpu_in_grads_values,
gpu_cvm_values,
lods_values,
batch_size,
embedding_size,
cvm_offset);
} else {
// update grad
FusedSeqpoolCVMGradKernelNoCVM<<<config.block_per_grid.x,
config.thread_per_block.x,
0,
stream>>>(N,
gpu_out_grads_values,
gpu_in_grads_values,
gpu_cvm_values,
lods_values,
batch_size,
embedding_size,
cvm_offset);
}
}
template <typename T, typename DeviceContext>
class FusedSeqpoolCVMCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
auto inputs = ctx.MultiInput<phi::DenseTensor>("X");
auto outputs = ctx.MultiOutput<phi::DenseTensor>("Out");
auto &dev_ctx = ctx.template device_context<phi::GPUContext>();
const auto slot_size = inputs.size();
std::vector<const float *> input_data(slot_size);
std::vector<const size_t *> lods_data(slot_size);
std::vector<T *> output_data(slot_size);
std::vector<phi::DenseTensor> seqpool_outputs(slot_size);
std::vector<T *> seqpool_output_data(slot_size);
auto padding_value = ctx.Attr<float>("pad_value");
auto use_cvm = ctx.Attr<bool>("use_cvm");
const int cvm_offset = ctx.Attr<int>("cvm_offset");
int embedding_size = inputs[0]->numel() / inputs[0]->dims()[0];
int batch_size = -1;
std::vector<phi::MixVector<size_t> *> mix_lods_v(slot_size);
for (size_t i = 0; i < slot_size; ++i) {
const auto *input = inputs[i];
Vector<size_t> lods;
if (input->lod().size() != 0) {
auto lod = input->lod();
lods = lod[0];
} else {
lods.push_back(0);
for (int i = 0; i < input->dims()[0]; i++) {
lods.push_back(i + 1);
}
}
int cur_batch_size =
input->lod().size() ? input->lod()[0].size() - 1 : input->dims()[0];
if (batch_size == -1) {
batch_size = cur_batch_size;
} else {
PADDLE_ENFORCE_EQ(batch_size,
cur_batch_size,
platform::errors::PreconditionNotMet(
"The batch size of all input should be same, "
"please cheack, last batchsize is %d, current "
"batchsize is %d",
batch_size,
cur_batch_size));
}
input_data[i] = reinterpret_cast<const T *>(input->data<T>());
auto *output = outputs[i];
if (use_cvm) {
output->Resize({batch_size, embedding_size});
} else {
output->Resize({batch_size, embedding_size - cvm_offset});
}
output_data[i] = reinterpret_cast<T *>(
dev_ctx.Alloc<T>(output, output->numel() * sizeof(T)));
mix_lods_v[i] = new phi::MixVector<size_t>(&lods);
lods_data[i] = mix_lods_v[i]->CUDAData(ctx.GetPlace());
seqpool_outputs[i].Resize({batch_size, embedding_size});
seqpool_output_data[i] = reinterpret_cast<T *>(dev_ctx.Alloc<T>(
&seqpool_outputs[i], seqpool_outputs[i].numel() * sizeof(T)));
}
FusedSeqpoolCVM(ctx,
input_data,
output_data,
seqpool_output_data,
lods_data,
batch_size,
slot_size,
embedding_size,
padding_value,
use_cvm,
cvm_offset);
for (int i = 0; i < slot_size; i++) {
delete mix_lods_v[i];
}
}
};
template <typename T, typename DeviceContext>
class FusedSeqpoolCVMGradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
auto out_grads =
ctx.MultiInput<phi::DenseTensor>(framework::GradVarName("Out"));
auto in_grads =
ctx.MultiOutput<phi::DenseTensor>(framework::GradVarName("X"));
auto *cvm = ctx.Input<phi::DenseTensor>("CVM");
auto &dev_ctx = ctx.template device_context<phi::GPUContext>();
std::string pooltype = ctx.Attr<std::string>("pooltype");
auto use_cvm = ctx.Attr<bool>("use_cvm");
const int cvm_offset = ctx.Attr<int>("cvm_offset");
const auto slot_size = in_grads.size();
std::vector<const T *> out_grads_data(slot_size);
std::vector<T *> in_grads_data(slot_size);
std::vector<const T *> cvm_data(slot_size);
std::vector<const size_t *> lods_data(slot_size);
int embedding_size = in_grads[0]->numel() / in_grads[0]->dims()[0];
int batch_size = -1;
std::vector<phi::MixVector<size_t> *> mix_lods_v(slot_size);
for (size_t i = 0; i < slot_size; ++i) {
auto *in_grad = in_grads[i];
Vector<size_t> lods;
if (in_grad->lod().size() != 0) {
auto lod = in_grad->lod();
lods = lod[0];
} else {
lods.push_back(0);
for (int i = 0; i < in_grad->dims()[0]; i++) {
lods.push_back(i + 1);
}
}
int cur_batch_size = in_grad->lod().size() ? in_grad->lod()[0].size() - 1
: in_grad->dims()[0];
if (batch_size == -1) {
batch_size = cur_batch_size;
} else {
PADDLE_ENFORCE_EQ(batch_size,
cur_batch_size,
platform::errors::PreconditionNotMet(
"The batch size of all input should be same, "
"please cheack, last batchsize is %d, current "
"batchsize is %d",
batch_size,
cur_batch_size));
}
auto *out_grad = out_grads[i];
out_grads_data[i] = reinterpret_cast<const T *>(out_grad->data<T>());
in_grads_data[i] = reinterpret_cast<T *>(
dev_ctx.Alloc<T>(in_grad, in_grad->numel() * sizeof(T)));
mix_lods_v[i] = new phi::MixVector<size_t>(&lods);
lods_data[i] = mix_lods_v[i]->CUDAData(ctx.GetPlace());
cvm_data[i] = reinterpret_cast<const T *>(cvm->data<T>());
}
FusedSeqpoolCVMGrad(ctx,
out_grads_data,
in_grads_data,
cvm_data,
lods_data,
batch_size,
slot_size,
embedding_size,
use_cvm,
cvm_offset);
for (int i = 0; i < slot_size; i++) {
delete mix_lods_v[i];
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
PD_REGISTER_STRUCT_KERNEL(
fused_seqpool_cvm, GPU, ALL_LAYOUT, ops::FusedSeqpoolCVMCUDAKernel, float) {
}
PD_REGISTER_STRUCT_KERNEL(fused_seqpool_cvm_grad,
GPU,
ALL_LAYOUT,
ops::FusedSeqpoolCVMGradCUDAKernel,
float) {}
|
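// ---------------------------------------------------------------------------
// For reference: every kernel in fused_seqpool_cvm above iterates with the
// CUDA_KERNEL_LOOP macro, i.e. the standard grid-stride loop, so a bounded
// grid can cover an arbitrarily large N. The program below is an invented,
// minimal example of the same pattern; the block/grid sizing is only a rough
// stand-in for platform::GetGpuLaunchConfig1D.
#include <algorithm>
#include <cstdio>
#include <cuda_runtime.h>

#define GRID_STRIDE_LOOP(i, n)                                  \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n);  \
       i += blockDim.x * gridDim.x)

__global__ void scale_kernel(const float *x, float *y, int n, float a) {
  GRID_STRIDE_LOOP(i, n) { y[i] = a * x[i]; }  // each thread strides over the array
}

int main() {
  const int n = 1 << 20;
  float *x = nullptr, *y = nullptr;
  cudaMalloc(&x, n * sizeof(float));
  cudaMalloc(&y, n * sizeof(float));
  cudaMemset(x, 0, n * sizeof(float));
  // Cap the grid and let the stride loop absorb the remainder.
  int threads = 256;
  int blocks = std::min((n + threads - 1) / threads, 4096);
  scale_kernel<<<blocks, threads>>>(x, y, n, 2.0f);
  cudaDeviceSynchronize();
  printf("scale_kernel: %s\n", cudaGetErrorString(cudaGetLastError()));
  cudaFree(x);
  cudaFree(y);
  return 0;
}
// ---------------------------------------------------------------------------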
344249f6c7675f2554d8524c8a10b144b95b0aa2.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <cstdio>
#include <string>
#include <iostream>
#include <color_spinor_field.h>
#include <clover_field.h>
// these control the Wilson-type actions
#ifdef GPU_WILSON_DIRAC
//#define DIRECT_ACCESS_LINK
//#define DIRECT_ACCESS_WILSON_SPINOR
//#define DIRECT_ACCESS_WILSON_ACCUM
//#define DIRECT_ACCESS_WILSON_INTER
//#define DIRECT_ACCESS_WILSON_PACK_SPINOR
//#define DIRECT_ACCESS_CLOVER
#endif // GPU_WILSON_DIRAC
#include <quda_internal.h>
#include <dslash_quda.h>
#include <sys/time.h>
#include <blas_quda.h>
#include <face_quda.h>
#include <inline_ptx.h>
namespace quda {
namespace ndegtwisted {
#include <dslash_constants.h>
#include <dslash_textures.h>
#include <dslash_index.cuh>
// Enable shared memory dslash for Fermi architecture
//#define SHARED_WILSON_DSLASH
//#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access
#ifdef GPU_NDEG_TWISTED_MASS_DIRAC
#include <tm_ndeg_dslash_def.h> // Non-degenerate twisted Mass
#endif
#ifndef NDEGTM_SHARED_FLOATS_PER_THREAD
#define NDEGTM_SHARED_FLOATS_PER_THREAD 0
#endif
#include <dslash_quda.cuh>
  } // end namespace ndegtwisted
// declare the dslash events
#include <dslash_events.cuh>
using namespace ndegtwisted;
#ifdef GPU_NDEG_TWISTED_MASS_DIRAC
template <typename sFloat, typename gFloat>
class NdegTwistedDslashCuda : public SharedDslashCuda {
private:
const gFloat *gauge0, *gauge1;
const QudaTwistDslashType dslashType;
double a, b, c, d;
protected:
unsigned int sharedBytesPerThread() const
{
if (dslashParam.kernel_type == INTERIOR_KERNEL) {
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return NDEGTM_SHARED_FLOATS_PER_THREAD * reg_size;
} else {
return 0;
}
}
public:
NdegTwistedDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cudaColorSpinorField *in, const cudaColorSpinorField *x,
const QudaTwistDslashType dslashType, const double kappa, const double mu,
const double epsilon, const double k, const int dagger)
: SharedDslashCuda(out, in, x, reconstruct, dagger), gauge0(gauge0), gauge1(gauge1), dslashType(dslashType)
{
bindSpinorTex<sFloat>(in, out, x);
a = kappa;
b = mu;
c = epsilon;
d = k;
dslashParam.gauge0 = (void*)gauge0;
dslashParam.gauge1 = (void*)gauge1;
dslashParam.a = kappa;
dslashParam.a_f = kappa;
dslashParam.b = mu;
dslashParam.b_f = mu;
dslashParam.c = epsilon;
dslashParam.c_f = epsilon;
dslashParam.d = k;
dslashParam.d_f = k;
if (dslashType != QUDA_NONDEG_DSLASH) errorQuda("Invalid dslashType for non-degenerate twisted-mass Dslash");
dslashParam.fl_stride = in->VolumeCB()/2;
}
virtual ~NdegTwistedDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
TuneKey tuneKey() const
{
TuneKey key = DslashCuda::tuneKey();
strcat(key.aux,",NdegDslash");
return key;
}
void apply(const hipStream_t &stream)
{
#ifdef SHARED_WILSON_DSLASH
if (dslashParam.kernel_type == EXTERIOR_KERNEL_X)
errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
NDEG_TM_DSLASH(twistedNdegMassDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam);
}
long long flops() const {
int twisted_flops = 48;
long long flops = DslashCuda::flops();
switch(dslashParam.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T:
case EXTERIOR_KERNEL_ALL:
break;
case INTERIOR_KERNEL:
case KERNEL_POLICY:
// twisted-mass flops are done in the interior kernel
flops += twisted_flops * in->VolumeCB();
break;
}
return flops;
}
};
#endif // GPU_NDEG_TWISTED_MASS_DIRAC
#include <dslash_policy.cuh>
void ndegTwistedMassDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const QudaTwistDslashType type,
const double &kappa, const double &mu, const double &epsilon,
const double &k, const int *commOverride, TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
inSpinor->allocateGhostBuffer(1);
#ifdef GPU_NDEG_TWISTED_MASS_DIRAC
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
int ghost_threads[4] = {0};
int bulk_threads = in->Volume() / 2;
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i][0] = in->GhostOffset(i,0)/in->FieldOrder();
dslashParam.ghostOffset[i][1] = in->GhostOffset(i,1)/in->FieldOrder();
dslashParam.ghostNormOffset[i][0] = in->GhostNormOffset(i,0);
dslashParam.ghostNormOffset[i][1] = in->GhostNormOffset(i,1);
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
ghost_threads[i] = in->GhostFace()[i] / 2;
}
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
dslash = new NdegTwistedDslashCuda<double2,double2>(out, (double2*)gauge0,(double2*)gauge1, gauge.Reconstruct(), in, x, type, kappa, mu, epsilon, k, dagger);
regSize = sizeof(double);
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new NdegTwistedDslashCuda<float4,float4>(out, (float4*)gauge0,(float4*)gauge1, gauge.Reconstruct(), in, x, type, kappa, mu, epsilon, k, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new NdegTwistedDslashCuda<short4,short4>(out, (short4*)gauge0,(short4*)gauge1, gauge.Reconstruct(), in, x, type, kappa, mu, epsilon, k, dagger);
}
#ifndef GPU_COMMS
DslashPolicyTune dslash_policy(*dslash, const_cast<cudaColorSpinorField*>(in), regSize, parity, dagger, bulk_threads, ghost_threads, profile);
dslash_policy.apply(0);
#else
DslashPolicyImp* dslashImp = DslashFactory::create(QUDA_GPU_COMMS_DSLASH);
(*dslashImp)(*dslash, const_cast<cudaColorSpinorField*>(in), regSize, parity, dagger, bulk_threads, ghost_threads, profile);
delete dslashImp;
#endif
delete dslash;
unbindGaugeTex(gauge);
checkCudaError();
#else
errorQuda("Non-degenerate twisted mass dslash has not been built");
#endif
}
}
| 344249f6c7675f2554d8524c8a10b144b95b0aa2.cu | #include <cstdlib>
#include <cstdio>
#include <string>
#include <iostream>
#include <color_spinor_field.h>
#include <clover_field.h>
// these control the Wilson-type actions
#ifdef GPU_WILSON_DIRAC
//#define DIRECT_ACCESS_LINK
//#define DIRECT_ACCESS_WILSON_SPINOR
//#define DIRECT_ACCESS_WILSON_ACCUM
//#define DIRECT_ACCESS_WILSON_INTER
//#define DIRECT_ACCESS_WILSON_PACK_SPINOR
//#define DIRECT_ACCESS_CLOVER
#endif // GPU_WILSON_DIRAC
#include <quda_internal.h>
#include <dslash_quda.h>
#include <sys/time.h>
#include <blas_quda.h>
#include <face_quda.h>
#include <inline_ptx.h>
namespace quda {
namespace ndegtwisted {
#include <dslash_constants.h>
#include <dslash_textures.h>
#include <dslash_index.cuh>
// Enable shared memory dslash for Fermi architecture
//#define SHARED_WILSON_DSLASH
//#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access
#ifdef GPU_NDEG_TWISTED_MASS_DIRAC
#include <tm_ndeg_dslash_def.h> // Non-degenerate twisted Mass
#endif
#ifndef NDEGTM_SHARED_FLOATS_PER_THREAD
#define NDEGTM_SHARED_FLOATS_PER_THREAD 0
#endif
#include <dslash_quda.cuh>
  } // end namespace ndegtwisted
// declare the dslash events
#include <dslash_events.cuh>
using namespace ndegtwisted;
#ifdef GPU_NDEG_TWISTED_MASS_DIRAC
template <typename sFloat, typename gFloat>
class NdegTwistedDslashCuda : public SharedDslashCuda {
private:
const gFloat *gauge0, *gauge1;
const QudaTwistDslashType dslashType;
double a, b, c, d;
protected:
unsigned int sharedBytesPerThread() const
{
if (dslashParam.kernel_type == INTERIOR_KERNEL) {
int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
return NDEGTM_SHARED_FLOATS_PER_THREAD * reg_size;
} else {
return 0;
}
}
public:
NdegTwistedDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
const QudaReconstructType reconstruct, const cudaColorSpinorField *in, const cudaColorSpinorField *x,
const QudaTwistDslashType dslashType, const double kappa, const double mu,
const double epsilon, const double k, const int dagger)
: SharedDslashCuda(out, in, x, reconstruct, dagger), gauge0(gauge0), gauge1(gauge1), dslashType(dslashType)
{
bindSpinorTex<sFloat>(in, out, x);
a = kappa;
b = mu;
c = epsilon;
d = k;
dslashParam.gauge0 = (void*)gauge0;
dslashParam.gauge1 = (void*)gauge1;
dslashParam.a = kappa;
dslashParam.a_f = kappa;
dslashParam.b = mu;
dslashParam.b_f = mu;
dslashParam.c = epsilon;
dslashParam.c_f = epsilon;
dslashParam.d = k;
dslashParam.d_f = k;
if (dslashType != QUDA_NONDEG_DSLASH) errorQuda("Invalid dslashType for non-degenerate twisted-mass Dslash");
dslashParam.fl_stride = in->VolumeCB()/2;
}
virtual ~NdegTwistedDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }
TuneKey tuneKey() const
{
TuneKey key = DslashCuda::tuneKey();
strcat(key.aux,",NdegDslash");
return key;
}
void apply(const cudaStream_t &stream)
{
#ifdef SHARED_WILSON_DSLASH
if (dslashParam.kernel_type == EXTERIOR_KERNEL_X)
errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
NDEG_TM_DSLASH(twistedNdegMassDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam);
}
long long flops() const {
int twisted_flops = 48;
long long flops = DslashCuda::flops();
switch(dslashParam.kernel_type) {
case EXTERIOR_KERNEL_X:
case EXTERIOR_KERNEL_Y:
case EXTERIOR_KERNEL_Z:
case EXTERIOR_KERNEL_T:
case EXTERIOR_KERNEL_ALL:
break;
case INTERIOR_KERNEL:
case KERNEL_POLICY:
// twisted-mass flops are done in the interior kernel
flops += twisted_flops * in->VolumeCB();
break;
}
return flops;
}
};
#endif // GPU_NDEG_TWISTED_MASS_DIRAC
#include <dslash_policy.cuh>
void ndegTwistedMassDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge,
const cudaColorSpinorField *in, const int parity, const int dagger,
const cudaColorSpinorField *x, const QudaTwistDslashType type,
const double &kappa, const double &mu, const double &epsilon,
const double &k, const int *commOverride, TimeProfile &profile)
{
inSpinor = (cudaColorSpinorField*)in; // EVIL
inSpinor->allocateGhostBuffer(1);
#ifdef GPU_NDEG_TWISTED_MASS_DIRAC
int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code
int ghost_threads[4] = {0};
int bulk_threads = in->Volume() / 2;
for(int i=0;i<4;i++){
dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
dslashParam.ghostOffset[i][0] = in->GhostOffset(i,0)/in->FieldOrder();
dslashParam.ghostOffset[i][1] = in->GhostOffset(i,1)/in->FieldOrder();
dslashParam.ghostNormOffset[i][0] = in->GhostNormOffset(i,0);
dslashParam.ghostNormOffset[i][1] = in->GhostNormOffset(i,1);
dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
ghost_threads[i] = in->GhostFace()[i] / 2;
}
void *gauge0, *gauge1;
bindGaugeTex(gauge, parity, &gauge0, &gauge1);
if (in->Precision() != gauge.Precision())
errorQuda("Mixing gauge and spinor precision not supported");
DslashCuda *dslash = 0;
size_t regSize = sizeof(float);
if (in->Precision() == QUDA_DOUBLE_PRECISION) {
dslash = new NdegTwistedDslashCuda<double2,double2>(out, (double2*)gauge0,(double2*)gauge1, gauge.Reconstruct(), in, x, type, kappa, mu, epsilon, k, dagger);
regSize = sizeof(double);
} else if (in->Precision() == QUDA_SINGLE_PRECISION) {
dslash = new NdegTwistedDslashCuda<float4,float4>(out, (float4*)gauge0,(float4*)gauge1, gauge.Reconstruct(), in, x, type, kappa, mu, epsilon, k, dagger);
} else if (in->Precision() == QUDA_HALF_PRECISION) {
dslash = new NdegTwistedDslashCuda<short4,short4>(out, (short4*)gauge0,(short4*)gauge1, gauge.Reconstruct(), in, x, type, kappa, mu, epsilon, k, dagger);
}
#ifndef GPU_COMMS
DslashPolicyTune dslash_policy(*dslash, const_cast<cudaColorSpinorField*>(in), regSize, parity, dagger, bulk_threads, ghost_threads, profile);
dslash_policy.apply(0);
#else
DslashPolicyImp* dslashImp = DslashFactory::create(QUDA_GPU_COMMS_DSLASH);
(*dslashImp)(*dslash, const_cast<cudaColorSpinorField*>(in), regSize, parity, dagger, bulk_threads, ghost_threads, profile);
delete dslashImp;
#endif
delete dslash;
unbindGaugeTex(gauge);
checkCudaError();
#else
errorQuda("Non-degenerate twisted mass dslash has not been built");
#endif
}
}
|
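// ---------------------------------------------------------------------------
// For reference: ndegTwistedMassDslashCuda above selects a template
// instantiation (double2 / float4 / short4) from the spinor's runtime
// precision. The miniature below shows the same runtime-to-template dispatch
// shape with an invented axpy kernel; it is not QUDA code, and PREC_DOUBLE /
// PREC_SINGLE are made-up stand-ins for QUDA_*_PRECISION.
#include <cstdio>
#include <cuda_runtime.h>

enum Precision { PREC_DOUBLE, PREC_SINGLE };

template <typename Float>
__global__ void axpy_kernel(Float a, const Float *x, Float *y, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) y[i] += a * x[i];
}

// Host-side dispatch: a branch on the runtime precision picks the template,
// mirroring the if/else chain on in->Precision() above.
void axpy(Precision prec, double a, const void *x, void *y, int n, cudaStream_t s) {
  int threads = 256, blocks = (n + threads - 1) / threads;
  if (prec == PREC_DOUBLE)
    axpy_kernel<double><<<blocks, threads, 0, s>>>(a, (const double *)x, (double *)y, n);
  else
    axpy_kernel<float><<<blocks, threads, 0, s>>>((float)a, (const float *)x, (float *)y, n);
}

int main() {
  const int n = 1024;
  double *x = nullptr, *y = nullptr;
  cudaMalloc(&x, n * sizeof(double));
  cudaMalloc(&y, n * sizeof(double));
  cudaMemset(x, 0, n * sizeof(double));
  cudaMemset(y, 0, n * sizeof(double));
  axpy(PREC_DOUBLE, 0.5, x, y, n, 0);
  cudaDeviceSynchronize();
  printf("axpy: %s\n", cudaGetErrorString(cudaGetLastError()));
  cudaFree(x);
  cudaFree(y);
  return 0;
}
// ---------------------------------------------------------------------------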
f004eade249f61c374916378d5896ae7089a62a6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include <stdlib.h>  // needed for atoi()
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "gputimer.h"
#define T 1024
#define AND 0
#define OR 1
#define NAND 2
#define NOR 3
#define XOR 4
#define XNOR 5
__global__ void gate(int *num1, int *num2, int *op, int *res) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int operation = op[i];
int in1 = num1[i];
int in2 = num2[i];
if (operation == AND) res[i] = in1 & in2;
else if (operation == OR) res[i] = in1 | in2;
else if (operation == NAND) res[i] = !(in1 & in2);
else if (operation == NOR) res[i] = !(in1 | in2);
else if (operation == XOR) res[i] = in1 ^ in2;
else if (operation == XNOR) res[i] = !(in1 ^ in2);
else res[i] = -1;
}
int main(int argc, char *argv[]) {
FILE *fp, *out;
char* input_filename = argv[1];
int lines = atoi(argv[2]);
char* output_filename = argv[3];
// Open input file and create output file
fp = fopen(input_filename, "r");
out = fopen(output_filename, "w");
if (fp == NULL){
printf("Could not open file %s",input_filename);
return 1;
}
// Unified memory allocation
char * line = NULL;
    // pad up to a full block so every thread in the lines/T+1 block launch stays in bounds
    int size = (lines / T + 1) * T * sizeof(int);
int *p_bool1, *p_bool2, *p_op, *p_results;
hipMallocManaged(&p_bool1, size);
hipMallocManaged(&p_bool2, size);
hipMallocManaged(&p_op, size);
hipMallocManaged(&p_results, size);
// Read the data in the file
for (int i = 0; i < lines; i++) {
line = NULL;
size_t n = 0;
getline(&line, &n, fp);
p_bool1[i] = (int)line[0]-48;
p_bool2[i] = (int)line[2]-48;
p_op[i] = (int)line[4]-48;
}
GpuTimer timer;
timer.Start();
hipLaunchKernelGGL(( gate) , dim3(lines/T+1),dim3(T), 0, 0, p_bool1, p_bool2, p_op, p_results);
hipDeviceSynchronize();
timer.Stop();
printf("Time elapsed: %f\n", timer.Elapsed());
// Write data to new output file
for (int i = 0; i < lines; i++) {
//printf("result: %d\n", p_results[i]);
fprintf(out, "%d\n", p_results[i]);
//printf("===============\n");
}
hipFree(p_bool1);
hipFree(p_bool2);
hipFree(p_op);
hipFree(p_results);
// Close the files
fclose(fp);
fclose(out);
return 0;
} | f004eade249f61c374916378d5896ae7089a62a6.cu | #include <stdio.h>
#include <math.h>
#include <stdlib.h>  // needed for atoi()
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "gputimer.h"
#define T 1024
#define AND 0
#define OR 1
#define NAND 2
#define NOR 3
#define XOR 4
#define XNOR 5
__global__ void gate(int *num1, int *num2, int *op, int *res) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int operation = op[i];
int in1 = num1[i];
int in2 = num2[i];
if (operation == AND) res[i] = in1 & in2;
else if (operation == OR) res[i] = in1 | in2;
else if (operation == NAND) res[i] = !(in1 & in2);
else if (operation == NOR) res[i] = !(in1 | in2);
else if (operation == XOR) res[i] = in1 ^ in2;
else if (operation == XNOR) res[i] = !(in1 ^ in2);
else res[i] = -1;
}
int main(int argc, char *argv[]) {
FILE *fp, *out;
char* input_filename = argv[1];
int lines = atoi(argv[2]);
char* output_filename = argv[3];
// Open input file and create output file
fp = fopen(input_filename, "r");
out = fopen(output_filename, "w");
if (fp == NULL){
printf("Could not open file %s",input_filename);
return 1;
}
// Unified memory allocation
char * line = NULL;
    // pad up to a full block so every thread in the lines/T+1 block launch stays in bounds
    int size = (lines / T + 1) * T * sizeof(int);
int *p_bool1, *p_bool2, *p_op, *p_results;
cudaMallocManaged(&p_bool1, size);
cudaMallocManaged(&p_bool2, size);
cudaMallocManaged(&p_op, size);
cudaMallocManaged(&p_results, size);
// Read the data in the file
for (int i = 0; i < lines; i++) {
line = NULL;
size_t n = 0;
getline(&line, &n, fp);
p_bool1[i] = (int)line[0]-48;
p_bool2[i] = (int)line[2]-48;
p_op[i] = (int)line[4]-48;
}
GpuTimer timer;
timer.Start();
gate <<<lines/T+1,T>>> (p_bool1, p_bool2, p_op, p_results);
cudaDeviceSynchronize();
timer.Stop();
printf("Time elapsed: %f\n", timer.Elapsed());
// Write data to new output file
for (int i = 0; i < lines; i++) {
//printf("result: %d\n", p_results[i]);
fprintf(out, "%d\n", p_results[i]);
//printf("===============\n");
}
cudaFree(p_bool1);
cudaFree(p_bool2);
cudaFree(p_op);
cudaFree(p_results);
// Close the files
fclose(fp);
fclose(out);
return 0;
} |
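// ---------------------------------------------------------------------------
// For reference: the gate program above keeps its inputs and results in
// unified memory, so the same pointer is written on the host, read by the
// kernel, and read back on the host after a device synchronize. The tiny
// invented example below shows that round trip, with an in-kernel bounds
// check as the usual alternative to padding the allocation up to a full
// block.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void increment(int *data, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n)  // guard: the last block may run past n
    data[i] += 1;
}

int main() {
  const int n = 1000, threads = 256;
  int *data = nullptr;
  cudaMallocManaged(&data, n * sizeof(int));
  for (int i = 0; i < n; i++) data[i] = i;  // host write, no explicit memcpy
  increment<<<(n + threads - 1) / threads, threads>>>(data, n);
  cudaDeviceSynchronize();  // required before the host reads the results
  printf("data[0]=%d data[%d]=%d\n", data[0], n - 1, data[n - 1]);
  cudaFree(data);
  return 0;
}
// ---------------------------------------------------------------------------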
4f0289688d774b81f4a0374fa8e1ea889b52ccc6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "stdio.h"
#define N 10
__host__ void add(int *a, int *b, int *c)
{
int id = 0;
while (id < N)
{
c[id] = a[id] + b[id];
id += 1;
}
}
__global__ void add_p(int *a, int *b, int *c)
{
printf("(%d %d)\n", threadIdx.x, blockIdx.x);
int id = threadIdx.x + blockDim.x * blockIdx.x;
c[id] = a[id] + b[id];
}
int main( void )
{
/*
CUDA task1.1
block/thread parallelize vector sum
*/
int a[N], b[N], c[N];
// fill the arrays 'a' and 'b' on the CPU
for (int i = 0; i < N; i++)
{
a[i] = -i;
b[i] = i * i;
}
//device memory allocation
int *dev_a, *dev_b, *dev_c;
hipMalloc(&dev_a, sizeof(int) * N);
hipMalloc(&dev_b, sizeof(int) * N);
hipMalloc(&dev_c, sizeof(int) * N);
hipMemcpy(dev_a, a, sizeof(int) * N, hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, sizeof(int) * N, hipMemcpyHostToDevice);
// display the host results
add(a, b, c);
printf("host:\n");
for (int i = 0; i < N; i++)
{
printf("%d + %d = %d\n", a[i], b[i], c[i]);
}
for (int i = 0; i < N; i++)
{
c[i] = 0;
}
printf("\n");
// display the device (threads) results
printf("device (threads):\n");
hipLaunchKernelGGL(( add_p), dim3(1), dim3(N), 0, 0, dev_a, dev_b, dev_c);
hipMemcpy(c, dev_c, sizeof(int) * N, hipMemcpyDeviceToHost);
for (int i = 0; i < N; i++)
{
printf("%d + %d = %d\n", a[i], b[i], c[i]);
}
for (int i = 0; i < N; i++)
{
c[i] = 0;
// dev_c[i] = 0;
}
printf("\n");
// display the device (blocks) results
printf("device (blocks):\n");
hipLaunchKernelGGL(( add_p), dim3(N), dim3(1), 0, 0, dev_a, dev_b, dev_c);
hipMemcpy(c, dev_c, sizeof(int) * N, hipMemcpyDeviceToHost);
for (int i = 0; i < N; i++)
{
printf("%d + %d = %d\n", a[i], b[i], c[i]);
}
//freeing device memory
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
} | 4f0289688d774b81f4a0374fa8e1ea889b52ccc6.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include "stdio.h"
#define N 10
__host__ void add(int *a, int *b, int *c)
{
int id = 0;
while (id < N)
{
c[id] = a[id] + b[id];
id += 1;
}
}
__global__ void add_p(int *a, int *b, int *c)
{
printf("(%d %d)\n", threadIdx.x, blockIdx.x);
int id = threadIdx.x + blockDim.x * blockIdx.x;
c[id] = a[id] + b[id];
}
int main( void )
{
/*
CUDA task1.1
block/thread parallelize vector sum
*/
int a[N], b[N], c[N];
// fill the arrays 'a' and 'b' on the CPU
for (int i = 0; i < N; i++)
{
a[i] = -i;
b[i] = i * i;
}
//device memory allocation
int *dev_a, *dev_b, *dev_c;
cudaMalloc(&dev_a, sizeof(int) * N);
cudaMalloc(&dev_b, sizeof(int) * N);
cudaMalloc(&dev_c, sizeof(int) * N);
cudaMemcpy(dev_a, a, sizeof(int) * N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, sizeof(int) * N, cudaMemcpyHostToDevice);
// display the host results
add(a, b, c);
printf("host:\n");
for (int i = 0; i < N; i++)
{
printf("%d + %d = %d\n", a[i], b[i], c[i]);
}
for (int i = 0; i < N; i++)
{
c[i] = 0;
}
printf("\n");
// display the device (threads) results
printf("device (threads):\n");
add_p<<<1, N>>>(dev_a, dev_b, dev_c);
cudaMemcpy(c, dev_c, sizeof(int) * N, cudaMemcpyDeviceToHost);
for (int i = 0; i < N; i++)
{
printf("%d + %d = %d\n", a[i], b[i], c[i]);
}
for (int i = 0; i < N; i++)
{
c[i] = 0;
// dev_c[i] = 0;
}
printf("\n");
// display the device (blocks) results
printf("device (blocks):\n");
add_p<<<N, 1>>>(dev_a, dev_b, dev_c);
cudaMemcpy(c, dev_c, sizeof(int) * N, cudaMemcpyDeviceToHost);
for (int i = 0; i < N; i++)
{
printf("%d + %d = %d\n", a[i], b[i], c[i]);
}
//freeing device memory
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
} |
49c3892366435772c2f0f1ba15a7dd2fbdc65609.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "coalesced2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
hipMalloc(&A, XSIZE*YSIZE);
float *C = NULL;
hipMalloc(&C, XSIZE*YSIZE);
const int N = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
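// Editorial note (not in the original source): the two loops above round the
// problem size up to the next multiple of the block dimensions so the grid divides
// evenly. For example, with XSIZE = 1016 and BLOCKX = 24 the loop stops at
// iXSIZE = 1032 (= 24 * 43), giving gridBlock.x = 43; sizes already divisible by
// the block (e.g. 240 with BLOCKX = 8) are left unchanged.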
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((coalesced2), dim3(gridBlock), dim3(threadBlock), 0, 0, A, C, N);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((coalesced2), dim3(gridBlock), dim3(threadBlock), 0, 0, A, C, N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((coalesced2), dim3(gridBlock), dim3(threadBlock), 0, 0, A, C, N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 49c3892366435772c2f0f1ba15a7dd2fbdc65609.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "coalesced2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *A = NULL;
cudaMalloc(&A, XSIZE*YSIZE);
float *C = NULL;
cudaMalloc(&C, XSIZE*YSIZE);
const int N = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
coalesced2<<<gridBlock,threadBlock>>>(A,C,N);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
coalesced2<<<gridBlock,threadBlock>>>(A,C,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
coalesced2<<<gridBlock,threadBlock>>>(A,C,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
13b974ec5b6b4eaaa8b41122d028665f0b437a15.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cstdlib>
__global__ void softmax_kernel(float *output, float *input, float *exp_sum, int batch, int channel, int total_size)
{
int N = batch;
int C = channel;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= total_size)
return;
int c_idx = tid % C;
int n_idx = tid / C;
float exp_element = expf(input[tid]);
float exp_sum_n = exp_sum[n_idx];
output[tid] = exp_element / exp_sum_n;
}
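/*
 * Editorial illustration (not part of the original source): exp_sum is expected to
 * hold, for every sample n in the batch, the sum of expf() over that sample's C
 * logits, computed before this kernel runs. For one sample with logits {1, 2, 3}:
 * expf gives roughly {2.718, 7.389, 20.086}, exp_sum[n] ~= 30.19, and the kernel
 * writes approximately {0.090, 0.245, 0.665}, which sums to 1 as expected.
 */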
void softmax(float *output, float *input, float *exp_sum, int batch, int channel)
{
int N = batch;
int C = channel;
int THREADS_PER_BLOCK = 256;
int TOTAL_SIZE = N * C;
int NUMBER_OF_BLOCKS = (TOTAL_SIZE + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
hipLaunchKernelGGL(softmax_kernel, dim3(NUMBER_OF_BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, output, input, exp_sum, N, C, TOTAL_SIZE);
} | 13b974ec5b6b4eaaa8b41122d028665f0b437a15.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cstdlib>
__global__ void softmax_kernel(float *output, float *input, float *exp_sum, int batch, int channel, int total_size)
{
int N = batch;
int C = channel;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid >= total_size)
return;
int c_idx = tid % C;
int n_idx = tid / C;
float exp_element = expf(input[tid]);
float exp_sum_n = exp_sum[n_idx];
output[tid] = exp_element / exp_sum_n;
}
void softmax(float *output, float *input, float *exp_sum, int batch, int channel)
{
int N = batch;
int C = channel;
int THREADS_PER_BLOCK = 256;
int TOTAL_SIZE = N * C;
int NUMBER_OF_BLOCKS = (TOTAL_SIZE + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
softmax_kernel<<<NUMBER_OF_BLOCKS, THREADS_PER_BLOCK>>>(output, input, exp_sum, N, C, TOTAL_SIZE);
} |
165a5764b751103f96b04961309508be84e15580.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "efficient.h"
namespace StreamCompaction {
namespace Efficient {
int BLOCK_SIZE = (2 << 7);
__global__ void kUpSweep(int d, int *data) {
int k = (blockDim.x * blockIdx.x) + threadIdx.x;
int exp_d = (int)exp2f(d);
int exp_d1 = (int)exp2f(d+1);
if (k % exp_d1 == 0) {
data[k + exp_d1 - 1] += data[k + exp_d - 1];
}
}
__global__ void kDownSweep(int d, int *data) {
int k = (blockDim.x * blockIdx.x) + threadIdx.x;
if (k % (int)exp2f(d+1) == 0) {
int left = k + (int)exp2f(d) - 1;
int right = k + (int)exp2f(d+1) - 1;
int t = data[left];
data[left] = data[right];
data[right] += t;
}
}
/*
* In-place scan on `dev_idata`, which must be a device memory pointer.
*/
void dv_scan(int n, int *dev_idata) {
int numBlocks = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
for (int d = 0; d < ilog2ceil(n)-1; d++) {
hipLaunchKernelGGL(( kUpSweep), dim3(numBlocks), dim3(BLOCK_SIZE), 0, 0, d, dev_idata);
checkCUDAError("scan");
}
int z = 0;
hipMemcpy(&dev_idata[n-1], &z, sizeof(int), hipMemcpyHostToDevice);
for (int d = ilog2ceil(n)-1; d >= 0; d--) {
hipLaunchKernelGGL(( kDownSweep), dim3(numBlocks), dim3(BLOCK_SIZE), 0, 0, d, dev_idata);
checkCUDAError("scan");
}
}
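/*
 * Editorial illustration (not part of the original source), assuming n is a power
 * of two as dv_scan requires. For n = 8 and input {3,1,7,0,4,1,6,3} the passes
 * above transform the array as follows:
 *   up-sweep (last level skipped, since the final slot is zeroed anyway)
 *       -> {3,4,7,11,4,5,6,14}
 *   zero the last element, then down-sweep
 *       -> {0,3,4,11,11,15,16,22}
 * which is the exclusive prefix sum of the input.
 */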
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int size, int *odata, const int *input) {
int *idata;
int n;
if ((size & (size-1)) != 0) { // if size is not a power of 2
n = (int)exp2f(ilog2ceil(size));
idata = (int*)malloc(n * sizeof(int));
memcpy(idata, input, size * sizeof(int)); // copy only the valid elements; the tail is zero-padded below
for (int j = size; j < n; j++) {
idata[j] = 0;
}
} else {
n = size;
idata = (int*)malloc(n * sizeof(int));
memcpy(idata, input, n * sizeof(int));
}
int array_size = n * sizeof(int);
int *dv_idata;
hipMalloc((void**) &dv_idata, array_size);
hipMemcpy(dv_idata, idata, array_size, hipMemcpyHostToDevice);
dv_scan(n, dv_idata);
hipMemcpy(odata, dv_idata, array_size, hipMemcpyDeviceToHost);
hipFree(dv_idata);
}
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int size, int *odata, const int *input) {
int *idata;
int n;
if ((size & (size-1)) != 0) { // if size is not a power of 2
n = (int)exp2f(ilog2ceil(size));
idata = (int*)malloc(n * sizeof(int));
memcpy(idata, input, size * sizeof(int)); // copy only the valid elements; the tail is zero-padded below
for (int j = size; j < n; j++) {
idata[j] = 0;
}
} else {
n = size;
idata = (int*)malloc(n * sizeof(int));
memcpy(idata, input, n * sizeof(int));
}
int *dev_indices;
int *dev_odata;
int *dev_idata;
int array_size = n * sizeof(int);
int numBlocks = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
hipMalloc((void**) &dev_indices, array_size);
hipMalloc((void**) &dev_odata, array_size);
hipMalloc((void**) &dev_idata, array_size);
hipMemcpy(dev_idata, idata, array_size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( StreamCompaction::Common::kernMapToBoolean), dim3(numBlocks), dim3(BLOCK_SIZE), 0, 0, n, dev_indices, dev_idata);
int last;
hipMemcpy(&last, dev_indices + n-1, sizeof(int), hipMemcpyDeviceToHost);
dv_scan(n, dev_indices);
int streamSize;
hipMemcpy(&streamSize, dev_indices + n-1, sizeof(int), hipMemcpyDeviceToHost);
hipLaunchKernelGGL(( StreamCompaction::Common::kernScatter), dim3(numBlocks), dim3(BLOCK_SIZE), 0, 0, n, dev_odata, dev_indices, dev_idata);
hipMemcpy(odata, dev_odata, array_size, hipMemcpyDeviceToHost);
// dev_indices holds an exclusive scan, so it does not count the last element's flag;
// add the last element back in if it passed the predicate.
if (last == 1) {
streamSize++;
}
return streamSize;
}
}
}
| 165a5764b751103f96b04961309508be84e15580.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "efficient.h"
namespace StreamCompaction {
namespace Efficient {
int BLOCK_SIZE = (2 << 7);
__global__ void kUpSweep(int d, int *data) {
int k = (blockDim.x * blockIdx.x) + threadIdx.x;
int exp_d = (int)exp2f(d);
int exp_d1 = (int)exp2f(d+1);
if (k % exp_d1 == 0) {
data[k + exp_d1 - 1] += data[k + exp_d - 1];
}
}
__global__ void kDownSweep(int d, int *data) {
int k = (blockDim.x * blockIdx.x) + threadIdx.x;
if (k % (int)exp2f(d+1) == 0) {
int left = k + (int)exp2f(d) - 1;
int right = k + (int)exp2f(d+1) - 1;
int t = data[left];
data[left] = data[right];
data[right] += t;
}
}
/*
* In-place scan on `dev_idata`, which must be a device memory pointer.
*/
void dv_scan(int n, int *dev_idata) {
int numBlocks = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
for (int d = 0; d < ilog2ceil(n)-1; d++) {
kUpSweep<<<numBlocks, BLOCK_SIZE>>>(d, dev_idata);
checkCUDAError("scan");
}
int z = 0;
cudaMemcpy(&dev_idata[n-1], &z, sizeof(int), cudaMemcpyHostToDevice);
for (int d = ilog2ceil(n)-1; d >= 0; d--) {
kDownSweep<<<numBlocks, BLOCK_SIZE>>>(d, dev_idata);
checkCUDAError("scan");
}
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int size, int *odata, const int *input) {
int *idata;
int n;
if ((size & (size-1)) != 0) { // if size is not a power of 2
n = (int)exp2f(ilog2ceil(size));
idata = (int*)malloc(n * sizeof(int));
memcpy(idata, input, size * sizeof(int)); // copy only the valid elements; the tail is zero-padded below
for (int j = size; j < n; j++) {
idata[j] = 0;
}
} else {
n = size;
idata = (int*)malloc(n * sizeof(int));
memcpy(idata, input, n * sizeof(int));
}
int array_size = n * sizeof(int);
int *dv_idata;
cudaMalloc((void**) &dv_idata, array_size);
cudaMemcpy(dv_idata, idata, array_size, cudaMemcpyHostToDevice);
dv_scan(n, dv_idata);
cudaMemcpy(odata, dv_idata, array_size, cudaMemcpyDeviceToHost);
cudaFree(dv_idata);
}
/**
* Performs stream compaction on idata, storing the result into odata.
* All zeroes are discarded.
*
* @param n The number of elements in idata.
* @param odata The array into which to store elements.
* @param idata The array of elements to compact.
* @returns The number of elements remaining after compaction.
*/
int compact(int size, int *odata, const int *input) {
int *idata;
int n;
if ((size & (size-1)) != 0) { // if size is not a power of 2
n = (int)exp2f(ilog2ceil(size));
idata = (int*)malloc(n * sizeof(int));
memcpy(idata, input, size * sizeof(int)); // copy only the valid elements; the tail is zero-padded below
for (int j = size; j < n; j++) {
idata[j] = 0;
}
} else {
n = size;
idata = (int*)malloc(n * sizeof(int));
memcpy(idata, input, n * sizeof(int));
}
int *dev_indices;
int *dev_odata;
int *dev_idata;
int array_size = n * sizeof(int);
int numBlocks = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
cudaMalloc((void**) &dev_indices, array_size);
cudaMalloc((void**) &dev_odata, array_size);
cudaMalloc((void**) &dev_idata, array_size);
cudaMemcpy(dev_idata, idata, array_size, cudaMemcpyHostToDevice);
StreamCompaction::Common::kernMapToBoolean<<<numBlocks, BLOCK_SIZE>>>(n, dev_indices, dev_idata);
int last;
cudaMemcpy(&last, dev_indices + n-1, sizeof(int), cudaMemcpyDeviceToHost);
dv_scan(n, dev_indices);
int streamSize;
cudaMemcpy(&streamSize, dev_indices + n-1, sizeof(int), cudaMemcpyDeviceToHost);
StreamCompaction::Common::kernScatter<<<numBlocks, BLOCK_SIZE>>>(n, dev_odata, dev_indices, dev_idata);
cudaMemcpy(odata, dev_odata, array_size, cudaMemcpyDeviceToHost);
// dev_indices holds an exclusive scan, so it does not count the last element's flag;
// add the last element back in if it passed the predicate.
if (last == 1) {
streamSize++;
}
return streamSize;
}
}
}
|
8596fc734d5b3105feb3c3974dd58589670a926f.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <pthread.h>
#include <cstdlib>
#include <vector>
#define COMPRESSION_BATCH_SIZE 32
using namespace std;
struct ThreadArg {
float *original_data;
long num_elements;
int thread_num;
float *compressed_data;
unsigned int *mask;
};
int n_threads = 8;
long layer_sizes[] = {56l * 56 * 96, 28l * 28 * 96, 27l * 27 * 256, 13l * 13 * 256, 13l * 12 * 384, 13l * 12 * 384, 13l * 13 * 256, 6l * 6 * 256};
int num_layers = 8;
void *compressThread(void *arg) {
ThreadArg *thread_arg = (ThreadArg *)arg;
float *original_data = thread_arg->original_data;
float *compressed_data = thread_arg->compressed_data;
unsigned int *mask = thread_arg->mask;
int thread_num = thread_arg->thread_num;
long num_elements = thread_arg->num_elements;
long start = thread_num * num_elements / n_threads;
long n_compression_batches = num_elements / n_threads / COMPRESSION_BATCH_SIZE;
for (long i = 0; i < n_compression_batches; i++) {
long mask_pos = (i * COMPRESSION_BATCH_SIZE + start) / COMPRESSION_BATCH_SIZE;
mask[mask_pos] = 0;
for (long j = i * COMPRESSION_BATCH_SIZE + start; j < (i + 1) * COMPRESSION_BATCH_SIZE + start; j++) {
if (original_data[j] > 0) {
mask[mask_pos] = (mask[mask_pos] << 1) + 1;
compressed_data[j] = original_data[j];
}
else {
mask[mask_pos] = (mask[mask_pos] << 1);
}
}
}
return NULL;
}
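/*
 * Editorial illustration (not part of the original source): each COMPRESSION_BATCH_SIZE
 * (= 32) consecutive floats share one mask word. Walking the batch in order, the word is
 * shifted left once per element and its low bit records whether that element was > 0, so
 * the first element of the batch ends up in the most significant of the 32 bits used.
 * For a batch starting {0.7, 0.0, 0.0, 2.1, ...} the running mask after four elements is
 * 0b1001; positive values are also copied into compressed_data at their original offsets.
 */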
int main() {
int batch_size = 128;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
pthread_t threads[n_threads];
for (int i = 0; i < num_layers; i++) {
layer_sizes[i] *= batch_size;
}
vector<float> compression_times;
float total_milli = 0.0;
for (int j = 0; j < num_layers; j++) {
long num_elements = layer_sizes[j];
float *original_data, *compressed_data;
unsigned int *mask;
hipHostMalloc((void **)&original_data, num_elements * sizeof(float));
// generate data
for (long i = 0; i < num_elements; i++) {
if (rand() % 10 < 3)
original_data[i] = 0;
else
original_data[i] = 1;
}
if (num_elements % n_threads != 0) {
cout << "bad number of threads" << endl;
exit(0);
}
if ((num_elements / n_threads) % COMPRESSION_BATCH_SIZE != 0) {
cout << "bad num_elements or n_threads" << endl;
exit(0);
}
cout << "starting " << j << endl;
hipEventRecord(start);
hipHostMalloc((void **)&compressed_data, num_elements * sizeof(float));
hipHostMalloc((void **)&mask, num_elements / COMPRESSION_BATCH_SIZE * sizeof(unsigned int));
ThreadArg thread_arg[n_threads];
for (int i = 0; i < n_threads; i++) {
thread_arg[i].original_data = original_data;
thread_arg[i].compressed_data = compressed_data;
thread_arg[i].mask = mask;
thread_arg[i].thread_num = i;
thread_arg[i].num_elements = num_elements;
}
for (int i = 0; i < n_threads; i++) {
pthread_create(&threads[i], NULL, &compressThread, (void *)&thread_arg[i]);
}
for (int i = 0; i < n_threads; i++) {
pthread_join(threads[i], NULL);
}
// for (int i = 0; i < 27 * 27 * 256 * 128; i++);
hipEventRecord(stop);
hipEventSynchronize(stop);
float milli;
hipEventElapsedTime(&milli, start, stop);
compression_times.push_back(milli);
total_milli += milli;
// cout << milli << endl;
hipHostFree(original_data);
hipHostFree(compressed_data);
hipHostFree(mask);
}
for (int i = 0; i < num_layers; i++) {
cout << compression_times[i] << endl;
}
cout << total_milli << endl;
} | 8596fc734d5b3105feb3c3974dd58589670a926f.cu | #include <iostream>
#include <pthread.h>
#include <cstdlib>
#include <vector>
#define COMPRESSION_BATCH_SIZE 32
using namespace std;
struct ThreadArg {
float *original_data;
long num_elements;
int thread_num;
float *compressed_data;
unsigned int *mask;
};
int n_threads = 8;
long layer_sizes[] = {56l * 56 * 96, 28l * 28 * 96, 27l * 27 * 256, 13l * 13 * 256, 13l * 12 * 384, 13l * 12 * 384, 13l * 13 * 256, 6l * 6 * 256};
int num_layers = 8;
void *compressThread(void *arg) {
ThreadArg *thread_arg = (ThreadArg *)arg;
float *original_data = thread_arg->original_data;
float *compressed_data = thread_arg->compressed_data;
unsigned int *mask = thread_arg->mask;
int thread_num = thread_arg->thread_num;
long num_elements = thread_arg->num_elements;
long start = thread_num * num_elements / n_threads;
long n_compression_batches = num_elements / n_threads / COMPRESSION_BATCH_SIZE;
for (long i = 0; i < n_compression_batches; i++) {
long mask_pos = (i * COMPRESSION_BATCH_SIZE + start) / COMPRESSION_BATCH_SIZE;
mask[mask_pos] = 0;
for (long j = i * COMPRESSION_BATCH_SIZE + start; j < (i + 1) * COMPRESSION_BATCH_SIZE + start; j++) {
if (original_data[j] > 0) {
mask[mask_pos] = (mask[mask_pos] << 1) + 1;
compressed_data[j] = original_data[j];
}
else {
mask[mask_pos] = (mask[mask_pos] << 1);
}
}
}
return NULL;
}
int main() {
int batch_size = 128;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
pthread_t threads[n_threads];
for (int i = 0; i < num_layers; i++) {
layer_sizes[i] *= batch_size;
}
vector<float> compression_times;
float total_milli = 0.0;
for (int j = 0; j < num_layers; j++) {
long num_elements = layer_sizes[j];
float *original_data, *compressed_data;
unsigned int *mask;
cudaMallocHost((void **)&original_data, num_elements * sizeof(float));
// generate data
for (long i = 0; i < num_elements; i++) {
if (rand() % 10 < 3)
original_data[i] = 0;
else
original_data[i] = 1;
}
if (num_elements % n_threads != 0) {
cout << "bad number of threads" << endl;
exit(0);
}
if ((num_elements / n_threads) % COMPRESSION_BATCH_SIZE != 0) {
cout << "bad num_elements or n_threads" << endl;
exit(0);
}
cout << "starting " << j << endl;
cudaEventRecord(start);
cudaMallocHost((void **)&compressed_data, num_elements * sizeof(float));
cudaMallocHost((void **)&mask, num_elements / COMPRESSION_BATCH_SIZE * sizeof(unsigned int));
ThreadArg thread_arg[n_threads];
for (int i = 0; i < n_threads; i++) {
thread_arg[i].original_data = original_data;
thread_arg[i].compressed_data = compressed_data;
thread_arg[i].mask = mask;
thread_arg[i].thread_num = i;
thread_arg[i].num_elements = num_elements;
}
for (int i = 0; i < n_threads; i++) {
pthread_create(&threads[i], NULL, &compressThread, (void *)&thread_arg[i]);
}
for (int i = 0; i < n_threads; i++) {
pthread_join(threads[i], NULL);
}
// for (int i = 0; i < 27 * 27 * 256 * 128; i++);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milli;
cudaEventElapsedTime(&milli, start, stop);
compression_times.push_back(milli);
total_milli += milli;
// cout << milli << endl;
cudaFreeHost(original_data);
cudaFreeHost(compressed_data);
cudaFreeHost(mask);
}
for (int i = 0; i < num_layers; i++) {
cout << compression_times[i] << endl;
}
cout << total_milli << endl;
} |
6344a555542173f1aa17c7e8e8e59ac09ddacee1.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright 2018 NVIDIA Corporation. All rights reserved. */
//Type-erasure C-style interface for Multi-column Filter, Order-By, and Group-By functionality
#include <gdf/gdf.h>
#include <gdf/utils.h>
#include <gdf/errorutils.h>
///#include "../include/sqls_rtti_comp.hpp" -- CORRECT: put me back
#include "sqls_rtti_comp.hpp"
//using IndexT = int;//okay...
using IndexT = size_t;
namespace{ //anonymous
//helper functions:
//
//flatten AOS info from gdf_columns into SOA (2 arrays):
//(1) column array pointers and (2) types;
//
void soa_col_info(gdf_column* cols, size_t ncols, void** d_cols, int* d_types)
{
std::vector<void*> v_cols(ncols,nullptr);
std::vector<int> v_types(ncols, 0);
for(int i=0;i<ncols;++i)
{
v_cols[i] = cols[i].data;
v_types[i] = cols[i].dtype;
}
void** h_cols = &v_cols[0];
int* h_types = &v_types[0];
hipMemcpy(d_cols, h_cols, ncols*sizeof(void*), hipMemcpyHostToDevice);//TODO: add streams
hipMemcpy(d_types, h_types, ncols*sizeof(int), hipMemcpyHostToDevice);//TODO: add streams
}
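// Editorial illustration (not part of the original source): for two input columns,
// an int32 key column and a float64 value column, soa_col_info leaves the device
// arrays holding
//   d_cols  = { cols[0].data, cols[1].data }   (raw data pointers)
//   d_types = { GDF_INT32,    GDF_FLOAT64  }   (matching gdf_dtype tags)
// which is the SOA layout the type-erased routines below index by column number.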
template<typename T>
using Vector = thrust::device_vector<T>;
void type_dispatcher(gdf_dtype col_type,
int col_index,
gdf_column** h_cols_in,
gdf_column** h_cols_out,
IndexT* d_indices,
size_t nrows_new)
{
switch( col_type )
{
case GDF_INT8:
{
using ColType = int8_t;
ColType* d_in = static_cast<ColType*>(h_cols_in[col_index]->data);//pointer semantics (2)
ColType* d_out = static_cast<ColType*>(h_cols_out[col_index]->data);
thrust::gather(thrust::device,
d_indices, d_indices + nrows_new, //map of indices
d_in, //source
d_out); //=source[map]
break;
}
case GDF_INT16:
{
using ColType = int16_t;
ColType* d_in = static_cast<ColType*>(h_cols_in[col_index]->data);
ColType* d_out = static_cast<ColType*>(h_cols_out[col_index]->data);
thrust::gather(thrust::device,
d_indices, d_indices + nrows_new, //map of indices
d_in, //source
d_out); //=source[map]
break;
}
case GDF_INT32:
{
using ColType = int32_t;
ColType* d_in = static_cast<ColType*>(h_cols_in[col_index]->data);
ColType* d_out = static_cast<ColType*>(h_cols_out[col_index]->data);
thrust::gather(thrust::device,
d_indices, d_indices + nrows_new, //map of indices
d_in, //source
d_out); //=source[map]
break;
}
case GDF_INT64:
{
using ColType = int64_t;
ColType* d_in = static_cast<ColType*>(h_cols_in[col_index]->data);
ColType* d_out = static_cast<ColType*>(h_cols_out[col_index]->data);
thrust::gather(thrust::device,
d_indices, d_indices + nrows_new, //map of indices
d_in, //source
d_out); //=source[map]
break;
}
case GDF_FLOAT32:
{
using ColType = float;
ColType* d_in = static_cast<ColType*>(h_cols_in[col_index]->data);
ColType* d_out = static_cast<ColType*>(h_cols_out[col_index]->data);
thrust::gather(thrust::device,
d_indices, d_indices + nrows_new, //map of indices
d_in, //source
d_out); //=source[map]
break;
}
case GDF_FLOAT64:
{
using ColType = double;
ColType* d_in = static_cast<ColType*>(h_cols_in[col_index]->data);
ColType* d_out = static_cast<ColType*>(h_cols_out[col_index]->data);
thrust::gather(thrust::device,
d_indices, d_indices + nrows_new, //map of indices
d_in, //source
d_out); //=source[map]
break;
}
default:
assert( false );//type not handled
}
return;// State::True;
}
//copy from a set of gdf_columns: h_cols_in
//of size (#ncols): ncols
//to another set of columns : h_cols_out
//by gathering via array of indices: d_indices
//of size: nrows_new
//
void multi_gather_host(size_t ncols, gdf_column** h_cols_in, gdf_column** h_cols_out, IndexT* d_indices, size_t nrows_new)
{
for(int col_index = 0; col_index<ncols; ++col_index)
{
gdf_dtype col_type = h_cols_in[col_index]->dtype;
type_dispatcher(col_type,
col_index,
h_cols_in,
h_cols_out,
d_indices,
nrows_new);
h_cols_out[col_index]->dtype = col_type;
h_cols_out[col_index]->size = nrows_new;
//TODO: h_cols_out[col_index]->valid
}
}
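// Editorial illustration (not part of the original source): with a single int32
// column holding {10, 20, 30, 40} and d_indices = {3, 1} (nrows_new = 2), the
// thrust::gather in type_dispatcher writes {40, 20} into the output column, and
// multi_gather_host then stamps its dtype and size (= 2) accordingly.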
int dtype_size(gdf_dtype col_type)
{
switch( col_type )
{
case GDF_INT8:
{
using ColType = int8_t;
return sizeof(ColType);
}
case GDF_INT16:
{
using ColType = int16_t;
return sizeof(ColType);
}
case GDF_INT32:
{
using ColType = int32_t;
return sizeof(ColType);
}
case GDF_INT64:
{
using ColType = int64_t;
return sizeof(ColType);
}
case GDF_FLOAT32:
{
using ColType = float;
return sizeof(ColType);
}
case GDF_FLOAT64:
{
using ColType = double;
return sizeof(ColType);
}
default:
assert( false );//type not handled
}
return 0;
}
#ifdef DEBUG_
void run_echo(size_t nrows, //in: # rows
gdf_column* cols, //in: host-side array of gdf_columns
size_t ncols, //in: # cols
int flag_sorted, //in: flag specifying if rows are pre-sorted (1) or not (0)
gdf_column agg_in)//in: column to aggregate
{
std::cout<<"############# Echo: #############\n";
std::cout<<"nrows: "<<nrows<<"\n";
std::cout<<"ncols: "<<ncols<<"\n";
std::cout<<"sorted: "<<flag_sorted<<"\n";
std::cout<<"input cols:\n";
for(auto i = 0; i < ncols; ++i)
{
switch(i)
{
case 0:
case 1:
{
std::vector<int32_t> v(nrows);
int32_t* p = &v[0];
hipMemcpy(p, cols[i].data, nrows*sizeof(int32_t), hipMemcpyDeviceToHost);
std::copy(v.begin(), v.end(), std::ostream_iterator<int32_t>(std::cout,","));
std::cout<<"\n";
break;
}
case 2:
{
std::vector<double> v(nrows);
double* p = &v[0];
hipMemcpy(p, cols[i].data, nrows*sizeof(double), hipMemcpyDeviceToHost);
std::copy(v.begin(), v.end(), std::ostream_iterator<double>(std::cout,","));
std::cout<<"\n";
break;
}
}
}
std::cout<<"col to aggregate on:\n";
std::vector<double> v(nrows);
double* p = &v[0];
hipMemcpy(p, agg_in.data, nrows*sizeof(double), hipMemcpyDeviceToHost);
std::copy(v.begin(), v.end(), std::ostream_iterator<double>(std::cout,","));
std::cout<<"\n";
}
#endif
//apparent duplication of info between
//gdf_column array and two arrays:
// d_cols = data slice of gdf_column array;
// d_types = dtype slice of gdf_column array;
//but it's necessary because the gdf_column array is host
//(even though its data slice is on device)
//
gdf_error gdf_group_by_count(size_t nrows, //in: # rows
gdf_column* cols, //in: host-side array of gdf_columns
size_t ncols, //in: # cols
int flag_sorted, //in: flag specifying if rows are pre-sorted (1) or not (0)
void** d_cols, //out: pre-allocated device-side array to be filled with gdf_column::data for each column; slicing of gdf_column array (host)
int* d_types, //out: pre-allocated device-side array to be filled with gdf_column::dtype for each column; slicing of gdf_column array (host)
IndexT* d_indx, //out: device-side array of row indices after sorting
IndexT* d_kout, //out: device-side array of rows after group-by
IndexT* d_count, //out: device-side array of aggregated values (COUNT-ed) as a result of group-by;
size_t* new_sz) //out: host-side # rows of d_count
{
//copy H-D:
//
soa_col_info(cols, ncols, d_cols, d_types);
*new_sz = multi_col_group_by_count_sort(nrows,
ncols,
d_cols,
d_types,
d_indx,
d_kout,
d_count,
flag_sorted);
return GDF_SUCCESS;
}
//apparent duplication of info between
//gdf_column array and two arrays:
// d_cols = data slice of gdf_column array;
// d_types = dtype slice of gdf_column array;
//but it's necessary because the gdf_column array is host
//(even though its data slice is on device)
//
gdf_error gdf_group_by_sum(size_t nrows, //in: # rows
gdf_column* cols, //in: host-side array of gdf_columns
size_t ncols, //in: # cols
int flag_sorted, //in: flag specifying if rows are pre-sorted (1) or not (0)
gdf_column& agg_in,//in: column to aggregate
void** d_cols, //out: pre-allocated device-side array to be filled with gdf_column::data for each column; slicing of gdf_column array (host)
int* d_types, //out: pre-allocated device-side array to be filled with gdf_column::dtype for each column; slicing of gdf_column array (host)
IndexT* d_indx, //out: device-side array of row indices after sorting
gdf_column& agg_p, //out: reordering of d_agg after sorting; requires shallow (trivial) copy-construction (see static_assert below);
IndexT* d_kout, //out: device-side array of rows after group-by
gdf_column& c_vout,//out: aggregated column; requires shallow (trivial) copy-construction (see static_assert below);
size_t* new_sz) //out: host-side # rows of d_count
{
//not supported by g++-4.8:
//
//static_assert(std::is_trivially_copy_constructible<gdf_column>::value,
// "error: gdf_column must have shallow copy constructor; otherwise cannot pass output by copy.");
#ifdef DEBUG_
run_echo(nrows, //in: # rows
cols, //in: host-side array of gdf_columns
ncols, //in: # cols
flag_sorted, //in: flag specifying if rows are pre-sorted (1) or not (0)
agg_in);//in: column to aggregate
#endif
assert( agg_in.dtype == agg_p.dtype );
assert( agg_in.dtype == c_vout.dtype );
//copy H-D:
//
soa_col_info(cols, ncols, d_cols, d_types);
switch( agg_in.dtype )
{
case GDF_INT8:
{
using T = char;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_sum_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT16:
{
using T = short;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_sum_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT32:
{
using T = int;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_sum_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT64:
{
using T = long;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_sum_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_FLOAT32:
{
using T = float;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_sum_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_FLOAT64:
{
using T = double;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_sum_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
default:
return GDF_UNSUPPORTED_DTYPE;
}
return GDF_SUCCESS;
}
//apparent duplication of info between
//gdf_column array and two arrays:
// d_cols = data slice of gdf_column array;
// d_types = dtype slice of gdf_column array;
//but it's necessary because the gdf_column array is host
//(even though its data slice is on device)
//
gdf_error gdf_group_by_min(size_t nrows, //in: # rows
gdf_column* cols, //in: host-side array of gdf_columns
size_t ncols, //in: # cols
int flag_sorted, //in: flag specifying if rows are pre-sorted (1) or not (0)
gdf_column& agg_in,//in: column to aggregate
void** d_cols, //out: pre-allocated device-side array to be filled with gdf_column::data for each column; slicing of gdf_column array (host)
int* d_types, //out: pre-allocated device-side array to be filled with gdf_column::dtype for each column; slicing of gdf_column array (host)
IndexT* d_indx, //out: device-side array of row indices after sorting
gdf_column& agg_p, //out: reordering of d_agg after sorting; requires shallow (trivial) copy-construction (see static_assert below);
IndexT* d_kout, //out: device-side array of rows after group-by
gdf_column& c_vout,//out: aggregated column; requires shallow (trivial) copy-construction (see static_assert below);
size_t* new_sz) //out: host-side # rows of d_count
{
//not supported by g++-4.8:
//
//static_assert(std::is_trivially_copy_constructible<gdf_column>::value,
// "error: gdf_column must have shallow copy constructor; otherwise cannot pass output by copy.");
assert( agg_in.dtype == agg_p.dtype );
assert( agg_in.dtype == c_vout.dtype );
//copy H-D:
//
soa_col_info(cols, ncols, d_cols, d_types);
switch( agg_in.dtype )
{
case GDF_INT8:
{
using T = char;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_min_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT16:
{
using T = short;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_min_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT32:
{
using T = int;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_min_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT64:
{
using T = long;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_min_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_FLOAT32:
{
using T = float;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_min_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_FLOAT64:
{
using T = double;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_min_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
default:
return GDF_UNSUPPORTED_DTYPE;
}
return GDF_SUCCESS;
}
//apparent duplication of info between
//gdf_column array and two arrays:
// d_cols = data slice of gdf_column array;
// d_types = dtype slice of gdf_column array;
//but it's necessary because the gdf_column array is host
//(even though its data slice is on device)
//
gdf_error gdf_group_by_max(size_t nrows, //in: # rows
gdf_column* cols, //in: host-side array of gdf_columns
size_t ncols, //in: # cols
int flag_sorted, //in: flag specifying if rows are pre-sorted (1) or not (0)
gdf_column& agg_in,//in: column to aggregate
void** d_cols, //out: pre-allocated device-side array to be filled with gdf_column::data for each column; slicing of gdf_column array (host)
int* d_types, //out: pre-allocated device-side array to be filled with gdf_column::dtype for each column; slicing of gdf_column array (host)
IndexT* d_indx, //out: device-side array of row indices after sorting
gdf_column& agg_p, //out: reordering of d_agg after sorting; requires shallow (trivial) copy-construction (see static_assert below);
IndexT* d_kout, //out: device-side array of rows after group-by
gdf_column& c_vout,//out: aggregated column; requires shallow (trivial) copy-construction (see static_assert below);
size_t* new_sz) //out: host-side # rows of d_count
{
//not supported by g++-4.8:
//
//static_assert(std::is_trivially_copy_constructible<gdf_column>::value,
// "error: gdf_column must have shallow copy constructor; otherwise cannot pass output by copy.");
assert( agg_in.dtype == agg_p.dtype );
assert( agg_in.dtype == c_vout.dtype );
//copy H-D:
//
soa_col_info(cols, ncols, d_cols, d_types);
switch( agg_in.dtype )
{
case GDF_INT8:
{
using T = char;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_max_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT16:
{
using T = short;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_max_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT32:
{
using T = int;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_max_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT64:
{
using T = long;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_max_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_FLOAT32:
{
using T = float;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_max_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_FLOAT64:
{
using T = double;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_max_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
default:
return GDF_UNSUPPORTED_DTYPE;
}
return GDF_SUCCESS;
}
//apparent duplication of info between
//gdf_column array and two arrays:
// d_cols = data slice of gdf_column array;
// d_types = dtype slice of gdf_column array;
//but it's necessary because the gdf_column array is host
//(even though its data slice is on device)
//
gdf_error gdf_group_by_avg(size_t nrows, //in: # rows
gdf_column* cols, //in: host-side array of gdf_columns
size_t ncols, //in: # cols
int flag_sorted, //in: flag specifying if rows are pre-sorted (1) or not (0)
gdf_column& agg_in,//in: column to aggregate
void** d_cols, //out: pre-allocated device-side array to be filled with gdf_column::data for each column; slicing of gdf_column array (host)
int* d_types, //out: pre-allocated device-side array to be filled with gdf_column::dtype for each column; slicing of gdf_column array (host)
IndexT* d_indx, //out: device-side array of row indices after sorting
IndexT* d_cout, //out: device-side array of (COUNT-ed) values as a result of group-by;
gdf_column& agg_p, //out: reordering of d_agg after sorting; requires shallow (trivial) copy-construction (see static_assert below);
IndexT* d_kout, //out: device-side array of rows after group-by
gdf_column& c_vout,//out: aggregated column; requires shallow (trivial) copy-construction (see static_assert below);
size_t* new_sz) //out: host-side # rows of d_count
{
//not supported by g++-4.8:
//
//static_assert(std::is_trivially_copy_constructible<gdf_column>::value,
// "error: gdf_column must have shallow copy constructor; otherwise cannot pass output by copy.");
assert( agg_in.dtype == agg_p.dtype );
assert( agg_in.dtype == c_vout.dtype );
//copy H-D:
//
soa_col_info(cols, ncols, d_cols, d_types);
switch( agg_in.dtype )
{
case GDF_INT8:
{
using T = char;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_avg_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_cout,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT16:
{
using T = short;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_avg_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_cout,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT32:
{
using T = int;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_avg_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_cout,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT64:
{
using T = long;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_avg_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_cout,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_FLOAT32:
{
using T = float;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_avg_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_cout,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_FLOAT64:
{
using T = double;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_avg_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_cout,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
default:
return GDF_UNSUPPORTED_DTYPE;
}
return GDF_SUCCESS;
}
gdf_error gdf_group_by_single(int ncols, // # columns
gdf_column** cols, //input cols
gdf_column* col_agg, //column to aggregate on
gdf_column* out_col_indices, //if not null return indices of re-ordered rows
gdf_column** out_col_values, //if not null return the grouped-by columns
//(multi-gather based on indices, which are needed anyway)
gdf_column* out_col_agg, //aggregation result
gdf_context* ctxt, //struct with additional info: bool is_sorted, flag_sort_or_hash, bool flag_count_distinct
gdf_agg_op op) //aggregation operation
{
if( ncols == 0 )
return GDF_DATASET_EMPTY;
if( ctxt->flag_method == GDF_SORT )
{
std::vector<gdf_column> v_cols(ncols);
for(auto i = 0; i < ncols; ++i)
{
v_cols[i] = *(cols[i]);
}
gdf_column* h_columns = &v_cols[0];
size_t nrows = h_columns[0].size;
if( nrows == 0 )
return GDF_DATASET_EMPTY;
size_t n_group = 0;
Vector<IndexT> d_indx;//allocate only if necessary (see below)
Vector<void*> d_cols(ncols, nullptr);
Vector<int> d_types(ncols, 0);
void** d_col_data = d_cols.data().get();
int* d_col_types = d_types.data().get();
IndexT* ptr_d_indx = nullptr;
if( out_col_indices )
ptr_d_indx = static_cast<IndexT*>(out_col_indices->data);
else
{
d_indx.resize(nrows);
ptr_d_indx = d_indx.data().get();
}
Vector<IndexT> d_sort(nrows, 0);
IndexT* ptr_d_sort = d_sort.data().get();
gdf_column c_agg_p;
c_agg_p.dtype = col_agg->dtype;
c_agg_p.size = nrows;
Vector<char> d_agg_p(nrows * dtype_size(c_agg_p.dtype));//this might be PROBLEMatic (seems harmless)
c_agg_p.data = d_agg_p.data().get();
switch( op )
{
case GDF_SUM:
gdf_group_by_sum(nrows,
h_columns,
static_cast<size_t>(ncols),
ctxt->flag_sorted,
*col_agg,
d_col_data, //allocated
d_col_types,//allocated
ptr_d_sort, //allocated
c_agg_p, //allocated
ptr_d_indx, //allocated (or, passed in)
*out_col_agg,
&n_group);
break;
case GDF_MIN:
gdf_group_by_min(nrows,
h_columns,
static_cast<size_t>(ncols),
ctxt->flag_sorted,
*col_agg,
d_col_data, //allocated
d_col_types,//allocated
ptr_d_sort, //allocated
c_agg_p, //allocated
ptr_d_indx, //allocated (or, passed in)
*out_col_agg,
&n_group);
break;
case GDF_MAX:
gdf_group_by_max(nrows,
h_columns,
static_cast<size_t>(ncols),
ctxt->flag_sorted,
*col_agg,
d_col_data, //allocated
d_col_types,//allocated
ptr_d_sort, //allocated
c_agg_p, //allocated
ptr_d_indx, //allocated (or, passed in)
*out_col_agg,
&n_group);
break;
case GDF_AVG:
{
Vector<IndexT> d_cout(nrows, 0);
IndexT* ptr_d_cout = d_cout.data().get();
gdf_group_by_avg(nrows,
h_columns,
static_cast<size_t>(ncols),
ctxt->flag_sorted,
*col_agg,
d_col_data, //allocated
d_col_types,//allocated
ptr_d_sort, //allocated
ptr_d_cout, //allocated
c_agg_p, //allocated
ptr_d_indx, //allocated (or, passed in)
*out_col_agg,
&n_group);
}
break;
case GDF_COUNT_DISTINCT:
{
assert( out_col_agg );
//assert( out_col_agg->dtype == GDF_INT64 );//==size_t ?????
assert( out_col_agg->size >= 1);
Vector<IndexT> d_counts(nrows, 0);
IndexT* ptr_d_vals = d_counts.data().get();
gdf_group_by_count(nrows,
h_columns,
static_cast<size_t>(ncols),
ctxt->flag_sorted,
d_col_data, //allocated
d_col_types,//allocated
ptr_d_sort, //allocated
ptr_d_indx, //allocated (or, passed in)
ptr_d_vals, //passed in
&n_group);
IndexT* p_out = static_cast<IndexT*>(out_col_agg->data);
p_out[0] = static_cast<IndexT>(n_group);
}
break;
case GDF_COUNT:
{
assert( out_col_agg );
//assert( out_col_agg->dtype == GDF_INT64 );//==size_t ?????
IndexT* ptr_d_vals = static_cast<IndexT*>(out_col_agg->data);
gdf_group_by_count(nrows,
h_columns,
static_cast<size_t>(ncols),
ctxt->flag_sorted,
d_col_data, //allocated
d_col_types,//allocated
ptr_d_sort, //allocated
ptr_d_indx, //allocated (or, passed in)
ptr_d_vals, //passed in
&n_group);
}
break;
}
if( out_col_values )
{
multi_gather_host(ncols, cols, out_col_values, ptr_d_indx, n_group);
}
out_col_agg->size = n_group;
if( out_col_indices )
out_col_indices->size = n_group;
//TODO: out_<col>->valid = ?????
}
else if( ctxt->flag_method == GDF_HASH )
{
//TODO:
//HASH-based
}
else
{
return GDF_UNSUPPORTED_METHOD;
}
return GDF_SUCCESS;
}
}//end anonymous namespace
//apparent duplication of info between
//gdf_column array and two arrays:
// d_cols = data slice of gdf_column array;
// d_types = dtype slice of gdf_column array;
//but it's necessary because the gdf_column array is host
//(even though its data slice is on device)
//
gdf_error gdf_order_by(size_t nrows, //in: # rows
gdf_column* cols, //in: host-side array of gdf_columns
size_t ncols, //in: # cols
void** d_cols, //out: pre-allocated device-side array to be filled with gdf_column::data for each column; slicing of gdf_column array (host)
int* d_types, //out: pre-allocated device-side array to be filled with gdf_column::dtype for each column; slicing of gdf_column array (host)
size_t* d_indx) //out: device-side array of re-ordered row indices
{
//copy H-D:
//
soa_col_info(cols, ncols, d_cols, d_types);
multi_col_order_by(nrows,
ncols,
d_cols,
d_types,
d_indx);
return GDF_SUCCESS;
}
//apparent duplication of info between
//gdf_column array and two arrays:
// d_cols = data slice of gdf_column array;
// d_types = dtype slice of gdf_column array;
//but it's necessary because the gdf_column array is host
//(even though its data slice is on device)
//
gdf_error gdf_filter(size_t nrows, //in: # rows
gdf_column* cols, //in: host-side array of gdf_columns
size_t ncols, //in: # cols
void** d_cols, //out: pre-allocated device-side array to be filled with gdf_column::data for each column; slicing of gdf_column array (host)
int* d_types, //out: pre-allocated device-side array to be filled with gdf_column::dtype for each column; slicing of gdf_column array (host)
void** d_vals, //in: device-side array of values to filter against (type-erased)
size_t* d_indx, //out: device-side array of row indices that remain after filtering
size_t* new_sz) //out: host-side # rows that remain after filtering
{
//copy H-D:
//
soa_col_info(cols, ncols, d_cols, d_types);
*new_sz = multi_col_filter(nrows,
ncols,
d_cols,
d_types,
d_vals,
d_indx);
return GDF_SUCCESS;
}
gdf_error gdf_group_by_sum(int ncols, // # columns
gdf_column** cols, //input cols
gdf_column* col_agg, //column to aggregate on
gdf_column* out_col_indices, //if not null return indices of re-ordered rows
gdf_column** out_col_values, //if not null return the grouped-by columns
//(multi-gather based on indices, which are needed anyway)
gdf_column* out_col_agg, //aggregation result
gdf_context* ctxt) //struct with additional info: bool is_sorted, flag_sort_or_hash, bool flag_count_distinct
{
return gdf_group_by_single(ncols, cols, col_agg, out_col_indices, out_col_values, out_col_agg, ctxt, GDF_SUM);
}
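/*
 * Editorial sketch (not part of the original source) of how a caller might drive the
 * sort-based path above; only the gdf_context fields and gdf_column members already
 * used in this file are relied on, everything else (names, sizes) is illustrative.
 *
 *   gdf_context ctxt;
 *   ctxt.flag_method   = GDF_SORT;   // only the sort path is implemented here
 *   ctxt.flag_sorted   = 0;          // keys are not pre-sorted
 *   ctxt.flag_distinct = 0;
 *   // key_cols[0..ncols-1], agg_col, out_keys[0..ncols-1] and out_agg are gdf_columns
 *   // whose device buffers were allocated by the caller with at least nrows entries.
 *   gdf_error err = gdf_group_by_sum(ncols, key_cols, &agg_col,
 *                                    nullptr,      // re-ordered row indices not needed
 *                                    out_keys,     // gathered key columns
 *                                    &out_agg, &ctxt);
 *   // on success out_agg.size (and each out_keys[i]->size) is the number of groups.
 */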
gdf_error gdf_group_by_min(int ncols, // # columns
gdf_column** cols, //input cols
gdf_column* col_agg, //column to aggregate on
gdf_column* out_col_indices, //if not null return indices of re-ordered rows
gdf_column** out_col_values, //if not null return the grouped-by columns
//(multi-gather based on indices, which are needed anyway)
gdf_column* out_col_agg, //aggregation result
gdf_context* ctxt) //struct with additional info: bool is_sorted, flag_sort_or_hash, bool flag_count_distinct
{
return gdf_group_by_single(ncols, cols, col_agg, out_col_indices, out_col_values, out_col_agg, ctxt, GDF_MIN);
}
gdf_error gdf_group_by_max(int ncols, // # columns
gdf_column** cols, //input cols
gdf_column* col_agg, //column to aggregate on
gdf_column* out_col_indices, //if not null return indices of re-ordered rows
gdf_column** out_col_values, //if not null return the grouped-by columns
//(multi-gather based on indices, which are needed anyway)
gdf_column* out_col_agg, //aggregation result
gdf_context* ctxt) //struct with additional info: bool is_sorted, flag_sort_or_hash, bool flag_count_distinct
{
return gdf_group_by_single(ncols, cols, col_agg, out_col_indices, out_col_values, out_col_agg, ctxt, GDF_MAX);
}
gdf_error gdf_group_by_avg(int ncols, // # columns
gdf_column** cols, //input cols
gdf_column* col_agg, //column to aggregate on
gdf_column* out_col_indices, //if not null return indices of re-ordered rows
gdf_column** out_col_values, //if not null return the grouped-by columns
//(multi-gather based on indices, which are needed anyway)
gdf_column* out_col_agg, //aggregation result
gdf_context* ctxt) //struct with additional info: bool is_sorted, flag_sort_or_hash, bool flag_count_distinct
{
return gdf_group_by_single(ncols, cols, col_agg, out_col_indices, out_col_values, out_col_agg, ctxt, GDF_AVG);
}
gdf_error gdf_group_by_count(int ncols, // # columns
gdf_column** cols, //input cols
gdf_column* col_agg, //column to aggregate on
gdf_column* out_col_indices, //if not null return indices of re-ordered rows
gdf_column** out_col_values, //if not null return the grouped-by columns
//(multi-gather based on indices, which are needed anyway)
gdf_column* out_col_agg, //aggregation result
gdf_context* ctxt) //struct with additional info: bool is_sorted, flag_sort_or_hash, bool flag_count_distinct
{
if( ctxt->flag_distinct )
return gdf_group_by_single(ncols, cols, col_agg, out_col_indices, out_col_values, out_col_agg, ctxt, GDF_COUNT_DISTINCT);
else
return gdf_group_by_single(ncols, cols, col_agg, out_col_indices, out_col_values, out_col_agg, ctxt, GDF_COUNT);
}
| 6344a555542173f1aa17c7e8e8e59ac09ddacee1.cu | /* Copyright 2018 NVIDIA Corporation. All rights reserved. */
//Type-erasure C-style interface for Multi-column Filter, Order-By, and Group-By functionality
#include <gdf/gdf.h>
#include <gdf/utils.h>
#include <gdf/errorutils.h>
///#include "../include/sqls_rtti_comp.hpp" -- CORRECT: put me back
#include "sqls_rtti_comp.hpp"
//using IndexT = int;//okay...
using IndexT = size_t;
namespace{ //anonymous
//helper functions:
//
//flatten AOS info from gdf_columns into SOA (2 arrays):
//(1) column array pointers and (2) types;
//
void soa_col_info(gdf_column* cols, size_t ncols, void** d_cols, int* d_types)
{
std::vector<void*> v_cols(ncols,nullptr);
std::vector<int> v_types(ncols, 0);
for(int i=0;i<ncols;++i)
{
v_cols[i] = cols[i].data;
v_types[i] = cols[i].dtype;
}
void** h_cols = &v_cols[0];
int* h_types = &v_types[0];
cudaMemcpy(d_cols, h_cols, ncols*sizeof(void*), cudaMemcpyHostToDevice);//TODO: add streams
cudaMemcpy(d_types, h_types, ncols*sizeof(int), cudaMemcpyHostToDevice);//TODO: add streams
}
template<typename T>
using Vector = thrust::device_vector<T>;
void type_dispatcher(gdf_dtype col_type,
int col_index,
gdf_column** h_cols_in,
gdf_column** h_cols_out,
IndexT* d_indices,
size_t nrows_new)
{
switch( col_type )
{
case GDF_INT8:
{
using ColType = int8_t;
ColType* d_in = static_cast<ColType*>(h_cols_in[col_index]->data);//pointer semantics (2)
ColType* d_out = static_cast<ColType*>(h_cols_out[col_index]->data);
thrust::gather(thrust::device,
d_indices, d_indices + nrows_new, //map of indices
d_in, //source
d_out); //=source[map]
break;
}
case GDF_INT16:
{
using ColType = int16_t;
ColType* d_in = static_cast<ColType*>(h_cols_in[col_index]->data);
ColType* d_out = static_cast<ColType*>(h_cols_out[col_index]->data);
thrust::gather(thrust::device,
d_indices, d_indices + nrows_new, //map of indices
d_in, //source
d_out); //=source[map]
break;
}
case GDF_INT32:
{
using ColType = int32_t;
ColType* d_in = static_cast<ColType*>(h_cols_in[col_index]->data);
ColType* d_out = static_cast<ColType*>(h_cols_out[col_index]->data);
thrust::gather(thrust::device,
d_indices, d_indices + nrows_new, //map of indices
d_in, //source
d_out); //=source[map]
break;
}
case GDF_INT64:
{
using ColType = int64_t;
ColType* d_in = static_cast<ColType*>(h_cols_in[col_index]->data);
ColType* d_out = static_cast<ColType*>(h_cols_out[col_index]->data);
thrust::gather(thrust::device,
d_indices, d_indices + nrows_new, //map of indices
d_in, //source
d_out); //=source[map]
break;
}
case GDF_FLOAT32:
{
using ColType = float;
ColType* d_in = static_cast<ColType*>(h_cols_in[col_index]->data);
ColType* d_out = static_cast<ColType*>(h_cols_out[col_index]->data);
thrust::gather(thrust::device,
d_indices, d_indices + nrows_new, //map of indices
d_in, //source
d_out); //=source[map]
break;
}
case GDF_FLOAT64:
{
using ColType = double;
ColType* d_in = static_cast<ColType*>(h_cols_in[col_index]->data);
ColType* d_out = static_cast<ColType*>(h_cols_out[col_index]->data);
thrust::gather(thrust::device,
d_indices, d_indices + nrows_new, //map of indices
d_in, //source
d_out); //=source[map]
break;
}
default:
assert( false );//type not handled
}
return;// State::True;
}
//copy from a set of gdf_columns: h_cols_in
//of size (#ncols): ncols
//to another set of columns : h_cols_out
//by gathering via array of indices: d_indices
//of size: nrows_new
//
void multi_gather_host(size_t ncols, gdf_column** h_cols_in, gdf_column** h_cols_out, IndexT* d_indices, size_t nrows_new)
{
for(int col_index = 0; col_index<ncols; ++col_index)
{
gdf_dtype col_type = h_cols_in[col_index]->dtype;
type_dispatcher(col_type,
col_index,
h_cols_in,
h_cols_out,
d_indices,
nrows_new);
h_cols_out[col_index]->dtype = col_type;
h_cols_out[col_index]->size = nrows_new;
//TODO: h_cols_out[col_index]->valid
}
}
int dtype_size(gdf_dtype col_type)
{
switch( col_type )
{
case GDF_INT8:
{
using ColType = int8_t;
return sizeof(ColType);
}
case GDF_INT16:
{
using ColType = int16_t;
return sizeof(ColType);
}
case GDF_INT32:
{
using ColType = int32_t;
return sizeof(ColType);
}
case GDF_INT64:
{
using ColType = int64_t;
return sizeof(ColType);
}
case GDF_FLOAT32:
{
using ColType = float;
return sizeof(ColType);
}
case GDF_FLOAT64:
{
using ColType = double;
return sizeof(ColType);
}
default:
assert( false );//type not handled
}
return 0;
}
#ifdef DEBUG_
void run_echo(size_t nrows, //in: # rows
gdf_column* cols, //in: host-side array of gdf_columns
size_t ncols, //in: # cols
int flag_sorted, //in: flag specifying if rows are pre-sorted (1) or not (0)
gdf_column agg_in)//in: column to aggregate
{
std::cout<<"############# Echo: #############\n";
std::cout<<"nrows: "<<nrows<<"\n";
std::cout<<"ncols: "<<ncols<<"\n";
std::cout<<"sorted: "<<flag_sorted<<"\n";
std::cout<<"input cols:\n";
for(auto i = 0; i < ncols; ++i)
{
switch(i)
{
case 0:
case 1:
{
std::vector<int32_t> v(nrows);
int32_t* p = &v[0];
cudaMemcpy(p, cols[i].data, nrows*sizeof(int32_t), cudaMemcpyDeviceToHost);
std::copy(v.begin(), v.end(), std::ostream_iterator<int32_t>(std::cout,","));
std::cout<<"\n";
break;
}
case 2:
{
std::vector<double> v(nrows);
double* p = &v[0];
cudaMemcpy(p, cols[i].data, nrows*sizeof(double), cudaMemcpyDeviceToHost);
std::copy(v.begin(), v.end(), std::ostream_iterator<double>(std::cout,","));
std::cout<<"\n";
break;
}
}
}
std::cout<<"col to aggregate on:\n";
std::vector<double> v(nrows);
double* p = &v[0];
cudaMemcpy(p, agg_in.data, nrows*sizeof(double), cudaMemcpyDeviceToHost);
std::copy(v.begin(), v.end(), std::ostream_iterator<double>(std::cout,","));
std::cout<<"\n";
}
#endif
//apparent duplication of info between
//gdf_column array and two arrays:
// d_cols = data slice of gdf_column array;
// d_types = dtype slice of gdf_column array;
//but it's necessary because the gdf_column array is host
//(even though its data slice is on device)
//
gdf_error gdf_group_by_count(size_t nrows, //in: # rows
gdf_column* cols, //in: host-side array of gdf_columns
size_t ncols, //in: # cols
int flag_sorted, //in: flag specifying if rows are pre-sorted (1) or not (0)
void** d_cols, //out: pre-allocated device-side array to be filled with gdf_column::data for each column; slicing of gdf_column array (host)
int* d_types, //out: pre-allocated device-side array to be filled with gdf_column::dtype for each column; slicing of gdf_column array (host)
IndexT* d_indx, //out: device-side array of row indices after sorting
IndexT* d_kout, //out: device-side array of rows after group-by
IndexT* d_count, //out: device-side array of aggregated values (COUNT-ed) as a result of group-by;
size_t* new_sz) //out: host-side # rows of d_count
{
//copy H-D:
//
soa_col_info(cols, ncols, d_cols, d_types);
*new_sz = multi_col_group_by_count_sort(nrows,
ncols,
d_cols,
d_types,
d_indx,
d_kout,
d_count,
flag_sorted);
return GDF_SUCCESS;
}
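//Illustrative sketch (assumption): soa_col_info() itself is defined elsewhere in this
//library; the hypothetical helper below only spells out the H-D "slicing" described in
//the comment above, i.e. staging each gdf_column's data pointer and dtype on the host and
//copying them into the two pre-allocated device arrays. Details may differ from the real function.
//
inline void soa_col_info_sketch(gdf_column* cols, size_t ncols, void** d_cols, int* d_types)
{
  std::vector<void*> h_cols(ncols);
  std::vector<int> h_types(ncols);
  for(size_t i = 0; i < ncols; ++i)
  {
    h_cols[i] = cols[i].data;   //device pointer held by the host-side struct
    h_types[i] = cols[i].dtype; //dtype enum stored as int
  }
  cudaMemcpy(d_cols, h_cols.data(), ncols*sizeof(void*), cudaMemcpyHostToDevice);
  cudaMemcpy(d_types, h_types.data(), ncols*sizeof(int), cudaMemcpyHostToDevice);
}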
//apparent duplication of info between
//gdf_column array and two arrays:
// d_cols = data slice of gdf_column array;
// d_types = dtype slice of gdf_column array;
//but it's necessary because the gdf_column array is host
//(even though its data slice is on device)
//
gdf_error gdf_group_by_sum(size_t nrows, //in: # rows
gdf_column* cols, //in: host-side array of gdf_columns
size_t ncols, //in: # cols
int flag_sorted, //in: flag specifying if rows are pre-sorted (1) or not (0)
gdf_column& agg_in,//in: column to aggregate
void** d_cols, //out: pre-allocated device-side array to be filled with gdf_column::data for each column; slicing of gdf_column array (host)
int* d_types, //out: pre-allocated device-side array to be filled with gdf_column::dtype for each column; slicing of gdf_column array (host)
IndexT* d_indx, //out: device-side array of row indices after sorting
gdf_column& agg_p, //out: reordering of d_agg after sorting; requires shallow (trivial) copy-construction (see static_assert below);
IndexT* d_kout, //out: device-side array of rows after group-by
gdf_column& c_vout,//out: aggregated column; requires shallow (trivial) copy-construction (see static_assert below);
size_t* new_sz) //out: host-side # rows of d_count
{
//not supported by g++-4.8:
//
//static_assert(std::is_trivially_copy_constructible<gdf_column>::value,
// "error: gdf_column must have shallow copy constructor; otherwise cannot pass output by copy.");
#ifdef DEBUG_
run_echo(nrows, //in: # rows
cols, //in: host-side array of gdf_columns
ncols, //in: # cols
flag_sorted, //in: flag specifying if rows are pre-sorted (1) or not (0)
agg_in);//in: column to aggregate
#endif
assert( agg_in.dtype == agg_p.dtype );
assert( agg_in.dtype == c_vout.dtype );
//copy H-D:
//
soa_col_info(cols, ncols, d_cols, d_types);
switch( agg_in.dtype )
{
case GDF_INT8:
{
using T = char;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_sum_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT16:
{
using T = short;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_sum_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT32:
{
using T = int;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_sum_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT64:
{
using T = long;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_sum_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_FLOAT32:
{
using T = float;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_sum_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_FLOAT64:
{
using T = double;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_sum_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
default:
return GDF_UNSUPPORTED_DTYPE;
}
return GDF_SUCCESS;
}
//apparent duplication of info between
//gdf_column array and two arrays:
// d_cols = data slice of gdf_column array;
// d_types = dtype slice of gdf_column array;
//but it's necessary because the gdf_column array is host
//(even though its data slice is on device)
//
gdf_error gdf_group_by_min(size_t nrows, //in: # rows
gdf_column* cols, //in: host-side array of gdf_columns
size_t ncols, //in: # cols
int flag_sorted, //in: flag specifying if rows are pre-sorted (1) or not (0)
gdf_column& agg_in,//in: column to aggregate
void** d_cols, //out: pre-allocated device-side array to be filled with gdf_column::data for each column; slicing of gdf_column array (host)
int* d_types, //out: pre-allocated device-side array to be filled with gdf_column::dtype for each column; slicing of gdf_column array (host)
IndexT* d_indx, //out: device-side array of row indices after sorting
gdf_column& agg_p, //out: reordering of d_agg after sorting; requires shallow (trivial) copy-construction (see static_assert below);
IndexT* d_kout, //out: device-side array of rows after group-by
gdf_column& c_vout,//out: aggregated column; requires shallow (trivial) copy-construction (see static_assert below);
size_t* new_sz) //out: host-side # rows of d_count
{
//not supported by g++-4.8:
//
//static_assert(std::is_trivially_copy_constructible<gdf_column>::value,
// "error: gdf_column must have shallow copy constructor; otherwise cannot pass output by copy.");
assert( agg_in.dtype == agg_p.dtype );
assert( agg_in.dtype == c_vout.dtype );
//copy H-D:
//
soa_col_info(cols, ncols, d_cols, d_types);
switch( agg_in.dtype )
{
case GDF_INT8:
{
using T = char;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_min_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT16:
{
using T = short;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_min_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT32:
{
using T = int;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_min_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT64:
{
using T = long;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_min_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_FLOAT32:
{
using T = float;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_min_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_FLOAT64:
{
using T = double;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_min_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
default:
return GDF_UNSUPPORTED_DTYPE;
}
return GDF_SUCCESS;
}
//apparent duplication of info between
//gdf_column array and two arrays:
// d_cols = data slice of gdf_column array;
// d_types = dtype slice of gdf_column array;
//but it's necessary because the gdf_column array is host
//(even though its data slice is on device)
//
gdf_error gdf_group_by_max(size_t nrows, //in: # rows
gdf_column* cols, //in: host-side array of gdf_columns
size_t ncols, //in: # cols
int flag_sorted, //in: flag specifying if rows are pre-sorted (1) or not (0)
gdf_column& agg_in,//in: column to aggregate
void** d_cols, //out: pre-allocated device-side array to be filled with gdf_column::data for each column; slicing of gdf_column array (host)
int* d_types, //out: pre-allocated device-side array to be filled with gdf_column::dtype for each column; slicing of gdf_column array (host)
IndexT* d_indx, //out: device-side array of row indices after sorting
gdf_column& agg_p, //out: reordering of d_agg after sorting; requires shallow (trivial) copy-construction (see static_assert below);
IndexT* d_kout, //out: device-side array of rows after group-by
gdf_column& c_vout,//out: aggregated column; requires shallow (trivial) copy-construction (see static_assert below);
size_t* new_sz) //out: host-side # rows of d_count
{
//not supported by g++-4.8:
//
//static_assert(std::is_trivially_copy_constructible<gdf_column>::value,
// "error: gdf_column must have shallow copy constructor; otherwise cannot pass output by copy.");
assert( agg_in.dtype == agg_p.dtype );
assert( agg_in.dtype == c_vout.dtype );
//copy H-D:
//
soa_col_info(cols, ncols, d_cols, d_types);
switch( agg_in.dtype )
{
case GDF_INT8:
{
using T = char;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_max_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT16:
{
using T = short;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_max_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT32:
{
using T = int;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_max_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT64:
{
using T = long;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_max_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_FLOAT32:
{
using T = float;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_max_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_FLOAT64:
{
using T = double;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_max_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
default:
return GDF_UNSUPPORTED_DTYPE;
}
return GDF_SUCCESS;
}
//apparent duplication of info between
//gdf_column array and two arrays:
// d_cols = data slice of gdf_column array;
// d_types = dtype slice of gdf_column array;
//but it's necessary because the gdf_column array is host
//(even though its data slice is on device)
//
gdf_error gdf_group_by_avg(size_t nrows, //in: # rows
gdf_column* cols, //in: host-side array of gdf_columns
size_t ncols, //in: # cols
int flag_sorted, //in: flag specifying if rows are pre-sorted (1) or not (0)
gdf_column& agg_in,//in: column to aggregate
void** d_cols, //out: pre-allocated device-side array to be filled with gdf_column::data for each column; slicing of gdf_column array (host)
int* d_types, //out: pre-allocated device-side array to be filled with gdf_column::dtype for each column; slicing of gdf_column array (host)
IndexT* d_indx, //out: device-side array of row indices after sorting
IndexT* d_cout, //out: device-side array of (COUNT-ed) values as a result of group-by;
gdf_column& agg_p, //out: reordering of d_agg after sorting; requires shallow (trivial) copy-construction (see static_assert below);
IndexT* d_kout, //out: device-side array of rows after group-by
gdf_column& c_vout,//out: aggregated column; requires shallow (trivial) copy-construction (see static_assert below);
size_t* new_sz) //out: host-side # rows of d_count
{
//not supported by g++-4.8:
//
//static_assert(std::is_trivially_copy_constructible<gdf_column>::value,
// "error: gdf_column must have shallow copy constructor; otherwise cannot pass output by copy.");
assert( agg_in.dtype == agg_p.dtype );
assert( agg_in.dtype == c_vout.dtype );
//copy H-D:
//
soa_col_info(cols, ncols, d_cols, d_types);
switch( agg_in.dtype )
{
case GDF_INT8:
{
using T = char;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_avg_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_cout,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT16:
{
using T = short;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_avg_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_cout,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT32:
{
using T = int;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_avg_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_cout,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_INT64:
{
using T = long;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_avg_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_cout,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_FLOAT32:
{
using T = float;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_avg_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_cout,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
case GDF_FLOAT64:
{
using T = double;
T* d_agg = static_cast<T*>(agg_in.data);
T* d_agg_p = static_cast<T*>(agg_p.data);
T* d_vout = static_cast<T*>(c_vout.data);
*new_sz = multi_col_group_by_avg_sort(nrows,
ncols,
d_cols,
d_types,
d_agg,
d_indx,
d_cout,
d_agg_p,
d_kout,
d_vout,
flag_sorted);
break;
}
default:
return GDF_UNSUPPORTED_DTYPE;
}
return GDF_SUCCESS;
}
gdf_error gdf_group_by_single(int ncols, // # columns
gdf_column** cols, //input cols
gdf_column* col_agg, //column to aggregate on
gdf_column* out_col_indices, //if not null return indices of re-ordered rows
gdf_column** out_col_values, //if not null return the grouped-by columns
//(multi-gather based on indices, which are needed anyway)
gdf_column* out_col_agg, //aggregation result
gdf_context* ctxt, //struct with additional info: bool is_sorted, flag_sort_or_hash, bool flag_count_distinct
gdf_agg_op op) //aggregation operation
{
if( ncols == 0 )
return GDF_DATASET_EMPTY;
if( ctxt->flag_method == GDF_SORT )
{
std::vector<gdf_column> v_cols(ncols);
for(auto i = 0; i < ncols; ++i)
{
v_cols[i] = *(cols[i]);
}
gdf_column* h_columns = &v_cols[0];
size_t nrows = h_columns[0].size;
if( nrows == 0 )
return GDF_DATASET_EMPTY;
size_t n_group = 0;
Vector<IndexT> d_indx;//allocate only if necessary (see below)
Vector<void*> d_cols(ncols, nullptr);
Vector<int> d_types(ncols, 0);
void** d_col_data = d_cols.data().get();
int* d_col_types = d_types.data().get();
IndexT* ptr_d_indx = nullptr;
if( out_col_indices )
ptr_d_indx = static_cast<IndexT*>(out_col_indices->data);
else
{
d_indx.resize(nrows);
ptr_d_indx = d_indx.data().get();
}
Vector<IndexT> d_sort(nrows, 0);
IndexT* ptr_d_sort = d_sort.data().get();
gdf_column c_agg_p;
c_agg_p.dtype = col_agg->dtype;
c_agg_p.size = nrows;
Vector<char> d_agg_p(nrows * dtype_size(c_agg_p.dtype));//this might be PROBLEMatic (seems harmless)
c_agg_p.data = d_agg_p.data().get();
switch( op )
{
case GDF_SUM:
gdf_group_by_sum(nrows,
h_columns,
static_cast<size_t>(ncols),
ctxt->flag_sorted,
*col_agg,
d_col_data, //allocated
d_col_types,//allocated
ptr_d_sort, //allocated
c_agg_p, //allocated
ptr_d_indx, //allocated (or, passed in)
*out_col_agg,
&n_group);
break;
case GDF_MIN:
gdf_group_by_min(nrows,
h_columns,
static_cast<size_t>(ncols),
ctxt->flag_sorted,
*col_agg,
d_col_data, //allocated
d_col_types,//allocated
ptr_d_sort, //allocated
c_agg_p, //allocated
ptr_d_indx, //allocated (or, passed in)
*out_col_agg,
&n_group);
break;
case GDF_MAX:
gdf_group_by_max(nrows,
h_columns,
static_cast<size_t>(ncols),
ctxt->flag_sorted,
*col_agg,
d_col_data, //allocated
d_col_types,//allocated
ptr_d_sort, //allocated
c_agg_p, //allocated
ptr_d_indx, //allocated (or, passed in)
*out_col_agg,
&n_group);
break;
case GDF_AVG:
{
Vector<IndexT> d_cout(nrows, 0);
IndexT* ptr_d_cout = d_cout.data().get();
gdf_group_by_avg(nrows,
h_columns,
static_cast<size_t>(ncols),
ctxt->flag_sorted,
*col_agg,
d_col_data, //allocated
d_col_types,//allocated
ptr_d_sort, //allocated
ptr_d_cout, //allocated
c_agg_p, //allocated
ptr_d_indx, //allocated (or, passed in)
*out_col_agg,
&n_group);
}
break;
case GDF_COUNT_DISTINCT:
{
assert( out_col_agg );
//assert( out_col_agg->dtype == GDF_INT64 );//==size_t ?????
assert( out_col_agg->size >= 1);
Vector<IndexT> d_counts(nrows, 0);
IndexT* ptr_d_vals = d_counts.data().get();
gdf_group_by_count(nrows,
h_columns,
static_cast<size_t>(ncols),
ctxt->flag_sorted,
d_col_data, //allocated
d_col_types,//allocated
ptr_d_sort, //allocated
ptr_d_indx, //allocated (or, passed in)
ptr_d_vals, //passed in
&n_group);
IndexT* p_out = static_cast<IndexT*>(out_col_agg->data);
p_out[0] = static_cast<IndexT>(n_group);
}
break;
case GDF_COUNT:
{
assert( out_col_agg );
//assert( out_col_agg->dtype == GDF_INT64 );//==size_t ?????
IndexT* ptr_d_vals = static_cast<IndexT*>(out_col_agg->data);
gdf_group_by_count(nrows,
h_columns,
static_cast<size_t>(ncols),
ctxt->flag_sorted,
d_col_data, //allocated
d_col_types,//allocated
ptr_d_sort, //allocated
ptr_d_indx, //allocated (or, passed in)
ptr_d_vals, //passed in
&n_group);
}
break;
}
if( out_col_values )
{
multi_gather_host(ncols, cols, out_col_values, ptr_d_indx, n_group);
}
out_col_agg->size = n_group;
if( out_col_indices )
out_col_indices->size = n_group;
//TODO: out_<col>->valid = ?????
}
else if( ctxt->flag_method == GDF_HASH )
{
//TODO:
//HASH-based
}
else
{
return GDF_UNSUPPORTED_METHOD;
}
return GDF_SUCCESS;
}
}//end unknown namespace
//apparent duplication of info between
//gdf_column array and two arrays:
// d_cols = data slice of gdf_column array;
// d_types = dtype slice of gdf_column array;
//but it's necessary because the gdf_column array is host
//(even though its data slice is on device)
//
gdf_error gdf_order_by(size_t nrows, //in: # rows
gdf_column* cols, //in: host-side array of gdf_columns
size_t ncols, //in: # cols
void** d_cols, //out: pre-allocated device-side array to be filled with gdf_column::data for each column; slicing of gdf_column array (host)
int* d_types, //out: pre-allocated device-side array to be filled with gdf_column::dtype for each column; slicing of gdf_column array (host)
size_t* d_indx) //out: device-side array of re-ordered row indices
{
//copy H-D:
//
soa_col_info(cols, ncols, d_cols, d_types);
multi_col_order_by(nrows,
ncols,
d_cols,
d_types,
d_indx);
return GDF_SUCCESS;
}
//apparent duplication of info between
//gdf_column array and two arrays:
// d_cols = data slice of gdf_column array;
// d_types = dtype slice of gdf_column array;
//but it's necessary because the gdf_column array is host
//(even though its data slice is on device)
//
gdf_error gdf_filter(size_t nrows, //in: # rows
gdf_column* cols, //in: host-side array of gdf_columns
size_t ncols, //in: # cols
void** d_cols, //out: pre-allocated device-side array to be filled with gdf_column::data for each column; slicing of gdf_column array (host)
int* d_types, //out: pre-allocated device-side array to be filled with gdf_column::dtype for each column; slicing of gdf_column array (host)
void** d_vals, //in: device-side array of values to filter against (type-erased)
size_t* d_indx, //out: device-side array of row indices that remain after filtering
size_t* new_sz) //out: host-side # rows that remain after filtering
{
//copy H-D:
//
soa_col_info(cols, ncols, d_cols, d_types);
*new_sz = multi_col_filter(nrows,
ncols,
d_cols,
d_types,
d_vals,
d_indx);
return GDF_SUCCESS;
}
gdf_error gdf_group_by_sum(int ncols, // # columns
gdf_column** cols, //input cols
gdf_column* col_agg, //column to aggregate on
gdf_column* out_col_indices, //if not null return indices of re-ordered rows
gdf_column** out_col_values, //if not null return the grouped-by columns
//(multi-gather based on indices, which are needed anyway)
gdf_column* out_col_agg, //aggregation result
gdf_context* ctxt) //struct with additional info: bool is_sorted, flag_sort_or_hash, bool flag_count_distinct
{
return gdf_group_by_single(ncols, cols, col_agg, out_col_indices, out_col_values, out_col_agg, ctxt, GDF_SUM);
}
gdf_error gdf_group_by_min(int ncols, // # columns
gdf_column** cols, //input cols
gdf_column* col_agg, //column to aggregate on
gdf_column* out_col_indices, //if not null return indices of re-ordered rows
gdf_column** out_col_values, //if not null return the grouped-by columns
//(multi-gather based on indices, which are needed anyway)
gdf_column* out_col_agg, //aggregation result
gdf_context* ctxt) //struct with additional info: bool is_sorted, flag_sort_or_hash, bool flag_count_distinct
{
return gdf_group_by_single(ncols, cols, col_agg, out_col_indices, out_col_values, out_col_agg, ctxt, GDF_MIN);
}
gdf_error gdf_group_by_max(int ncols, // # columns
gdf_column** cols, //input cols
gdf_column* col_agg, //column to aggregate on
gdf_column* out_col_indices, //if not null return indices of re-ordered rows
gdf_column** out_col_values, //if not null return the grouped-by columns
//(multi-gather based on indices, which are needed anyway)
gdf_column* out_col_agg, //aggregation result
gdf_context* ctxt) //struct with additional info: bool is_sorted, flag_sort_or_hash, bool flag_count_distinct
{
return gdf_group_by_single(ncols, cols, col_agg, out_col_indices, out_col_values, out_col_agg, ctxt, GDF_MAX);
}
gdf_error gdf_group_by_avg(int ncols, // # columns
gdf_column** cols, //input cols
gdf_column* col_agg, //column to aggregate on
gdf_column* out_col_indices, //if not null return indices of re-ordered rows
gdf_column** out_col_values, //if not null return the grouped-by columns
//(multi-gather based on indices, which are needed anyway)
gdf_column* out_col_agg, //aggregation result
gdf_context* ctxt) //struct with additional info: bool is_sorted, flag_sort_or_hash, bool flag_count_distinct
{
return gdf_group_by_single(ncols, cols, col_agg, out_col_indices, out_col_values, out_col_agg, ctxt, GDF_AVG);
}
gdf_error gdf_group_by_count(int ncols, // # columns
gdf_column** cols, //input cols
gdf_column* col_agg, //column to aggregate on
gdf_column* out_col_indices, //if not null return indices of re-ordered rows
gdf_column** out_col_values, //if not null return the grouped-by columns
//(multi-gather based on indices, which are needed anyway)
gdf_column* out_col_agg, //aggregation result
gdf_context* ctxt) //struct with additional info: bool is_sorted, flag_sort_or_hash, bool flag_count_distinct
{
if( ctxt->flag_distinct )
return gdf_group_by_single(ncols, cols, col_agg, out_col_indices, out_col_values, out_col_agg, ctxt, GDF_COUNT_DISTINCT);
else
return gdf_group_by_single(ncols, cols, col_agg, out_col_indices, out_col_values, out_col_agg, ctxt, GDF_COUNT);
}
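//Hedged usage sketch (illustration only, not part of the library API above): one way a
//caller might drive the sort-based SUM wrapper defined in this file. Only the gdf_context
//fields that this translation unit actually reads (flag_method, flag_sorted, flag_distinct)
//are set; the key/value columns are assumed to be already allocated and populated with
//device data, and the output columns pre-allocated by the caller.
//
inline gdf_error group_by_sum_usage_sketch(int nkeys,              //in: # key columns
                                           gdf_column** keys,      //in: key columns
                                           gdf_column* vals,       //in: column to aggregate
                                           gdf_column** out_keys,  //out: gathered key columns
                                           gdf_column* out_vals)   //out: per-group sums
{
  gdf_context ctxt;
  ctxt.flag_method = GDF_SORT; //this file implements only the sort-based path
  ctxt.flag_sorted = 0;        //rows are not pre-sorted
  ctxt.flag_distinct = 0;      //only consulted by gdf_group_by_count()
  return gdf_group_by_sum(nkeys, keys, vals,
                          nullptr, //out_col_indices: re-ordered row indices not requested
                          out_keys,
                          out_vals,
                          &ctxt);
}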
|
12e39416371940d127cf42a05b08fd2f67bbacbf.hip | // !!! This is a file automatically generated by hipify!!!
int main()
{
const unsigned int N = 1048576;
const unsigned int bytes = N * sizeof(int);
int *h_a = (int*)malloc(bytes);
int *d_a;
hipMalloc((int**)&d_a, bytes);
memset(h_a, 0, bytes);
hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy(h_a, d_a, bytes, hipMemcpyDeviceToHost);
return 0;
} | 12e39416371940d127cf42a05b08fd2f67bbacbf.cu | int main()
{
const unsigned int N = 1048576;
const unsigned int bytes = N * sizeof(int);
int *h_a = (int*)malloc(bytes);
int *d_a;
cudaMalloc((int**)&d_a, bytes);
memset(h_a, 0, bytes);
cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(h_a, d_a, bytes, cudaMemcpyDeviceToHost);
return 0;
} |
62c1d1707995bbd6d915e0ff5ff55ba68d79b99b.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <string>
#include <sstream>
#include <iomanip>
#include <stdexcept>
#include <opencv2/core/utility.hpp>
#include "opencv2/core/cuda.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
bool running;
Mat left_src, right_src;
Mat left, right;
cuda::GpuMat d_left, d_right;
int ndisp = 88;
Ptr<cuda::StereoBM> bm;
bm = cuda::createStereoBM(ndisp);
// Load images
left_src = imread("s1.png");
right_src = imread("s2.png");
cvtColor(left_src, left, COLOR_BGR2GRAY);
cvtColor(right_src, right, COLOR_BGR2GRAY);
d_left.upload(left);
d_right.upload(right);
imshow("left", left);
imshow("right", right);
// Prepare disparity map of specified type
Mat disp(left.size(), CV_8U);
cuda::GpuMat d_disp(left.size(), CV_8U);
cout << endl;
running = true;
while (running)
{
bm->compute(d_left, d_right, d_disp);
// Show results
d_disp.download(disp);
imshow("disparity", (Mat_<uchar>)disp);
waitKey(1);
}
return 0;
}
/*
int main(void) {
// Allocate & initialize host data - run on the host
Mat leftImage = imread("view0.png", 0);
Mat rightImage = imread("view1.png", 0);
if (leftImage.empty() || rightImage.empty()) {
cout << "Error in reading Left or right image" << endl;
}
int ro;
int co;
co = leftImage.cols;
ro = leftImage.rows;
Mat Disparity;
leftImage.at<uchar>(ro - 5, co - 9) = 255;
imshow("left Image", leftImage);
imshow("right Image", rightImage);
cout << "Width is ===>" << co << " and Hight is ====> " << ro << endl;
waitKey(0);
// host copies of a, b, c
Mat *d_leftImage, *d_rightImage, *d_result; // device copies of a, b, c
// Allocate space for device copies of a, b, c
int size = sizeof(d_leftImage);
hipMalloc((void **)&d_leftImage, size);
hipMalloc((void **)&d_leftImage, size);
hipMalloc((void **)&d_result, size);
// Copy a & b from the host to the device
hipMemcpy(d_leftImage, &leftImage, size, hipMemcpyHostToDevice);
hipMemcpy(d_rightImage, &rightImage, size, hipMemcpyHostToDevice);
// Launch add() kernel on GPU
add <<< 1, 1 >>>(d_a, d_b, d_c);
// Copy result back to the host
hipMemcpy(&Disparity, d_result, size, hipMemcpyDeviceToHost);
// Cleanup
hipFree(d_leftImage); hipFree(d_rightImage); hipFree(d_result);
return 0;
}
*/
Mat MySSD(Mat &left, Mat &right, int win, int MaxOffset, int ro, int co) {
int halfWin = int(win / 2);
Mat result(ro, co, CV_8UC1, Scalar(255));
for (int i = halfWin; i < ro - halfWin; ++i) {
for (int j = halfWin; j < co - halfWin; ++j) {
int ssd_reserved = 9999635;
int bestOffset = 0;
double offset_adjust = 255.0 / MaxOffset;
for (int offset = 0; offset <= MaxOffset; ++offset) {
int ssd = 0;
int ssdTemp = 0;
for (int u = -halfWin; u <= halfWin; u++) {
for (int v = -halfWin; v <= halfWin; v++) {
ssdTemp = (left.at<uchar>((i + u), (j + v)) - right.at<uchar>((i + u), (j + v - offset)));
ssdTemp = ssdTemp*ssdTemp;
ssd = ssd + ssdTemp;
}
}
if (ssd < ssd_reserved) {
bestOffset = offset;
ssd_reserved = ssd;
}
}
result.at<uchar>(i, j) = bestOffset* offset_adjust;
}
}
return result;
}
| 62c1d1707995bbd6d915e0ff5ff55ba68d79b99b.cu |
#include <iostream>
#include <string>
#include <sstream>
#include <iomanip>
#include <stdexcept>
#include <opencv2/core/utility.hpp>
#include "opencv2/core/cuda.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
bool running;
Mat left_src, right_src;
Mat left, right;
cuda::GpuMat d_left, d_right;
int ndisp = 88;
Ptr<cuda::StereoBM> bm;
bm = cuda::createStereoBM(ndisp);
// Load images
left_src = imread("s1.png");
right_src = imread("s2.png");
cvtColor(left_src, left, COLOR_BGR2GRAY);
cvtColor(right_src, right, COLOR_BGR2GRAY);
d_left.upload(left);
d_right.upload(right);
imshow("left", left);
imshow("right", right);
// Prepare disparity map of specified type
Mat disp(left.size(), CV_8U);
cuda::GpuMat d_disp(left.size(), CV_8U);
cout << endl;
running = true;
while (running)
{
bm->compute(d_left, d_right, d_disp);
// Show results
d_disp.download(disp);
imshow("disparity", (Mat_<uchar>)disp);
waitKey(1);
}
return 0;
}
/*
int main(void) {
// Allocate & initialize host data - run on the host
Mat leftImage = imread("view0.png", 0);
Mat rightImage = imread("view1.png", 0);
if (leftImage.empty() || rightImage.empty()) {
cout << "Error in reading Left or right image" << endl;
}
int ro;
int co;
co = leftImage.cols;
ro = leftImage.rows;
Mat Disparity;
leftImage.at<uchar>(ro - 5, co - 9) = 255;
imshow("left Image", leftImage);
imshow("right Image", rightImage);
cout << "Width is ===>" << co << " and Hight is ====> " << ro << endl;
waitKey(0);
// host copies of a, b, c
Mat *d_leftImage, *d_rightImage, *d_result; // device copies of a, b, c
// Allocate space for device copies of a, b, c
int size = sizeof(d_leftImage);
cudaMalloc((void **)&d_leftImage, size);
cudaMalloc((void **)&d_leftImage, size);
cudaMalloc((void **)&d_result, size);
// Copy a & b from the host to the device
cudaMemcpy(d_leftImage, &leftImage, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_rightImage, &rightImage, size, cudaMemcpyHostToDevice);
// Launch add() kernel on GPU
add <<< 1, 1 >>>(d_a, d_b, d_c);
// Copy result back to the host
cudaMemcpy(&Disparity, d_result, size, cudaMemcpyDeviceToHost);
// Cleanup
cudaFree(d_leftImage); cudaFree(d_rightImage); cudaFree(d_result);
return 0;
}
*/
Mat MySSD(Mat &left, Mat &right, int win, int MaxOffset, int ro, int co) {
int halfWin = int(win / 2);
Mat result(ro, co, CV_8UC1, Scalar(255));
for (int i = halfWin; i < ro - halfWin; ++i) {
for (int j = halfWin; j < co - halfWin; ++j) {
int ssd_reserved = 9999635;
int bestOffset = 0;
double offset_adjust = 255.0 / MaxOffset;
for (int offset = 0; offset <= MaxOffset; ++offset) {
int ssd = 0;
int ssdTemp = 0;
for (int u = -halfWin; u <= halfWin; u++) {
for (int v = -halfWin; v <= halfWin; v++) {
ssdTemp = (left.at<uchar>((i + u), (j + v)) - right.at<uchar>((i + u), (j + v - offset)));
ssdTemp = ssdTemp*ssdTemp;
ssd = ssd + ssdTemp;
}
}
if (ssd < ssd_reserved) {
bestOffset = offset;
ssd_reserved = ssd;
}
}
result.at<uchar>(i, j) = bestOffset* offset_adjust;
}
}
return result;
}
|
9a4cf4a8f4dc57cc0ee0590372b102496ed5e879.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Test.h"
__global__ void vecAdd(float *a, float *b, float *c, int n) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < n)
{
for(int j=0;j<1000000;++j)
c[i] = a[i] / b[i];
}
}
void Test::addVec_gpu(float *a, float *b, float *c, int n) {
float *d_a, *d_b, *d_c;
size_t size = n * sizeof(float);
hipMalloc(&d_a, size);
hipMalloc(&d_b, size);
hipMalloc(&d_c, size);
hipMemcpy(d_a, a, n*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_b, b, n*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_c, c, n*sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( vecAdd), dim3((n/256)+1),dim3(256), 0, 0, d_a,d_b,d_c,n);
hipMemcpy(a, d_a, n*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(b, d_b, n*sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(c, d_c, n*sizeof(float), hipMemcpyDeviceToHost);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
}
| 9a4cf4a8f4dc57cc0ee0590372b102496ed5e879.cu | #include "Test.h"
__global__ void vecAdd(float *a, float *b, float *c, int n) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < n)
{
for(int j=0;j<1000000;++j)
c[i] = a[i] / b[i];
}
}
void Test::addVec_gpu(float *a, float *b, float *c, int n) {
float *d_a, *d_b, *d_c;
size_t size = n * sizeof(float);
cudaMalloc(&d_a, size);
cudaMalloc(&d_b, size);
cudaMalloc(&d_c, size);
cudaMemcpy(d_a, a, n*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, n*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_c, c, n*sizeof(float), cudaMemcpyHostToDevice);
vecAdd<<<(n/256)+1,256>>>(d_a,d_b,d_c,n);
cudaMemcpy(a, d_a, n*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(b, d_b, n*sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(c, d_c, n*sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
}
|
a4165ea9bf0782756dea19c50030a9bdc9802b6d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void projectionProfileCuda( const uint8_t * image, uint32_t rowSize, bool horizontal, uint32_t width, uint32_t height, uint32_t * projection )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
projection[image[y * rowSize + x]] = image[y * rowSize + x];
}
} | a4165ea9bf0782756dea19c50030a9bdc9802b6d.cu | #include "includes.h"
__global__ void projectionProfileCuda( const uint8_t * image, uint32_t rowSize, bool horizontal, uint32_t width, uint32_t height, uint32_t * projection )
{
const uint32_t x = blockDim.x * blockIdx.x + threadIdx.x;
const uint32_t y = blockDim.y * blockIdx.y + threadIdx.y;
if ( x < width && y < height ) {
projection[image[y * rowSize + x]] = image[y * rowSize + x];
}
} |
93457ebd6a2adc272ae2b05a804d3217fe946bd3.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// The translation unit for reduction `sum`
#include <cudf/detail/reduction_functions.hpp>
#include "simple_hip.cuh"
std::unique_ptr<cudf::scalar> cudf::experimental::reduction::sum(
column_view const& col, cudf::data_type const output_dtype,
rmm::mr::device_memory_resource* mr, hipStream_t stream)
{
using reducer = cudf::experimental::reduction::simple::element_type_dispatcher< cudf::experimental::reduction::op::sum>;
return cudf::experimental::type_dispatcher(col.type(), reducer(), col, output_dtype, mr, stream);
}
| 93457ebd6a2adc272ae2b05a804d3217fe946bd3.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// The translation unit for reduction `sum`
#include <cudf/detail/reduction_functions.hpp>
#include "simple.cuh"
std::unique_ptr<cudf::scalar> cudf::experimental::reduction::sum(
column_view const& col, cudf::data_type const output_dtype,
rmm::mr::device_memory_resource* mr, cudaStream_t stream)
{
using reducer = cudf::experimental::reduction::simple::element_type_dispatcher< cudf::experimental::reduction::op::sum>;
return cudf::experimental::type_dispatcher(col.type(), reducer(), col, output_dtype, mr, stream);
}
|
165ca5cfbf15fe6a346482af75db585db232b12c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/// Assignment 06: Local Register Memory
///
/// Author: Justin Renga
/// Two Kernels -- Same Operation
///
/// Operation: Take an integer (randomly generated) from two input arrays,
/// take their modulo (input1 % input2) and store the result.
///
/// Kernel 1: Use the global memory to perform the operation (without using local memory
/// as an intermediate).
/// Kernel 2: Transfer the data from global memory to local memory, perform the operation,
/// then transfer back to global memory
// Bibliography (source-list):
// [1] register.cu
// [2] http://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__EVENT.html
// [3] https://devblogs.nvidia.com/how-implement-performance-metrics-cuda-cc/
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
// Declare constant definitions here
#define UINT_SIZE sizeof(unsigned int)
#define INITIAL_DATA 0
#define THREAD_MIN 64
#define THREAD_MAX 4096
// Declare device constant memory here
__constant__ static unsigned int ADDITIVE_VALUES[16];
// Declare global host data here:
unsigned int initializedRNG;
// ---------------------------------------- DEVICE OPERATIONS -----------------------------------------
/// @brief GPU Kernel that utilizes only global data to perform a simple modular division operation. To
/// be used in conjunction with localModularDivide for comparison metrics. Algorithm performed:
/// output = input1 % input2
///
/// @param [ in] input1 The first of the two input arrays to be used in the modular division operation
/// @param [ in] input2 The second of the two input arrays to be used in the modular division operation
/// @param [out] output The array containing the results of the modular division operation
__global__ void globalModularDivide(const unsigned int* const input1,
const unsigned int* const input2,
unsigned int* const output)
{
// Compute the current thread index
unsigned int thread_index = (blockIdx.x * blockDim.x) + threadIdx.x;
// Perform the modular operation and store in the output array (without using local memory)
output[thread_index] = input1[thread_index] % input2[thread_index];
}
/// @brief GPU Kernel that offloads the computations from global data completely, then re-inserts
/// the data back into global memory. To be used with globalModularDivide for comparison metrics.
/// Algorithm performed: output = input1 % input2
///
/// @param [ in] input1 The first of the two input arrays to be used in the modular division operation
/// @param [ in] input2 The second of the two input arrays to be used in the modular division operation
/// @param [out] output The array containing the results of the modular division operation
__global__ void localModularDivide(const unsigned int* const input1,
const unsigned int* const input2,
unsigned int* const output)
{
// Compute the current thread index
unsigned int thread_index = (blockIdx.x * blockDim.x) + threadIdx.x;
// Create local registers to store the intermediate data for the algorithm
unsigned int input1_local = input1[thread_index];
unsigned int input2_local = input2[thread_index];
// Create a local register that will store the result of the algorithm
unsigned int output_local = input1_local % input2_local;
// Store the result of the algorithm into the global array
output[thread_index] = output_local;
}
__global__ void add_values_shared(unsigned int* deviceData, const unsigned int elementCount)
{
// Declare statically sized shared memory
__shared__ unsigned int sharedMemory[THREAD_MAX];
// Compute the current thread index
unsigned int threadIndex = (blockIdx.x * blockDim.x) + threadIdx.x;
// Copy data from the device to the shared memory pool (and perform an operation using constant memory)
sharedMemory[threadIndex] = deviceData[threadIndex];
// Perform thread synchronization
__syncthreads();
unsigned int exponentPrimer = threadIndex % 2;
for (unsigned int i = 0; i < 16; ++i)
{
unsigned int currentConstant = ADDITIVE_VALUES[i];
float value = powf(-1, exponentPrimer) * currentConstant;
sharedMemory[threadIndex] += value;
}
__syncthreads();
// Copy the data from the shared memory back to the device
deviceData[threadIndex] = sharedMemory[elementCount - threadIndex - 1];
}
// ----------------------------------------- HOST OPERATIONS -----------------------------------------
// @brief Initialize the Random number generator and ensure it only initializes one time
__host__ void initializeRandomNumbers()
{
if (initializedRNG == 0)
{
srand(time(NULL));
initializedRNG = 1;
}
}
// @brief Generates a series of random numbers for the provided array, given the number of desired numbers
// and the maximum (exclusive) value.
//
// @param [inout] data The data array that will contain the random numbers
// @param [ in] elementCount The number of elements to store in the data array
// @param [ in] max The maximum random number to use (exclusive)
__host__ void generateRandomNumbers( unsigned int* data,
const unsigned int elementCount,
const unsigned int max)
{
// Check to make sure the RNG has been initialized
if (initializedRNG == 0)
{
// If not, initialize the RNG
initializeRandomNumbers();
}
// Generate random data between 0 and the provided maximum value
for (unsigned int i = 0; i < elementCount; ++i)
{
data[i] = rand() % max;
}
}
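// Hedged addition (illustration only, not part of the original assignment code): a minimal
// host-side check for the operation described in the file header (output = input1 % input2).
// The helper name is hypothetical; it could be called from run_gpu_algorithm() after a
// kernel's result has been copied back into hostOutput.
__host__ unsigned int countModularDivideMismatches(const unsigned int* input1,
                                                   const unsigned int* input2,
                                                   const unsigned int* output,
                                                   const unsigned int elementCount)
{
    unsigned int mismatches = 0;
    for (unsigned int i = 0; i < elementCount; ++i)
    {
        // Recompute the expected value on the CPU and compare against the GPU result
        // (as in the kernels, a zero in input2 would be undefined behaviour)
        if (output[i] != input1[i] % input2[i])
        {
            ++mismatches;
        }
    }
    return mismatches;
}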
__host__ void run_gpu_algorithm(int blockCount, int threadCount)
{
// Step 1: Compute the size of the device array based on the block and thread/per block counts
unsigned int elementCount = threadCount * blockCount;
unsigned int deviceSize = UINT_SIZE * elementCount;
// Step 2: Allocate the necessary host memory (two input arrays and an output array
// (use malloc for the input, and calloc for the output since we want to modify
// the contents of the input PRIOR to executing the GPU kernels, but we want to
// initialize the output to 0 before copying the device output over)
unsigned int* hostInput1 = (unsigned int*) malloc(deviceSize);
unsigned int* hostInput2 = (unsigned int*) malloc(deviceSize);
unsigned int* hostOutput = (unsigned int*) calloc(elementCount, UINT_SIZE);
// Step 3: Populate the input arrays with random data, using the device size as the maximum value
// (the device size is used purely as a convenience number, and to ensure that the maximum
// value has the potential to change between each run)
generateRandomNumbers(hostInput1, elementCount, deviceSize);
generateRandomNumbers(hostInput2, elementCount, deviceSize);
// Step 4: Allocate the GPU memory arrays
unsigned int* deviceInput1;
unsigned int* deviceInput2;
unsigned int* deviceOutput;
hipMalloc((void**)&deviceInput1, deviceSize);
hipMalloc((void**)&deviceInput2, deviceSize);
hipMalloc((void**)&deviceOutput, deviceSize);
// Step 5: Populate the GPU input with the host input data
hipMemcpy(deviceInput1, hostInput1, deviceSize, hipMemcpyHostToDevice);
hipMemcpy(deviceInput2, hostInput2, deviceSize, hipMemcpyHostToDevice);
// Step 6: Set the GPU output with all zeros
hipMemset(deviceOutput, INITIAL_DATA, deviceSize);
// Step 7: Initialize the CUDA event start/stop timers for benchmarking
hipEvent_t stopLocalEvent;
hipEvent_t stopGlobalEvent;
hipEvent_t stopSharedEvent;
hipEvent_t startLocalEvent;
hipEvent_t startGlobalEvent;
hipEvent_t startSharedEvent;
hipEventCreate(&stopLocalEvent);
hipEventCreate(&stopGlobalEvent);
hipEventCreate(&stopSharedEvent);
hipEventCreate(&startLocalEvent);
hipEventCreate(&startGlobalEvent);
hipEventCreate(&startSharedEvent);
// Step 8: Invoke the global algorithm kernel with recording enabled
hipEventRecord(startGlobalEvent);
hipLaunchKernelGGL(( globalModularDivide), dim3(blockCount), dim3(threadCount), 0, 0, deviceInput1, deviceInput2, deviceOutput);
hipEventRecord(stopGlobalEvent);
hipDeviceSynchronize();
hipGetLastError();
// Step 9: Retrieve the output from the global algorithm kernel
hipMemcpy(hostOutput, deviceOutput, deviceSize, hipMemcpyDeviceToHost);
// Step 10: Obtain the ms duration for the global algorithm
hipEventSynchronize(stopGlobalEvent);
float globalTimeMS = 0.0f;
hipEventElapsedTime(&globalTimeMS, startGlobalEvent, stopGlobalEvent);
// Step 11: Invoke the local algorithm kernel with recording enabled
hipEventRecord(startLocalEvent);
hipLaunchKernelGGL(( localModularDivide), dim3(blockCount), dim3(threadCount), 0, 0, deviceInput1, deviceInput2, deviceOutput);
hipEventRecord(stopLocalEvent);
hipDeviceSynchronize();
hipGetLastError();
// Step 12: Retrieve the output from the local algorithm kernel
hipMemcpy(hostOutput, deviceOutput, deviceSize, hipMemcpyDeviceToHost);
// Step 13: Obtain the ms duration for the local algorithm
hipEventSynchronize(stopLocalEvent);
float localTimeMS = 0.0f;
hipEventElapsedTime(&localTimeMS, startLocalEvent, stopLocalEvent);
// Step 14: Upload the constant memory values to the device:
unsigned int* constantMemory = (unsigned int*) malloc(deviceSize);
generateRandomNumbers(constantMemory, elementCount, deviceSize);
hipMemcpyToSymbol(ADDITIVE_VALUES, constantMemory, UINT_SIZE * 16);
// Step 15: Invoke the shared algorithm kernel with recording enabled
hipEventRecord(startSharedEvent);
hipLaunchKernelGGL(( add_values_shared), dim3(blockCount), dim3(threadCount), 0, 0, deviceOutput, elementCount);
hipEventRecord(stopSharedEvent);
hipDeviceSynchronize();
hipGetLastError();
// Step 16: Retrieve the output from the global algorithm kernel
hipMemcpy(hostOutput, deviceOutput, deviceSize, hipMemcpyDeviceToHost);
// Step 17: Obtain the ms duration for the global algorithm
hipEventSynchronize(stopSharedEvent);
float sharedTimeMS = 0.0f;
hipEventElapsedTime(&sharedTimeMS, startSharedEvent, stopSharedEvent);
// Step 18: Display the results of the two operations
printf("Block Count: %d\t Threads Per Block: %d\t", blockCount, threadCount);
printf("Global Duration: %2f ms\t", globalTimeMS);
printf("Shared Duration: %2f ms\t", sharedTimeMS);
printf("Local Duration: %2f ms\n", localTimeMS );
// Step 19: Free device memory:
hipFree(deviceInput1);
hipFree(deviceInput2);
hipFree(deviceOutput);
// Step 20: Free host memory
free(hostInput1);
free(hostInput2);
free(hostOutput);
// Step 21: Free constant memory
free(constantMemory);
}
/// @brief determine if the provided number is a power of two
///
/// @param [in] number The number to validate
///
/// @return True if the provided number is a power of two, false otherwise
__host__ bool isPowerOfTwo(const int number)
{
// Initialize a mask as 00000000 00000000 00000000 00000001 (on 32-bit machines)
int mask = 0x1;
// Iterate over each of the bits in the mask, left shifting by one to
// iterate to the next power of two
for (unsigned int i = 0; i < sizeof(int) * 8; ++i, mask = mask << 1)
{
// Compute the resulting masked value
int maskedValue = number & mask;
// If the computed value is non-zero and is not the provided number,
// the provided number is not a power of two:
//
// For example, 3 would not be a power of two:
// 3 = 00000000 00000000 00000000 00000011
// mask = 00000000 00000000 00000000 00000010
// maskedValue = 00000000 00000000 00000000 00000010
// maskedValue is non-zero (2), but is also not the provided number (2 != 3)
if (maskedValue != 0 && maskedValue != number)
{
return false;
}
// If the maskedValue is the provided number, then we've confirmed that the
// value is a power of two
if (maskedValue == number)
{
return true;
}
}
// Return false if we've exhausted all possible powers of two the computer can handle
return false;
}
// @brief Display the proper program usage
__host__ void showUsage()
{
printf("Invalid arguments provided. Please see the usage below:\n");
printf(" module_6_jrenga2.exe <bc> <tpb>\n");
printf(" bc - The maximum number of blocks to run with. Must be a positive integer and a power of two.\n");
printf(" tpb - The maximum number of threads per blocks. Must be a positive integer and a power of two.\n");
printf("NOTE: The maximum number of threads (bc * tpb) must be greater than %d \n", THREAD_MIN);
printf(" and less than %d.\n", THREAD_MAX);
printf(" ** TERMINATING **\n");
}
// @brief Main Entry-Point
int main(int argc, char* argv[])
{
// 1. Check the number of arguments.
if (argc != 3)
{
showUsage();
return EXIT_FAILURE;
}
// 2. Attempt to retrieve the integer values of the parameters
// (a value less than or equal to 0 is considered invalid)
int numBlocks = atoi(argv[1]);
if (numBlocks <= 0 || !isPowerOfTwo(numBlocks))
{
showUsage();
return EXIT_FAILURE;
}
int numThreads = atoi(argv[2]);
if (numThreads <= 0 || !isPowerOfTwo(numThreads))
{
showUsage();
return EXIT_FAILURE;
}
int totalThreads = numBlocks * numThreads;
// 2.5 Check to see if the minimum number of threads has been achieved (64)
if (totalThreads < THREAD_MIN || totalThreads > THREAD_MAX)
{
showUsage();
return EXIT_FAILURE;
}
// Do some pre-processing to set up the random number generation
initializedRNG = false;
// Initialize the random numbers
initializeRandomNumbers();
// Iterate from 1 -> numBlocks and 1 -> numThreads to perform metrics on numerous configurations
for (unsigned int blockCount = 1; blockCount <= numBlocks; blockCount = blockCount << 1)
{
for (unsigned int threadCount = 1; threadCount <= numThreads; threadCount = threadCount << 1)
{
run_gpu_algorithm(blockCount, threadCount);
}
}
return EXIT_SUCCESS;
}
| 165ca5cfbf15fe6a346482af75db585db232b12c.cu | /// Assignment 06: Local Register Memory
///
/// Author: Justin Renga
/// Two Kernels -- Same Operation
///
/// Operation: Take an integer (randomly generated) from two input arrays,
/// take their modulo (input1 % input2) and store the result.
///
/// Kernel 1: Use the global memory to perform the operation (without using local memory
/// as an intermediate).
/// Kernel 2: Transfer the data from global memory to local memory, perform the operation,
/// then transfer back to global memory
// Bibliography (source-list):
// [1] register.cu
// [2] http://docs.nvidia.com/cuda/cuda-runtime-api/group__CUDART__EVENT.html
// [3] https://devblogs.nvidia.com/how-implement-performance-metrics-cuda-cc/
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
// Declare constant definitions here
#define UINT_SIZE sizeof(unsigned int)
#define INITIAL_DATA 0
#define THREAD_MIN 64
#define THREAD_MAX 4096
// Declare device constant memory here
__constant__ static unsigned int ADDITIVE_VALUES[16];
// Declare global host data here:
unsigned int initializedRNG;
// ---------------------------------------- DEVICE OPERATIONS -----------------------------------------
/// @brief GPU Kernel that utilizes only global data to perform a simple modular division operation. To
/// be used in conjunction with localModularDivide for comparison metrics. Algorithm performed:
/// output = input1 % input2
///
/// @param [ in] input1 The first of the two input arrays to be used in the modular division operation
/// @param [ in] input2 The second of the two input arrays to be used in the modular division operation
/// @param [out] output The array containing the results of the modular division operation
__global__ void globalModularDivide(const unsigned int* const input1,
const unsigned int* const input2,
unsigned int* const output)
{
// Compute the current thread index
unsigned int thread_index = (blockIdx.x * blockDim.x) + threadIdx.x;
// Perform the modular operation and store in the output array (without using local memory)
output[thread_index] = input1[thread_index] % input2[thread_index];
}
/// @brief GPU Kernel that offloads the computations from global data completely, then re-inserts
/// the data back into global memory. To be used with globalModularDivide for comparison metrics.
/// Algorithm performed: output = input1 % input2
///
/// @param [ in] input1 The first of the two input arrays to be used in the modular division operation
/// @param [ in] input2 The second of the two input arrays to be used in the modular division operation
/// @param [out] output The array containing the results of the modular division operation
__global__ void localModularDivide(const unsigned int* const input1,
const unsigned int* const input2,
unsigned int* const output)
{
// Compute the current thread index
unsigned int thread_index = (blockIdx.x * blockDim.x) + threadIdx.x;
// Create local registers to store the intermediate data for the algorithm
unsigned int input1_local = input1[thread_index];
unsigned int input2_local = input2[thread_index];
// Create a local register that will store the result of the algorithm
unsigned int output_local = input1_local % input2_local;
// Store the result of the algorithm into the global array
output[thread_index] = output_local;
}
__global__ void add_values_shared(unsigned int* deviceData, const unsigned int elementCount)
{
// Declare statically sized shared memory
__shared__ unsigned int sharedMemory[THREAD_MAX];
// Compute the current thread index
unsigned int threadIndex = (blockIdx.x * blockDim.x) + threadIdx.x;
// Copy data from the device to the shared memory pool (and perform an operation using constant memory)
sharedMemory[threadIndex] = deviceData[threadIndex];
// Perform thread synchronization
__syncthreads();
unsigned int exponentPrimer = threadIndex % 2;
for (unsigned int i = 0; i < 16; ++i)
{
unsigned int currentConstant = ADDITIVE_VALUES[i];
float value = powf(-1, exponentPrimer) * currentConstant;
sharedMemory[threadIndex] += value;
}
__syncthreads();
// Copy the data from the shared memory back to the device
deviceData[threadIndex] = sharedMemory[elementCount - threadIndex - 1];
}
// ----------------------------------------- HOST OPERATIONS -----------------------------------------
// @brief Initialize the Random number generator and ensure it only initializes one time
__host__ void initializeRandomNumbers()
{
if (initializedRNG == 0)
{
srand(time(NULL));
initializedRNG = 1;
}
}
// @brief Generates a series of random numbers for the provided array, given the number of desired numbers
// and the maximum (exclusive) value.
//
// @param [inout] data The data array that will contain the random numbers
// @param [ in] elementCount The number of elements to store in the data array
// @param [ in] max The maximum random number to use (exclusive)
__host__ void generateRandomNumbers( unsigned int* data,
const unsigned int elementCount,
const unsigned int max)
{
// Check to make sure the RNG has been initialized
if (initializedRNG == 0)
{
// If not, initialize the RNG
initializeRandomNumbers();
}
// Generate random data between 0 and the provided maximum value
for (unsigned int i = 0; i < elementCount; ++i)
{
data[i] = rand() % max;
}
}
__host__ void run_gpu_algorithm(int blockCount, int threadCount)
{
// Step 1: Compute the size of the device array based on the block and thread/per block counts
unsigned int elementCount = threadCount * blockCount;
unsigned int deviceSize = UINT_SIZE * elementCount;
// Step 2: Allocate the necessary host memory (two input arrays and an output array
// (use malloc for the input, and calloc for the output since we want to modify
// the contents of the input PRIOR to executing the GPU kernels, but we want to
// initialize the output to 0 before copying the device output over)
unsigned int* hostInput1 = (unsigned int*) malloc(deviceSize);
unsigned int* hostInput2 = (unsigned int*) malloc(deviceSize);
unsigned int* hostOutput = (unsigned int*) calloc(elementCount, UINT_SIZE);
// Step 3: Populate the input arrays with random data, using the device size as the maximum value
// (the device size is used purely as a convenience number, and to ensure that the maximum
// value has the potential to change between each run)
generateRandomNumbers(hostInput1, elementCount, deviceSize);
generateRandomNumbers(hostInput2, elementCount, deviceSize);
// Step 4: Allocate the GPU memory arrays
unsigned int* deviceInput1;
unsigned int* deviceInput2;
unsigned int* deviceOutput;
cudaMalloc((void**)&deviceInput1, deviceSize);
cudaMalloc((void**)&deviceInput2, deviceSize);
cudaMalloc((void**)&deviceOutput, deviceSize);
// Step 5: Populate the GPU input with the host input data
cudaMemcpy(deviceInput1, hostInput1, deviceSize, cudaMemcpyHostToDevice);
cudaMemcpy(deviceInput2, hostInput2, deviceSize, cudaMemcpyHostToDevice);
// Step 6: Set the GPU output with all zeros
cudaMemset(deviceOutput, INITIAL_DATA, deviceSize);
// Step 7: Initialize the CUDA event start/stop timers for benchmarking
cudaEvent_t stopLocalEvent;
cudaEvent_t stopGlobalEvent;
cudaEvent_t stopSharedEvent;
cudaEvent_t startLocalEvent;
cudaEvent_t startGlobalEvent;
cudaEvent_t startSharedEvent;
cudaEventCreate(&stopLocalEvent);
cudaEventCreate(&stopGlobalEvent);
cudaEventCreate(&stopSharedEvent);
cudaEventCreate(&startLocalEvent);
cudaEventCreate(&startGlobalEvent);
cudaEventCreate(&startSharedEvent);
// Step 8: Invoke the global algorithm kernel with recording enabled
cudaEventRecord(startGlobalEvent);
globalModularDivide<<<blockCount, threadCount>>>(deviceInput1, deviceInput2, deviceOutput);
cudaEventRecord(stopGlobalEvent);
cudaThreadSynchronize();
cudaGetLastError();
// Step 9: Retrieve the output from the global algorithm kernel
cudaMemcpy(hostOutput, deviceOutput, deviceSize, cudaMemcpyDeviceToHost);
// Step 10: Obtain the ms duration for the global algorithm
cudaEventSynchronize(stopGlobalEvent);
float globalTimeMS = 0.0f;
cudaEventElapsedTime(&globalTimeMS, startGlobalEvent, stopGlobalEvent);
// Step 11: Invoke the local algorithm kernel with recording enabled
cudaEventRecord(startLocalEvent);
localModularDivide<<<blockCount, threadCount>>>(deviceInput1, deviceInput2, deviceOutput);
cudaEventRecord(stopLocalEvent);
cudaThreadSynchronize();
cudaGetLastError();
// Step 12: Retrieve the output from the local algorithm kernel
cudaMemcpy(hostOutput, deviceOutput, deviceSize, cudaMemcpyDeviceToHost);
// Step 13: Obtain the ms duration for the local algorithm
cudaEventSynchronize(stopLocalEvent);
float localTimeMS = 0.0f;
cudaEventElapsedTime(&localTimeMS, startLocalEvent, stopLocalEvent);
// Step 14: Upload the constant memory values to the device:
unsigned int* constantMemory = (unsigned int*) malloc(deviceSize);
generateRandomNumbers(constantMemory, elementCount, deviceSize);
cudaMemcpyToSymbol(ADDITIVE_VALUES, constantMemory, UINT_SIZE * 16);
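    // Note: only the first 16 random values are uploaded, which assumes ADDITIVE_VALUES is
    // declared elsewhere as a 16-element __constant__ array; the rest of the staging buffer
    // is left unused.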
// Step 15: Invoke the shared algorithm kernel with recording enabled
cudaEventRecord(startSharedEvent);
add_values_shared<<<blockCount, threadCount>>>(deviceOutput, elementCount);
cudaEventRecord(stopSharedEvent);
cudaThreadSynchronize();
cudaGetLastError();
    // Step 16: Retrieve the output from the shared algorithm kernel
    cudaMemcpy(hostOutput, deviceOutput, deviceSize, cudaMemcpyDeviceToHost);
    // Step 17: Obtain the ms duration for the shared algorithm
cudaEventSynchronize(stopSharedEvent);
float sharedTimeMS = 0.0f;
cudaEventElapsedTime(&sharedTimeMS, startSharedEvent, stopSharedEvent);
// Step 18: Display the results of the two operations
printf("Block Count: %d\t Threads Per Block: %d\t", blockCount, threadCount);
printf("Global Duration: %2f ms\t", globalTimeMS);
printf("Shared Duration: %2f ms\t", sharedTimeMS);
printf("Local Duration: %2f ms\n", localTimeMS );
// Step 19: Free device memory:
cudaFree(deviceInput1);
cudaFree(deviceInput2);
cudaFree(deviceOutput);
// Step 20: Free host memory
free(hostInput1);
free(hostInput2);
free(hostOutput);
    // Step 21: Free the host staging buffer used for the constant memory upload
free(constantMemory);
}
/// @brief determine if the provided number is a power of two
///
/// @param [in] number The number to validate
///
/// @return True if the provided number is a power of two, false otherwise
__host__ bool isPowerOfTwo(const int number)
{
    // Initialize a mask as 00000000 00000000 00000000 00000001 (on 32-bit machines)
int mask = 0x1;
// Iterate over each of the bits in the mask, left shifting by one to
// iterate to the next power of two
for (unsigned int i = 0; i < sizeof(int) * 8; ++i, mask = mask << 1)
{
// Compute the resulting masked value
int maskedValue = number & mask;
// If the computed value is non-zero and is not the provided number,
// the provided number is not a power of two:
//
// For example, 3 would not be a power of two:
// 3 = 00000000 00000000 00000000 00000011
// mask = 00000000 00000000 00000000 00000010
// maskedValue = 00000000 00000000 00000000 00000010
    //    maskedValue is non-zero (2), but is also not the provided number (2 != 3)
if (maskedValue != 0 && maskedValue != number)
{
return false;
}
// If the maskedValue is the provided number, then we've confirmed that the
// value is a power of two
if (maskedValue == number)
{
return true;
}
}
// Return false if we've exhausted all possible powers of two the computer can handle
return false;
}
// @brief Display the proper program usage
__host__ void showUsage()
{
printf("Invalid arguments provided. Please see the usage below:\n");
printf(" module_6_jrenga2.exe <bc> <tpb>\n");
printf(" bc - The maximum number of blocks to run with. Must be a positive integer and a power of two.\n");
printf(" tpb - The maximum number of threads per blocks. Must be a positive integer and a power of two.\n");
    printf("NOTE: The total number of threads (bc * tpb) must be at least %d \n", THREAD_MIN);
    printf("      and at most %d.\n", THREAD_MAX);
printf(" ** TERMINATING **\n");
}
// @brief Main Entry-Point
int main(int argc, char* argv[])
{
// 1. Check the number of arguments.
if (argc != 3)
{
showUsage();
return EXIT_FAILURE;
}
// 2. Attempt to retrieve the integer values of the parameters
// (a value less than or equal to 0 is considered invalid)
int numBlocks = atoi(argv[1]);
if (numBlocks <= 0 || !isPowerOfTwo(numBlocks))
{
showUsage();
return EXIT_FAILURE;
}
int numThreads = atoi(argv[2]);
if (numThreads <= 0 || !isPowerOfTwo(numThreads))
{
showUsage();
return EXIT_FAILURE;
}
int totalThreads = numBlocks * numThreads;
    // 2.5 Check that the total thread count is at least THREAD_MIN (64) and at most THREAD_MAX
if (totalThreads < THREAD_MIN || totalThreads > THREAD_MAX)
{
showUsage();
return EXIT_FAILURE;
}
// Do some pre-processing to set up the random number generation
initializedRNG = false;
// Initialize the random numbers
initializeRandomNumbers();
// Iterate from 1 -> numBlocks and 1 -> numThreads to perform metrics on numerous configurations
for (unsigned int blockCount = 1; blockCount <= numBlocks; blockCount = blockCount << 1)
{
for (unsigned int threadCount = 1; threadCount <= numThreads; threadCount = threadCount << 1)
{
run_gpu_algorithm(blockCount, threadCount);
}
}
return EXIT_SUCCESS;
}
|
primosgpu.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#define N 10000000
#define THREADS_PER_BLOCK 1000
//marks all even numbers as composite, except 2
__global__ void pares(char *a, int raiz)
{
    //compute the index this thread will check
int index = blockIdx.x * blockDim.x + (threadIdx.x * 2);
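    //blockDim.x (THREADS_PER_BLOCK) is even and threadIdx.x is doubled, so index is always
    //even; neighbouring blocks overlap in the evens they mark, which is redundant but harmless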
    //skip the number 2
if (index == 2)
return;
if (index < N)
a[index] = 1;
}
//checks the odd numbers
__global__ void impares(char *a, int raiz)
{
    //skip the number 1
int index = blockIdx.x * blockDim.x + (threadIdx.x * 2) + 1;
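    //the +1 (with an even blockDim.x and doubled threadIdx.x) makes index always odd; the
    //marking loop below starts at index*index because any smaller multiple of index has a
    //smaller prime factor and is marked by that factor's thread (the usual sieve argument)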
if (index == 1)
return;
    //check whether this number has already been marked
if (a[index] == 0)
{
int j;
if (index <= raiz)
for (j=index*index; j<N; j+=index)
a[j] = 1;
}
}
int main()
{
    //array in host memory (CPU/RAM)
    char *a = new char[N];
    //array for the device
    char *d_a;
    //square root of the maximum number
    int raiz = sqrt(N);
    //size of the array in bytes
    int size = N * sizeof( char );
    //timing variables
    float tiempo1, tiempo2;
    hipEvent_t inicio1, fin1, inicio2, fin2; // to measure elapsed times, like timestamps
    /* allocate space for the host copy of a and set up input values */
//a = (char *)malloc( size );
for( int i = 0; i < N; i++ )
a[i] = 0;
    //mark 0 and 1 (special cases)
    a[0] = 1;
    a[1] = 1;
    //start timing
    hipEventCreate(&inicio1); // initialize the events
    hipEventCreate(&fin1);
    hipEventRecord( inicio1, 0 ); // record the start time
    /* allocate space for the device copy of a */
    hipMalloc( (void **) &d_a, size );
    /* copy inputs to device */
    /* fix the parameters needed to copy data to the device */
    hipMemcpy( d_a, a, size, hipMemcpyHostToDevice );
    //start timing
    hipEventCreate(&inicio2); // initialize the events
    hipEventCreate(&fin2);
    hipEventRecord( inicio2, 0 ); // record the start time
/* launch the kernel on the GPU */
hipLaunchKernelGGL(( pares), dim3(N / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK) , 0, 0, d_a, raiz );
hipLaunchKernelGGL(( impares), dim3(N / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK) , 0, 0, d_a, raiz );
    hipEventRecord( fin2, 0); // record the end time
    hipEventSynchronize( fin2 ); // wait for the event
    hipEventElapsedTime( &tiempo2, inicio2, fin2 );
    /* copy result back to host */
    /* fix the parameters needed to copy data back to the host */
    hipMemcpy( a, d_a, size, hipMemcpyDeviceToHost );
    //free device memory
    hipFree( d_a );
    hipEventRecord( fin1, 0); // record the end time
    hipEventSynchronize( fin1 ); // wait for the event
    hipEventElapsedTime( &tiempo1, inicio1, fin1 );
    //count how many primes there are
int cuantos=0;
for (int i=0; i<N; i++)
{
if(a[i] == 0)
{
printf( "%d\n", i);
cuantos++;
}
}
printf( "cantidad de numeros primos: %d\n", cuantos);
/* clean up */
    delete[] a;
    printf("Tiempo cálculo %f ms\n", tiempo2);
printf("Tiempo total %f ms\n", tiempo1);
return 0;
} /* end main */
| primosgpu.cu | #include <stdio.h>
#include <math.h>
#define N 10000000
#define THREADS_PER_BLOCK 1000
//marks all even numbers as composite, except 2
__global__ void pares(char *a, int raiz)
{
    //compute the index this thread will check
int index = blockIdx.x * blockDim.x + (threadIdx.x * 2);
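    //blockDim.x (THREADS_PER_BLOCK) is even and threadIdx.x is doubled, so index is always
    //even; neighbouring blocks overlap in the evens they mark, which is redundant but harmless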
    //skip the number 2
if (index == 2)
return;
if (index < N)
a[index] = 1;
}
//checks the odd numbers
__global__ void impares(char *a, int raiz)
{
    //skip the number 1
int index = blockIdx.x * blockDim.x + (threadIdx.x * 2) + 1;
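    //the +1 (with an even blockDim.x and doubled threadIdx.x) makes index always odd; the
    //marking loop below starts at index*index because any smaller multiple of index has a
    //smaller prime factor and is marked by that factor's thread (the usual sieve argument)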
if (index == 1)
return;
    //check whether this number has already been marked
if (a[index] == 0)
{
int j;
if (index <= raiz)
for (j=index*index; j<N; j+=index)
a[j] = 1;
}
}
int main()
{
    //array in host memory (CPU/RAM)
    char *a = new char[N];
    //array for the device
    char *d_a;
    //square root of the maximum number
    int raiz = sqrt(N);
    //size of the array in bytes
    int size = N * sizeof( char );
    //timing variables
    float tiempo1, tiempo2;
    cudaEvent_t inicio1, fin1, inicio2, fin2; // to measure elapsed times, like timestamps
    /* allocate space for the host copy of a and set up input values */
//a = (char *)malloc( size );
for( int i = 0; i < N; i++ )
a[i] = 0;
    //mark 0 and 1 (special cases)
    a[0] = 1;
    a[1] = 1;
    //start timing
    cudaEventCreate(&inicio1); // initialize the events
    cudaEventCreate(&fin1);
    cudaEventRecord( inicio1, 0 ); // record the start time
    /* allocate space for the device copy of a */
    cudaMalloc( (void **) &d_a, size );
    /* copy inputs to device */
    /* fix the parameters needed to copy data to the device */
    cudaMemcpy( d_a, a, size, cudaMemcpyHostToDevice );
    //start timing
    cudaEventCreate(&inicio2); // initialize the events
    cudaEventCreate(&fin2);
    cudaEventRecord( inicio2, 0 ); // record the start time
/* launch the kernel on the GPU */
pares<<< N / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>( d_a, raiz );
impares<<< N / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>( d_a, raiz );
    cudaEventRecord( fin2, 0); // record the end time
    cudaEventSynchronize( fin2 ); // wait for the event
    cudaEventElapsedTime( &tiempo2, inicio2, fin2 );
    /* copy result back to host */
    /* fix the parameters needed to copy data back to the host */
    cudaMemcpy( a, d_a, size, cudaMemcpyDeviceToHost );
    //free device memory
    cudaFree( d_a );
    cudaEventRecord( fin1, 0); // record the end time
    cudaEventSynchronize( fin1 ); // wait for the event
    cudaEventElapsedTime( &tiempo1, inicio1, fin1 );
    //count how many primes there are
int cuantos=0;
for (int i=0; i<N; i++)
{
if(a[i] == 0)
{
printf( "%d\n", i);
cuantos++;
}
}
printf( "cantidad de numeros primos: %d\n", cuantos);
/* clean up */
    delete[] a;
printf("Tiempo cálculo %f ms\n", tiempo2);
printf("Tiempo total %f ms\n", tiempo1);
return 0;
} /* end main */
|
1ff7c55877534d4259f99aec8ccb29b7049f5462.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kSoftMaxCrossEntropyRowMajor.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *mat = NULL;
hipMalloc(&mat, XSIZE*YSIZE);
float *labels = NULL;
hipMalloc(&labels, XSIZE*YSIZE);
float *target = NULL;
hipMalloc(&target, XSIZE*YSIZE);
unsigned int width = 1;
unsigned int height = 1;
float tiny = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
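    // the two loops below round iXSIZE/iYSIZE up to the next multiple of BLOCKX/BLOCKY
    // so that gridBlock * threadBlock covers at least XSIZE x YSIZE threads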
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
kSoftMaxCrossEntropyRowMajor), dim3(gridBlock),dim3(threadBlock), 0, 0, mat,labels,target,width,height,tiny);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
kSoftMaxCrossEntropyRowMajor), dim3(gridBlock),dim3(threadBlock), 0, 0, mat,labels,target,width,height,tiny);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
kSoftMaxCrossEntropyRowMajor), dim3(gridBlock),dim3(threadBlock), 0, 0, mat,labels,target,width,height,tiny);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 1ff7c55877534d4259f99aec8ccb29b7049f5462.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kSoftMaxCrossEntropyRowMajor.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *mat = NULL;
cudaMalloc(&mat, XSIZE*YSIZE);
float *labels = NULL;
cudaMalloc(&labels, XSIZE*YSIZE);
float *target = NULL;
cudaMalloc(&target, XSIZE*YSIZE);
unsigned int width = 1;
unsigned int height = 1;
float tiny = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
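    // the two loops below round iXSIZE/iYSIZE up to the next multiple of BLOCKX/BLOCKY
    // so that gridBlock * threadBlock covers at least XSIZE x YSIZE threads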
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kSoftMaxCrossEntropyRowMajor<<<gridBlock,threadBlock>>>(mat,labels,target,width,height,tiny);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kSoftMaxCrossEntropyRowMajor<<<gridBlock,threadBlock>>>(mat,labels,target,width,height,tiny);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kSoftMaxCrossEntropyRowMajor<<<gridBlock,threadBlock>>>(mat,labels,target,width,height,tiny);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
d82abbec64106641cbb106b28b5b888eedfdc0f8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <sys/time.h>
#define ARRAY_SIZE 1000000
#define blocksize 256
#define TRUE 1
#define FALSE 0
void cpu_saxpy(int n, float a, float *x, float *y)
{
for(int i = 0; i < n; i++)
y[i] = a*x[i] + y[i];
}
__global__ void gpu_saxpy(int n, float a, float *x, float *y)
{
const int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < n) y[i] = a*x[i] + y[i];
}
int cmp_saxpy(int n, float *cpu_y, float *gpu_y)
{
const float round_err = 0.0001;
for(int i = 0; i < ARRAY_SIZE ; i++)
{
if(fabs(cpu_y[i] - gpu_y[i]) >= round_err)
return FALSE;
}
return TRUE;
}
double timeeval(struct timeval t0, struct timeval t1)
{
return (double)(t1.tv_sec - t0.tv_sec) * 1000.0L + (double)(t1.tv_usec - t0.tv_usec) / 1000.0L;
}
int main()
{
srand((unsigned int)time(NULL));
float MAX_GEN = 50.0;
float *x_gpu, *y_gpu, *x_cpu, *y_cpu;
// float *x_gpu, *y_gpu;
hipMalloc(&x_gpu, ARRAY_SIZE*sizeof(float));
hipMalloc(&y_gpu, ARRAY_SIZE*sizeof(float));
float y_cpu_res[ARRAY_SIZE];
x_cpu = (float*)malloc(ARRAY_SIZE*sizeof(float));
y_cpu = (float*)malloc(ARRAY_SIZE*sizeof(float));
for(int i = 0; i < ARRAY_SIZE; i++)
{
x_cpu[i] = (float)rand()/(float)(RAND_MAX/MAX_GEN);
y_cpu[i] = (float)rand()/(float)(RAND_MAX/MAX_GEN);
}
struct timeval start, end;
//move data from cpu to gpu
hipMemcpy(x_gpu, x_cpu, ARRAY_SIZE*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(y_gpu, y_cpu, ARRAY_SIZE*sizeof(float), hipMemcpyHostToDevice);
gettimeofday(&start, NULL);
cpu_saxpy(ARRAY_SIZE,1.0f, x_cpu, y_cpu);
gettimeofday(&end, NULL);
    printf("Computing SAXPY on the CPU… Done!\n");
printf("CPU saxpy: %f milliseconds.\n", timeeval(start, end));
gettimeofday(&start, NULL);
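    //grid size (ARRAY_SIZE + blocksize - 1)/blocksize rounds up so every element gets a thread;
    //the i < n check in the kernel makes the extra threads in the last block do nothing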
hipLaunchKernelGGL(( gpu_saxpy), dim3((ARRAY_SIZE + blocksize - 1)/blocksize), dim3(blocksize), 0, 0, ARRAY_SIZE, 1.0f, x_gpu, y_gpu);
gettimeofday(&end, NULL);
    printf("Computing SAXPY on the GPU… Done!\n");
printf("GPU saxpy: %f milliseconds.\n", timeeval(start, end));
//move data from gpu to cpu
hipMemcpy(y_cpu_res, y_gpu, ARRAY_SIZE*sizeof(float), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
if(cmp_saxpy(ARRAY_SIZE, y_cpu, y_cpu_res)==TRUE)
        printf("Successful\n");
else
printf("Failed\n");
hipFree(x_gpu);
hipFree(y_gpu);
return 0;
}
| d82abbec64106641cbb106b28b5b888eedfdc0f8.cu | #include <stdio.h>
#include <sys/time.h>
#define ARRAY_SIZE 1000000
#define blocksize 256
#define TRUE 1
#define FALSE 0
void cpu_saxpy(int n, float a, float *x, float *y)
{
for(int i = 0; i < n; i++)
y[i] = a*x[i] + y[i];
}
__global__ void gpu_saxpy(int n, float a, float *x, float *y)
{
const int i = blockIdx.x*blockDim.x + threadIdx.x;
if(i < n) y[i] = a*x[i] + y[i];
}
int cmp_saxpy(int n, float *cpu_y, float *gpu_y)
{
const float round_err = 0.0001;
for(int i = 0; i < ARRAY_SIZE ; i++)
{
if(fabs(cpu_y[i] - gpu_y[i]) >= round_err)
return FALSE;
}
return TRUE;
}
double timeeval(struct timeval t0, struct timeval t1)
{
return (double)(t1.tv_sec - t0.tv_sec) * 1000.0L + (double)(t1.tv_usec - t0.tv_usec) / 1000.0L;
}
int main()
{
srand((unsigned int)time(NULL));
float MAX_GEN = 50.0;
float *x_gpu, *y_gpu, *x_cpu, *y_cpu;
// float *x_gpu, *y_gpu;
cudaMalloc(&x_gpu, ARRAY_SIZE*sizeof(float));
cudaMalloc(&y_gpu, ARRAY_SIZE*sizeof(float));
float y_cpu_res[ARRAY_SIZE];
x_cpu = (float*)malloc(ARRAY_SIZE*sizeof(float));
y_cpu = (float*)malloc(ARRAY_SIZE*sizeof(float));
for(int i = 0; i < ARRAY_SIZE; i++)
{
x_cpu[i] = (float)rand()/(float)(RAND_MAX/MAX_GEN);
y_cpu[i] = (float)rand()/(float)(RAND_MAX/MAX_GEN);
}
struct timeval start, end;
//move data from cpu to gpu
cudaMemcpy(x_gpu, x_cpu, ARRAY_SIZE*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(y_gpu, y_cpu, ARRAY_SIZE*sizeof(float), cudaMemcpyHostToDevice);
gettimeofday(&start, NULL);
cpu_saxpy(ARRAY_SIZE,1.0f, x_cpu, y_cpu);
gettimeofday(&end, NULL);
printf("Computing SAXPY on the CPU… Done!\n");
printf("CPU saxpy: %f milliseconds.\n", timeeval(start, end));
gettimeofday(&start, NULL);
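    //grid size (ARRAY_SIZE + blocksize - 1)/blocksize rounds up so every element gets a thread;
    //the i < n check in the kernel makes the extra threads in the last block do nothing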
gpu_saxpy<<<(ARRAY_SIZE + blocksize - 1)/blocksize, blocksize>>>(ARRAY_SIZE, 1.0f, x_gpu, y_gpu);
gettimeofday(&end, NULL);
printf("Computing SAXPY on the GPU… Done!\n");
printf("GPU saxpy: %f milliseconds.\n", timeeval(start, end));
//move data from gpu to cpu
cudaMemcpy(y_cpu_res, y_gpu, ARRAY_SIZE*sizeof(float), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
if(cmp_saxpy(ARRAY_SIZE, y_cpu, y_cpu_res)==TRUE)
        printf("Successful\n");
else
printf("Failed\n");
cudaFree(x_gpu);
cudaFree(y_gpu);
return 0;
}
|
9a37fcbfe6b6738122c8f5b483e930d8dde20874.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void max_pool3d_backward(int B, int N, int M, int C, const int* maxIndex, const float* gradOutput, float* gradInput)
{
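    // Grid-stride loops: blocks stride over the batch dimension and threads stride over the
    // flattened (output position, channel) pairs; atomicAdd is needed because several output
    // positions can map back to the same input point n.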
for(int i=blockIdx.x;i<B;i+=gridDim.x)
{
for(int j=threadIdx.x;j<M*C;j+=blockDim.x)
{
int c = j%C;
int n = maxIndex[i*M*C+j];
atomicAdd(&gradInput[i*N*C+n*C+c],gradOutput[i*M*C+j]);
}
}
} | 9a37fcbfe6b6738122c8f5b483e930d8dde20874.cu | #include "includes.h"
__global__ void max_pool3d_backward(int B, int N, int M, int C, const int* maxIndex, const float* gradOutput, float* gradInput)
{
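    // Grid-stride loops: blocks stride over the batch dimension and threads stride over the
    // flattened (output position, channel) pairs; atomicAdd is needed because several output
    // positions can map back to the same input point n.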
for(int i=blockIdx.x;i<B;i+=gridDim.x)
{
for(int j=threadIdx.x;j<M*C;j+=blockDim.x)
{
int c = j%C;
int n = maxIndex[i*M*C+j];
atomicAdd(&gradInput[i*N*C+n*C+c],gradOutput[i*M*C+j]);
}
}
} |
ecae018638ce1e1343ac7fa5e3212a0160531447.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Brooke Husic and Jared Dunnmon
* Final project CME 253
* Due Feb 17 2017
*/
#include <fstream>
#include <iostream>
#include <math.h>
#include <vector>
#include <chrono>
#include "./debug.h"
#ifdef DEBUG
#define CUDA_CALL(F) if( (F) != hipSuccess ) \
{printf("Error %s at %s:%d\n", hipGetErrorString(hipGetLastError()), \
__FILE__,__LINE__); exit(-1);}
#define CUDA_CHECK() if( (hipPeekAtLastError()) != hipSuccess ) \
{printf("Error %s at %s:%d\n", hipGetErrorString(hipGetLastError()), \
__FILE__,__LINE__-1); exit(-1);}
#else
#define CUDA_CALL(F) (F)
#define CUDA_CHECK()
#endif
#define THREADS_PER_BLOCK_X 32
#define THREADS_PER_BLOCK_Y 32
typedef std::chrono::high_resolution_clock Clock;
/* input protein file and get its xyz coordinates */
void ProteinSetup(std::string protein_inputfile,
std::vector<int>& prot_atomnums,
std::vector<int>& prot_resnums,
std::vector<std::vector<double>>& prot_xyz_coords){
std::ifstream f(protein_inputfile.c_str());
if (f.is_open()) {
std::string klass, code, resname, chain;
int atomnum, resnum;
double x, y, z, occ, temp;
while (f >> klass >> atomnum >> code >> resname
>> chain >> resnum >> x >> y >> z
>> occ >> temp){
std::vector<double> temp_coord;
temp_coord.push_back(x);
temp_coord.push_back(y);
temp_coord.push_back(z);
prot_atomnums.push_back(atomnum);
prot_resnums.push_back(resnum);
prot_xyz_coords.push_back(temp_coord);
}
// some checks
if(prot_atomnums.size() != prot_resnums.size()){
std::cerr << "ERROR: Problem in protein file" << std::endl;
}
if(prot_atomnums.size() != prot_xyz_coords.size()){
std::cerr << "ERROR: Problem in protein file" << std::endl;
}
}
std::cout << "Lines in protein file : " << prot_atomnums.size() << std::endl;
}
/* input ligand file and get its xyz coordinates */
void LigandTrajSetup(std::string ligand_inputfile,
std::vector<int>& lig_trajnums,
std::vector<int>& lig_atomnums,
std::vector<int>& lig_resnums,
std::vector<std::vector<double>>& lig_xyz_coords){
std::ifstream f(ligand_inputfile.c_str());
if (f.is_open()) {
std::string klass, code, resname, chain;
int trajnum, atomnum, resnum;
double x, y, z, occ, temp;
while (f >> trajnum
>> klass >> atomnum >> code >> resname
>> chain >> resnum >> x >> y >> z
>> occ >> temp){
std::vector<double> temp_coord;
temp_coord.push_back(x);
temp_coord.push_back(y);
temp_coord.push_back(z);
lig_trajnums.push_back(trajnum);
lig_atomnums.push_back(atomnum);
lig_resnums.push_back(resnum);
lig_xyz_coords.push_back(temp_coord);
}
// some checks
if(lig_atomnums.size() != lig_trajnums.size()){
std::cerr << "ERROR: Problem in ligand file" << std::endl;
}
if(lig_atomnums.size() != lig_resnums.size()){
std::cerr << "ERROR: Problem in ligand file" << std::endl;
}
if(lig_atomnums.size() != lig_xyz_coords.size()){
std::cerr << "ERROR: Problem in ligand file" << std::endl;
}
}
std::cout << "Lines in ligand file : " << lig_atomnums.size() << std::endl;
std::cout << "Ligand poses in file : " << lig_atomnums.size()/17 << std::endl; //all our ligands have 17 atoms
}
/* simple squared distance */
double ComputeSquaredDistance(std::vector<double> v1, std::vector<double> v2){
double dist_squared;
dist_squared = { (v1[0]-v2[0])*(v1[0]-v2[0])
+ (v1[1]-v2[1])*(v1[1]-v2[1])
+ (v1[2]-v2[2])*(v1[2]-v2[2]) };
return dist_squared;
}
/* cpp contact featurizer */
std::vector<double> LPContactFeaturizer(std::vector<int>& prot_atomnums,
std::vector<std::vector<double>>& prot_xyz_coords,
std::vector<int>& lig_trajnums,
std::vector<std::vector<double>>& lig_xyz_coords){
std::vector<double> all_distances;
for (unsigned int ii = 0; ii < lig_trajnums.size(); ii++){
for (unsigned int jj =0; jj < prot_atomnums.size(); jj++){
double temp_dist = ComputeSquaredDistance(lig_xyz_coords[ii],
prot_xyz_coords[jj]);
temp_dist = sqrt(temp_dist)/10.;
all_distances.push_back(temp_dist);
}
}
return all_distances;
}
/* cuda contact featurizer */
__global__ void cuContacts(double *pxyz, double *lxyz, double *cudists, int *plength, int *llength)
{
int pidx = threadIdx.x + blockIdx.x * blockDim.x;
int lidx = threadIdx.y + blockIdx.y * blockDim.y;
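    // pidx indexes protein atoms (x dimension) and lidx indexes ligand trajectory atoms (y);
    // distances are stored row-major with row length plength[0], i.e. cudists[lidx*plength[0]+pidx],
    // matching the ordering of the CPU version so the two outputs can be compared element by element.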
if ( (pidx < plength[0]) && (lidx< llength[0])){
cudists[pidx+plength[0]*lidx] = ( sqrt(
(pxyz[pidx*3]-lxyz[lidx*3])*(pxyz[pidx*3]-lxyz[lidx*3])
+ (pxyz[pidx*3+1]-lxyz[lidx*3+1])*(pxyz[pidx*3+1]-lxyz[lidx*3+1])
+ (pxyz[pidx*3+2]-lxyz[lidx*3+2])*(pxyz[pidx*3+2]-lxyz[lidx*3+2]) )/10. );
}
__syncthreads();
}
int main(int argc, char *argv[])
{
if (argc != 3)
{
std::cout << "Usage:" << std::endl;
{std::cout << " " << argv[0] << " <protein input file> "
<< " <ligand input file> " << std::endl;}
return 0;
}
std::string protein_inputfile = argv[1];
std::string ligand_inputfile = argv[2];
std::vector<int> prot_atomnums;
std::vector<int> prot_resnums;
std::vector<std::vector<double>> prot_xyz_coords;
std::vector<int> lig_trajnums;
std::vector<int> lig_atomnums;
std::vector<int> lig_resnums;
std::vector<std::vector<double>> lig_xyz_coords;
ProteinSetup(protein_inputfile,
prot_atomnums,
prot_resnums,
prot_xyz_coords);
LigandTrajSetup(ligand_inputfile,
lig_trajnums,
lig_atomnums,
lig_resnums,
lig_xyz_coords);
auto cpp_start = Clock::now();
/* compute distances using cpp*/
std::vector<double> distances = LPContactFeaturizer(prot_atomnums,
prot_xyz_coords,
lig_trajnums,
lig_xyz_coords);
auto cpp_end = Clock::now();
/* print out cpp time stats */
std::cout << "Number of distances to compute : " << distances.size() << std::endl;
std::cout << "Cpp distances calculated in "
<< std::chrono::duration_cast<std::chrono::microseconds>(cpp_end - cpp_start).count()
<< " microseconds" << std::endl;
double *pxyz, *lxyz, *cudists;
double *d_pxyz, *d_lxyz, *d_cudists;
int *plength, *d_plength;
int *llength, *d_llength;
int protein_size = prot_atomnums.size()*3;
int ligand_traj_size = lig_trajnums.size()*3;
int cudists_size = protein_size/3 * ligand_traj_size/3;
/* get GPU device number and name */
int dev;
hipDeviceProp_t deviceProp;
checkCUDA( hipGetDevice( &dev ) );
checkCUDA( hipGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
/* allocate space for device copies of a, b, c */
checkCUDA( hipMalloc( (void **) &d_pxyz, protein_size*sizeof(double)) );
checkCUDA( hipMalloc( (void **) &d_lxyz, ligand_traj_size*sizeof(double)) );
checkCUDA( hipMalloc( (void **) &d_cudists, cudists_size*sizeof(double) ));
checkCUDA( hipMalloc( (void **) &d_plength, sizeof(int) ));
checkCUDA( hipMalloc( (void **) &d_llength, sizeof(int) ));
/* allocate space for host copies of a, b, c and setup input values */
pxyz = (double *)malloc( protein_size *sizeof(double));
lxyz = (double *)malloc( ligand_traj_size *sizeof(double));
cudists = (double *)malloc( cudists_size *sizeof(double));
plength = (int *)malloc( sizeof(int));
llength = (int *)malloc( sizeof(int));
for(unsigned int pp = 0; pp < prot_atomnums.size(); pp++){
pxyz[pp*3] = prot_xyz_coords[pp][0];
pxyz[pp*3+1] = prot_xyz_coords[pp][1];
pxyz[pp*3+2] = prot_xyz_coords[pp][2];
}
for(unsigned int ll = 0; ll < lig_trajnums.size(); ll++){
lxyz[ll*3] = lig_xyz_coords[ll][0];
lxyz[ll*3+1] = lig_xyz_coords[ll][1];
lxyz[ll*3+2] = lig_xyz_coords[ll][2];
}
plength[0] = prot_atomnums.size();
llength[0] = lig_trajnums.size();
/* copy inputs to device */
checkCUDA( hipMemcpy( d_pxyz, pxyz, protein_size*sizeof(double), hipMemcpyHostToDevice ) );
checkCUDA( hipMemcpy( d_lxyz, lxyz, ligand_traj_size*sizeof(double), hipMemcpyHostToDevice ) );
checkCUDA( hipMemcpy( d_plength, plength, sizeof(int), hipMemcpyHostToDevice) );
checkCUDA( hipMemcpy( d_llength, llength, sizeof(int), hipMemcpyHostToDevice) );
/* zero out the C array */
checkCUDA( hipMemset( d_cudists, 0, cudists_size*sizeof(double) ) );
/* setup threadblock size and grid sizes*/
dim3 threads(THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y, 1);
dim3 blocks(plength[0]/threads.x+1,
llength[0]/threads.y+1,
1 );
/* check if threads and blocks are OK */
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
if (threads.x * threads.y * threads.z > prop.maxThreadsPerBlock) {
printf("Too many threads per block \n");
}
if (threads.x > prop.maxThreadsDim[0]) {
printf("Too many threads in x-direction \n");
}
if (threads.y > prop.maxThreadsDim[1]) {
printf("Too many threads in y-direction \n");
}
if (threads.z > prop.maxThreadsDim[2]) {
printf("Too many threads in z-direction \n");
}
printf("Ready to launch kernel\n");
auto cuda_start = Clock::now();
/* launch the kernel on the GPU */
hipLaunchKernelGGL(( cuContacts), dim3(blocks), dim3(threads) , 0, 0, d_pxyz, d_lxyz, d_cudists, d_plength, d_llength );
checkKERNEL();
auto cuda_mid = Clock::now();
/* print out CUDA time stats */
// std::cout << "CUDA distances calculated in "
// << std::chrono::duration_cast<std::chrono::microseconds>(cuda_mid - cuda_start).count()
// << " microseconds" << std::endl;
/* copy result back to host */
checkCUDA( hipMemcpy( cudists, d_cudists, cudists_size*sizeof(double), hipMemcpyDeviceToHost ) );
auto cuda_end = Clock::now();
// std::cout << "CUDA distances copied in "
// << std::chrono::duration_cast<std::chrono::microseconds>(cuda_end - cuda_mid).count()
// << " microseconds" << std::endl;
std::cout << "CUDA distances calculated in: "
<< std::chrono::duration_cast<std::chrono::microseconds>(cuda_end - cuda_start).count()
<< " microseconds" << std::endl;
/* print out distance pairs to file */
std::ofstream f("distances.txt");
if(f.is_open()){
for(unsigned int k = 0; k < distances.size(); k++){
f << distances[k] << " " << cudists[k] << std::endl;
}
}
f.close();
free(pxyz);
free(lxyz);
free(cudists);
    free(plength);
    free(llength);
    checkCUDA( hipFree( d_pxyz ) );
    checkCUDA( hipFree( d_lxyz ) );
    checkCUDA( hipFree( d_cudists ) );
    checkCUDA( hipFree( d_plength ) );
    checkCUDA( hipFree( d_llength ) );
checkCUDA( hipDeviceReset () );
return 0;
} /* end main */
| ecae018638ce1e1343ac7fa5e3212a0160531447.cu | /* Brooke Husic and Jared Dunnmon
* Final project CME 253
* Due Feb 17 2017
*/
#include <fstream>
#include <iostream>
#include <math.h>
#include <vector>
#include <chrono>
#include "./debug.h"
#ifdef DEBUG
#define CUDA_CALL(F) if( (F) != cudaSuccess ) \
{printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), \
__FILE__,__LINE__); exit(-1);}
#define CUDA_CHECK() if( (cudaPeekAtLastError()) != cudaSuccess ) \
{printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), \
__FILE__,__LINE__-1); exit(-1);}
#else
#define CUDA_CALL(F) (F)
#define CUDA_CHECK()
#endif
#define THREADS_PER_BLOCK_X 32
#define THREADS_PER_BLOCK_Y 32
typedef std::chrono::high_resolution_clock Clock;
/* input protein file and get its xyz coordinates */
void ProteinSetup(std::string protein_inputfile,
std::vector<int>& prot_atomnums,
std::vector<int>& prot_resnums,
std::vector<std::vector<double>>& prot_xyz_coords){
std::ifstream f(protein_inputfile.c_str());
if (f.is_open()) {
std::string klass, code, resname, chain;
int atomnum, resnum;
double x, y, z, occ, temp;
while (f >> klass >> atomnum >> code >> resname
>> chain >> resnum >> x >> y >> z
>> occ >> temp){
std::vector<double> temp_coord;
temp_coord.push_back(x);
temp_coord.push_back(y);
temp_coord.push_back(z);
prot_atomnums.push_back(atomnum);
prot_resnums.push_back(resnum);
prot_xyz_coords.push_back(temp_coord);
}
// some checks
if(prot_atomnums.size() != prot_resnums.size()){
std::cerr << "ERROR: Problem in protein file" << std::endl;
}
if(prot_atomnums.size() != prot_xyz_coords.size()){
std::cerr << "ERROR: Problem in protein file" << std::endl;
}
}
std::cout << "Lines in protein file : " << prot_atomnums.size() << std::endl;
}
/* input ligand file and get its xyz coordinates */
void LigandTrajSetup(std::string ligand_inputfile,
std::vector<int>& lig_trajnums,
std::vector<int>& lig_atomnums,
std::vector<int>& lig_resnums,
std::vector<std::vector<double>>& lig_xyz_coords){
std::ifstream f(ligand_inputfile.c_str());
if (f.is_open()) {
std::string klass, code, resname, chain;
int trajnum, atomnum, resnum;
double x, y, z, occ, temp;
while (f >> trajnum
>> klass >> atomnum >> code >> resname
>> chain >> resnum >> x >> y >> z
>> occ >> temp){
std::vector<double> temp_coord;
temp_coord.push_back(x);
temp_coord.push_back(y);
temp_coord.push_back(z);
lig_trajnums.push_back(trajnum);
lig_atomnums.push_back(atomnum);
lig_resnums.push_back(resnum);
lig_xyz_coords.push_back(temp_coord);
}
// some checks
if(lig_atomnums.size() != lig_trajnums.size()){
std::cerr << "ERROR: Problem in ligand file" << std::endl;
}
if(lig_atomnums.size() != lig_resnums.size()){
std::cerr << "ERROR: Problem in ligand file" << std::endl;
}
if(lig_atomnums.size() != lig_xyz_coords.size()){
std::cerr << "ERROR: Problem in ligand file" << std::endl;
}
}
std::cout << "Lines in ligand file : " << lig_atomnums.size() << std::endl;
std::cout << "Ligand poses in file : " << lig_atomnums.size()/17 << std::endl; //all our ligands have 17 atoms
}
/* simple squared distance */
double ComputeSquaredDistance(std::vector<double> v1, std::vector<double> v2){
double dist_squared;
dist_squared = { (v1[0]-v2[0])*(v1[0]-v2[0])
+ (v1[1]-v2[1])*(v1[1]-v2[1])
+ (v1[2]-v2[2])*(v1[2]-v2[2]) };
return dist_squared;
}
/* cpp contact featurizer */
std::vector<double> LPContactFeaturizer(std::vector<int>& prot_atomnums,
std::vector<std::vector<double>>& prot_xyz_coords,
std::vector<int>& lig_trajnums,
std::vector<std::vector<double>>& lig_xyz_coords){
std::vector<double> all_distances;
for (unsigned int ii = 0; ii < lig_trajnums.size(); ii++){
for (unsigned int jj =0; jj < prot_atomnums.size(); jj++){
double temp_dist = ComputeSquaredDistance(lig_xyz_coords[ii],
prot_xyz_coords[jj]);
temp_dist = sqrt(temp_dist)/10.;
all_distances.push_back(temp_dist);
}
}
return all_distances;
}
/* cuda contact featurizer */
__global__ void cuContacts(double *pxyz, double *lxyz, double *cudists, int *plength, int *llength)
{
int pidx = threadIdx.x + blockIdx.x * blockDim.x;
int lidx = threadIdx.y + blockIdx.y * blockDim.y;
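    // pidx indexes protein atoms (x dimension) and lidx indexes ligand trajectory atoms (y);
    // distances are stored row-major with row length plength[0], i.e. cudists[lidx*plength[0]+pidx],
    // matching the ordering of the CPU version so the two outputs can be compared element by element.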
if ( (pidx < plength[0]) && (lidx< llength[0])){
cudists[pidx+plength[0]*lidx] = ( sqrt(
(pxyz[pidx*3]-lxyz[lidx*3])*(pxyz[pidx*3]-lxyz[lidx*3])
+ (pxyz[pidx*3+1]-lxyz[lidx*3+1])*(pxyz[pidx*3+1]-lxyz[lidx*3+1])
+ (pxyz[pidx*3+2]-lxyz[lidx*3+2])*(pxyz[pidx*3+2]-lxyz[lidx*3+2]) )/10. );
}
__syncthreads();
}
int main(int argc, char *argv[])
{
if (argc != 3)
{
std::cout << "Usage:" << std::endl;
{std::cout << " " << argv[0] << " <protein input file> "
<< " <ligand input file> " << std::endl;}
return 0;
}
std::string protein_inputfile = argv[1];
std::string ligand_inputfile = argv[2];
std::vector<int> prot_atomnums;
std::vector<int> prot_resnums;
std::vector<std::vector<double>> prot_xyz_coords;
std::vector<int> lig_trajnums;
std::vector<int> lig_atomnums;
std::vector<int> lig_resnums;
std::vector<std::vector<double>> lig_xyz_coords;
ProteinSetup(protein_inputfile,
prot_atomnums,
prot_resnums,
prot_xyz_coords);
LigandTrajSetup(ligand_inputfile,
lig_trajnums,
lig_atomnums,
lig_resnums,
lig_xyz_coords);
auto cpp_start = Clock::now();
/* compute distances using cpp*/
std::vector<double> distances = LPContactFeaturizer(prot_atomnums,
prot_xyz_coords,
lig_trajnums,
lig_xyz_coords);
auto cpp_end = Clock::now();
/* print out cpp time stats */
std::cout << "Number of distances to compute : " << distances.size() << std::endl;
std::cout << "Cpp distances calculated in "
<< std::chrono::duration_cast<std::chrono::microseconds>(cpp_end - cpp_start).count()
<< " microseconds" << std::endl;
double *pxyz, *lxyz, *cudists;
double *d_pxyz, *d_lxyz, *d_cudists;
int *plength, *d_plength;
int *llength, *d_llength;
int protein_size = prot_atomnums.size()*3;
int ligand_traj_size = lig_trajnums.size()*3;
int cudists_size = protein_size/3 * ligand_traj_size/3;
/* get GPU device number and name */
int dev;
cudaDeviceProp deviceProp;
checkCUDA( cudaGetDevice( &dev ) );
checkCUDA( cudaGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
/* allocate space for device copies of a, b, c */
checkCUDA( cudaMalloc( (void **) &d_pxyz, protein_size*sizeof(double)) );
checkCUDA( cudaMalloc( (void **) &d_lxyz, ligand_traj_size*sizeof(double)) );
checkCUDA( cudaMalloc( (void **) &d_cudists, cudists_size*sizeof(double) ));
checkCUDA( cudaMalloc( (void **) &d_plength, sizeof(int) ));
checkCUDA( cudaMalloc( (void **) &d_llength, sizeof(int) ));
/* allocate space for host copies of a, b, c and setup input values */
pxyz = (double *)malloc( protein_size *sizeof(double));
lxyz = (double *)malloc( ligand_traj_size *sizeof(double));
cudists = (double *)malloc( cudists_size *sizeof(double));
plength = (int *)malloc( sizeof(int));
llength = (int *)malloc( sizeof(int));
for(unsigned int pp = 0; pp < prot_atomnums.size(); pp++){
pxyz[pp*3] = prot_xyz_coords[pp][0];
pxyz[pp*3+1] = prot_xyz_coords[pp][1];
pxyz[pp*3+2] = prot_xyz_coords[pp][2];
}
for(unsigned int ll = 0; ll < lig_trajnums.size(); ll++){
lxyz[ll*3] = lig_xyz_coords[ll][0];
lxyz[ll*3+1] = lig_xyz_coords[ll][1];
lxyz[ll*3+2] = lig_xyz_coords[ll][2];
}
plength[0] = prot_atomnums.size();
llength[0] = lig_trajnums.size();
/* copy inputs to device */
checkCUDA( cudaMemcpy( d_pxyz, pxyz, protein_size*sizeof(double), cudaMemcpyHostToDevice ) );
checkCUDA( cudaMemcpy( d_lxyz, lxyz, ligand_traj_size*sizeof(double), cudaMemcpyHostToDevice ) );
checkCUDA( cudaMemcpy( d_plength, plength, sizeof(int), cudaMemcpyHostToDevice) );
checkCUDA( cudaMemcpy( d_llength, llength, sizeof(int), cudaMemcpyHostToDevice) );
/* zero out the C array */
checkCUDA( cudaMemset( d_cudists, 0, cudists_size*sizeof(double) ) );
/* setup threadblock size and grid sizes*/
dim3 threads(THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y, 1);
dim3 blocks(plength[0]/threads.x+1,
llength[0]/threads.y+1,
1 );
/* check if threads and blocks are OK */
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
if (threads.x * threads.y * threads.z > prop.maxThreadsPerBlock) {
printf("Too many threads per block \n");
}
if (threads.x > prop.maxThreadsDim[0]) {
printf("Too many threads in x-direction \n");
}
if (threads.y > prop.maxThreadsDim[1]) {
printf("Too many threads in y-direction \n");
}
if (threads.z > prop.maxThreadsDim[2]) {
printf("Too many threads in z-direction \n");
}
printf("Ready to launch kernel\n");
auto cuda_start = Clock::now();
/* launch the kernel on the GPU */
cuContacts<<< blocks, threads >>>( d_pxyz, d_lxyz, d_cudists, d_plength, d_llength );
checkKERNEL();
auto cuda_mid = Clock::now();
/* print out CUDA time stats */
// std::cout << "CUDA distances calculated in "
// << std::chrono::duration_cast<std::chrono::microseconds>(cuda_mid - cuda_start).count()
// << " microseconds" << std::endl;
/* copy result back to host */
checkCUDA( cudaMemcpy( cudists, d_cudists, cudists_size*sizeof(double), cudaMemcpyDeviceToHost ) );
auto cuda_end = Clock::now();
// std::cout << "CUDA distances copied in "
// << std::chrono::duration_cast<std::chrono::microseconds>(cuda_end - cuda_mid).count()
// << " microseconds" << std::endl;
std::cout << "CUDA distances calculated in: "
<< std::chrono::duration_cast<std::chrono::microseconds>(cuda_end - cuda_start).count()
<< " microseconds" << std::endl;
/* print out distance pairs to file */
std::ofstream f("distances.txt");
if(f.is_open()){
for(unsigned int k = 0; k < distances.size(); k++){
f << distances[k] << " " << cudists[k] << std::endl;
}
}
f.close();
free(pxyz);
free(lxyz);
free(cudists);
    free(plength);
    free(llength);
    checkCUDA( cudaFree( d_pxyz ) );
    checkCUDA( cudaFree( d_lxyz ) );
    checkCUDA( cudaFree( d_cudists ) );
    checkCUDA( cudaFree( d_plength ) );
    checkCUDA( cudaFree( d_llength ) );
checkCUDA( cudaDeviceReset () );
return 0;
} /* end main */
|
878211f11683f43648b035dc6e67bc9fb6700c28.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
#include "group_points_gpu.h"
__global__ void group_points_grad_kernel_fast(int b, int c, int n, int npoints, int nsample,
const float *__restrict__ grad_out, const int *__restrict__ idx, float *__restrict__ grad_points) {
// grad_out: (B, C, npoints, nsample)
// idx: (B, npoints, nsample)
// output:
// grad_points: (B, C, N)
int bs_idx = blockIdx.z;
int c_idx = blockIdx.y;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int pt_idx = index / nsample;
if (bs_idx >= b || c_idx >= c || pt_idx >= npoints) return;
int sample_idx = index % nsample;
grad_out += bs_idx * c * npoints * nsample + c_idx * npoints * nsample + pt_idx * nsample + sample_idx;
idx += bs_idx * npoints * nsample + pt_idx * nsample + sample_idx;
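    // different (pt_idx, sample_idx) pairs may gather from the same source point idx[0],
    // so the gradient must be accumulated with atomicAdd rather than a plain store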
atomicAdd(grad_points + bs_idx * c * n + c_idx * n + idx[0] , grad_out[0]);
}
void group_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample,
const float *grad_out, const int *idx, float *grad_points) {
// grad_out: (B, C, npoints, nsample)
// idx: (B, npoints, nsample)
// output:
// grad_points: (B, C, N)
hipError_t err;
dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( group_points_grad_kernel_fast), dim3(blocks), dim3(threads), 0, 0, b, c, n, npoints, nsample, grad_out, idx, grad_points);
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
__global__ void group_points_kernel_fast(int b, int c, int n, int npoints, int nsample,
const float *__restrict__ points, const int *__restrict__ idx, float *__restrict__ out) {
// points: (B, C, N)
// idx: (B, npoints, nsample)
// output:
// out: (B, C, npoints, nsample)
int bs_idx = blockIdx.z;
int c_idx = blockIdx.y;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int pt_idx = index / nsample;
if (bs_idx >= b || c_idx >= c || pt_idx >= npoints) return;
int sample_idx = index % nsample;
idx += bs_idx * npoints * nsample + pt_idx * nsample + sample_idx;
int in_idx = bs_idx * c * n + c_idx * n + idx[0];
int out_idx = bs_idx * c * npoints * nsample + c_idx * npoints * nsample + pt_idx * nsample + sample_idx;
out[out_idx] = points[in_idx];
}
void group_points_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample,
const float *points, const int *idx, float *out) {
// points: (B, C, N)
// idx: (B, npoints, nsample)
// output:
// out: (B, C, npoints, nsample)
hipError_t err;
dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
hipLaunchKernelGGL(( group_points_kernel_fast), dim3(blocks), dim3(threads), 0, 0, b, c, n, npoints, nsample, points, idx, out);
// hipDeviceSynchronize(); // for using printf in kernel function
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
| 878211f11683f43648b035dc6e67bc9fb6700c28.cu | #include <stdio.h>
#include <stdlib.h>
#include "cuda_utils.h"
#include "group_points_gpu.h"
__global__ void group_points_grad_kernel_fast(int b, int c, int n, int npoints, int nsample,
const float *__restrict__ grad_out, const int *__restrict__ idx, float *__restrict__ grad_points) {
// grad_out: (B, C, npoints, nsample)
// idx: (B, npoints, nsample)
// output:
// grad_points: (B, C, N)
int bs_idx = blockIdx.z;
int c_idx = blockIdx.y;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int pt_idx = index / nsample;
if (bs_idx >= b || c_idx >= c || pt_idx >= npoints) return;
int sample_idx = index % nsample;
grad_out += bs_idx * c * npoints * nsample + c_idx * npoints * nsample + pt_idx * nsample + sample_idx;
idx += bs_idx * npoints * nsample + pt_idx * nsample + sample_idx;
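    // different (pt_idx, sample_idx) pairs may gather from the same source point idx[0],
    // so the gradient must be accumulated with atomicAdd rather than a plain store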
atomicAdd(grad_points + bs_idx * c * n + c_idx * n + idx[0] , grad_out[0]);
}
void group_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample,
const float *grad_out, const int *idx, float *grad_points) {
// grad_out: (B, C, npoints, nsample)
// idx: (B, npoints, nsample)
// output:
// grad_points: (B, C, N)
cudaError_t err;
dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
group_points_grad_kernel_fast<<<blocks, threads>>>(b, c, n, npoints, nsample, grad_out, idx, grad_points);
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
__global__ void group_points_kernel_fast(int b, int c, int n, int npoints, int nsample,
const float *__restrict__ points, const int *__restrict__ idx, float *__restrict__ out) {
// points: (B, C, N)
// idx: (B, npoints, nsample)
// output:
// out: (B, C, npoints, nsample)
int bs_idx = blockIdx.z;
int c_idx = blockIdx.y;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int pt_idx = index / nsample;
if (bs_idx >= b || c_idx >= c || pt_idx >= npoints) return;
int sample_idx = index % nsample;
idx += bs_idx * npoints * nsample + pt_idx * nsample + sample_idx;
int in_idx = bs_idx * c * n + c_idx * n + idx[0];
int out_idx = bs_idx * c * npoints * nsample + c_idx * npoints * nsample + pt_idx * nsample + sample_idx;
out[out_idx] = points[in_idx];
}
void group_points_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample,
const float *points, const int *idx, float *out) {
// points: (B, C, N)
// idx: (B, npoints, nsample)
// output:
// out: (B, C, npoints, nsample)
cudaError_t err;
dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
group_points_kernel_fast<<<blocks, threads>>>(b, c, n, npoints, nsample, points, idx, out);
// cudaDeviceSynchronize(); // for using printf in kernel function
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
|
514a74599541a0b072bb1dcf98433be112f1a00c.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2017-2018, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory. LLNL-CODE-734707.
// All Rights reserved. See files LICENSE and NOTICE for details.
//
// This file is part of CEED, a collection of benchmarks, miniapps, software
// libraries and APIs for efficient high-order finite element and spectral
// element discretizations for exascale applications. For more information and
// source code availability see http://github.com/ceed.
//
// The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
// a collaborative effort of two U.S. Department of Energy organizations (Office
// of Science and the National Nuclear Security Administration) responsible for
// the planning and preparation of a capable exascale ecosystem, including
// software, applications, hardware, advanced system engineering and early
// testbed platforms, in support of the nation's exascale computing imperative.
#include <ceed.h>
#include <hip/hip_runtime.h> // for TORCH_HIP_VERSION
#include <magma_v2.h>
#include "magma_common_device.cuh"
#include "weight_device.cuh"
//////////////////////////////////////////////////////////////////////////////////////////
extern __shared__ CeedScalar shared_data[];
template<typename T, int Q>
static __global__ void
magma_weight_1d_kernel(const T *dqweight1d, T *dV, const int v_stride, const int nelem)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int elem_id = (blockIdx.x * blockDim.y) + ty;
if (elem_id >= nelem) return;
// global memory pointers
dV += elem_id * v_stride;
// shared memory pointers
T* sTweight = (T*)shared_data;
T* sV = sTweight + Q;
sV += ty * Q;
// read dqweight_1d
if (ty == 0 && tx < Q) {
sTweight[tx] = dqweight1d[tx];
}
__syncthreads();
magma_weight_1d_device<T, Q>(sTweight, sV, tx);
__syncthreads();
// write V
dV[ tx ] = sV[ tx ];
}
//////////////////////////////////////////////////////////////////////////////////////////
template<typename T, int Q>
static magma_int_t
magma_weight_1d_kernel_driver(
const T *dqweight1d, T *dV, magma_int_t v_stride,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_device_t device;
magma_getdevice( &device );
magma_int_t shmem_max, nthreads_max;
magma_int_t nthreads = Q;
magma_int_t ntcol = (maxthreads < nthreads) ? 1 : (maxthreads / nthreads);
magma_int_t shmem = 0;
shmem += sizeof(T) * Q; // for dqweight1d
shmem += sizeof(T) * ntcol * Q; // for output
hipDeviceGetAttribute (&nthreads_max, hipDeviceAttributeMaxThreadsPerBlock, device);
#if TORCH_HIP_VERSION >= 9000
hipDeviceGetAttribute (&shmem_max, hipDeviceAttributeSharedMemPerBlockOptin, device);
if (shmem <= shmem_max) {
hipFuncSetAttribute(magma_weight_1d_kernel<T, Q>, hipFuncAttributeMaxDynamicSharedMemorySize, shmem);
}
#else
hipDeviceGetAttribute (&shmem_max, hipDeviceAttributeMaxSharedMemoryPerBlock, device);
#endif // TORCH_HIP_VERSION >= 9000
if ( (nthreads*ntcol) > nthreads_max || shmem > shmem_max ) {
return 1; // launch failed
}
else {
magma_int_t nblocks = (nelem + ntcol-1) / ntcol;
dim3 threads(nthreads, ntcol, 1);
dim3 grid(nblocks, 1, 1);
hipLaunchKernelGGL(( magma_weight_1d_kernel<T, Q>), dim3(grid), dim3(threads), shmem, magma_queue_get_cuda_stream(queue),
dqweight1d, dV, v_stride, nelem);
return (hipPeekAtLastError() == hipSuccess) ? 0 : 1;
}
}
//////////////////////////////////////////////////////////////////////////////////////////
static magma_int_t
magma_weight_1d_q(
magma_int_t Q, const CeedScalar *dqweight1d,
CeedScalar *dV, magma_int_t v_stride,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_int_t launch_failed = 0;
switch (Q) {
case 1:
launch_failed = magma_weight_1d_kernel_driver<CeedScalar, 1>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 2:
launch_failed = magma_weight_1d_kernel_driver<CeedScalar, 2>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 3:
launch_failed = magma_weight_1d_kernel_driver<CeedScalar, 3>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 4:
launch_failed = magma_weight_1d_kernel_driver<CeedScalar, 4>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 5:
launch_failed = magma_weight_1d_kernel_driver<CeedScalar, 5>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 6:
launch_failed = magma_weight_1d_kernel_driver<CeedScalar, 6>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 7:
launch_failed = magma_weight_1d_kernel_driver<CeedScalar, 7>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 8:
launch_failed = magma_weight_1d_kernel_driver<CeedScalar, 8>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 9:
launch_failed = magma_weight_1d_kernel_driver<CeedScalar, 9>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 10:
launch_failed = magma_weight_1d_kernel_driver<CeedScalar,10>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
default: launch_failed = 1;
}
return launch_failed;
}
//////////////////////////////////////////////////////////////////////////////////////////
extern "C" magma_int_t
magma_weight_1d(
magma_int_t Q, const CeedScalar *dqweight1d,
CeedScalar *dV, magma_int_t v_stride,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_int_t launch_failed = 0;
    launch_failed = magma_weight_1d_q(Q, dqweight1d, dV, v_stride, nelem, maxthreads, queue);
return launch_failed;
}
| 514a74599541a0b072bb1dcf98433be112f1a00c.cu | // Copyright (c) 2017-2018, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory. LLNL-CODE-734707.
// All Rights reserved. See files LICENSE and NOTICE for details.
//
// This file is part of CEED, a collection of benchmarks, miniapps, software
// libraries and APIs for efficient high-order finite element and spectral
// element discretizations for exascale applications. For more information and
// source code availability see http://github.com/ceed.
//
// The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
// a collaborative effort of two U.S. Department of Energy organizations (Office
// of Science and the National Nuclear Security Administration) responsible for
// the planning and preparation of a capable exascale ecosystem, including
// software, applications, hardware, advanced system engineering and early
// testbed platforms, in support of the nation's exascale computing imperative.
#include <ceed.h>
#include <cuda.h> // for CUDA_VERSION
#include <magma_v2.h>
#include "magma_common_device.cuh"
#include "weight_device.cuh"
//////////////////////////////////////////////////////////////////////////////////////////
extern __shared__ CeedScalar shared_data[];
template<typename T, int Q>
static __global__ void
magma_weight_1d_kernel(const T *dqweight1d, T *dV, const int v_stride, const int nelem)
{
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int elem_id = (blockIdx.x * blockDim.y) + ty;
if (elem_id >= nelem) return;
// global memory pointers
dV += elem_id * v_stride;
// shared memory pointers
T* sTweight = (T*)shared_data;
T* sV = sTweight + Q;
sV += ty * Q;
// read dqweight_1d
if (ty == 0 && tx < Q) {
sTweight[tx] = dqweight1d[tx];
}
__syncthreads();
magma_weight_1d_device<T, Q>(sTweight, sV, tx);
__syncthreads();
// write V
dV[ tx ] = sV[ tx ];
}
//////////////////////////////////////////////////////////////////////////////////////////
template<typename T, int Q>
static magma_int_t
magma_weight_1d_kernel_driver(
const T *dqweight1d, T *dV, magma_int_t v_stride,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_device_t device;
magma_getdevice( &device );
magma_int_t shmem_max, nthreads_max;
magma_int_t nthreads = Q;
magma_int_t ntcol = (maxthreads < nthreads) ? 1 : (maxthreads / nthreads);
magma_int_t shmem = 0;
shmem += sizeof(T) * Q; // for dqweight1d
shmem += sizeof(T) * ntcol * Q; // for output
cudaDeviceGetAttribute (&nthreads_max, cudaDevAttrMaxThreadsPerBlock, device);
#if CUDA_VERSION >= 9000
cudaDeviceGetAttribute (&shmem_max, cudaDevAttrMaxSharedMemoryPerBlockOptin, device);
if (shmem <= shmem_max) {
cudaFuncSetAttribute(magma_weight_1d_kernel<T, Q>, cudaFuncAttributeMaxDynamicSharedMemorySize, shmem);
}
#else
cudaDeviceGetAttribute (&shmem_max, cudaDevAttrMaxSharedMemoryPerBlock, device);
#endif // CUDA_VERSION >= 9000
if ( (nthreads*ntcol) > nthreads_max || shmem > shmem_max ) {
return 1; // launch failed
}
else {
magma_int_t nblocks = (nelem + ntcol-1) / ntcol;
dim3 threads(nthreads, ntcol, 1);
dim3 grid(nblocks, 1, 1);
magma_weight_1d_kernel<T, Q><<<grid, threads, shmem, magma_queue_get_cuda_stream(queue)>>>
(dqweight1d, dV, v_stride, nelem);
return (cudaPeekAtLastError() == cudaSuccess) ? 0 : 1;
}
}
//////////////////////////////////////////////////////////////////////////////////////////
static magma_int_t
magma_weight_1d_q(
magma_int_t Q, const CeedScalar *dqweight1d,
CeedScalar *dV, magma_int_t v_stride,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_int_t launch_failed = 0;
switch (Q) {
case 1:
launch_failed = magma_weight_1d_kernel_driver<CeedScalar, 1>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 2:
launch_failed = magma_weight_1d_kernel_driver<CeedScalar, 2>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 3:
launch_failed = magma_weight_1d_kernel_driver<CeedScalar, 3>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 4:
launch_failed = magma_weight_1d_kernel_driver<CeedScalar, 4>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 5:
launch_failed = magma_weight_1d_kernel_driver<CeedScalar, 5>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 6:
launch_failed = magma_weight_1d_kernel_driver<CeedScalar, 6>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 7:
launch_failed = magma_weight_1d_kernel_driver<CeedScalar, 7>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 8:
launch_failed = magma_weight_1d_kernel_driver<CeedScalar, 8>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 9:
launch_failed = magma_weight_1d_kernel_driver<CeedScalar, 9>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
case 10:
launch_failed = magma_weight_1d_kernel_driver<CeedScalar,10>
(dqweight1d, dV, v_stride, nelem, maxthreads, queue);
break;
default: launch_failed = 1;
}
return launch_failed;
}
//////////////////////////////////////////////////////////////////////////////////////////
extern "C" magma_int_t
magma_weight_1d(
magma_int_t Q, const CeedScalar *dqweight1d,
CeedScalar *dV, magma_int_t v_stride,
magma_int_t nelem, magma_int_t maxthreads, magma_queue_t queue)
{
magma_int_t launch_failed = 0;
    launch_failed = magma_weight_1d_q(Q, dqweight1d, dV, v_stride, nelem, maxthreads, queue);
return launch_failed;
}
|