hip_filename (string, length 5-84) | hip_content (string, length 79-9.69M) | cuda_filename (string, length 4-83) | cuda_content (string, length 19-9.69M)
---|---|---|---|
e06fc7ce90597fdc2f25d923820d964b9e24a885.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <time.h>
#include <stdio.h>
#define N (896 * 896)
#define FULL_DATA_SIZE (1024 * 1024 * 10)
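// add_vectors: element-wise sum c[i] = a[i] + b[i]; the launch below uses exactly one thread per element.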
__global__ void add_vectors(int *a, int *b, int *c) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
c[i] = a[i] + b[i];
}
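// mul_vectors: per-block dot-product partial sums. Each thread accumulates products over a grid-stride
// loop into shared memory, a tree reduction combines them, and thread 0 writes the block sum to c[blockIdx.x].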
__global__ void mul_vectors(int *a, int *b, int *c) {
__shared__ float cache[256];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
int temp = 0;
while (tid < N) {
temp += a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
cache[cacheIndex] = temp;
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s >>= 1) {
if (cacheIndex < s) {
cache[cacheIndex] += cache[cacheIndex + s];
}
__syncthreads();
}
if (cacheIndex == 0) c[blockIdx.x] = cache[0];
}
int main() {
srand(time(NULL));
float elapsed_time;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipStream_t stream_0, stream_1;
hipStreamCreate(&stream_0);
hipStreamCreate(&stream_1);
int *h_a_p, *h_b_p, *h_c_p;
int *dev_a0, *dev_b0, *dev_c0, *dev_a1, *dev_b1, *dev_c1;
hipHostMalloc((void **) &h_a_p, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault);
hipHostMalloc((void **) &h_b_p, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault);
hipHostMalloc((void **) &h_c_p, FULL_DATA_SIZE * sizeof(int), hipHostMallocDefault);
hipMalloc((void **)&dev_a0, FULL_DATA_SIZE * sizeof(int));
hipMalloc((void **)&dev_b0, FULL_DATA_SIZE * sizeof(int));
hipMalloc((void **)&dev_c0, FULL_DATA_SIZE * sizeof(int));
hipMalloc((void **)&dev_a1, FULL_DATA_SIZE * sizeof(int));
hipMalloc((void **)&dev_b1, FULL_DATA_SIZE * sizeof(int));
hipMalloc((void **)&dev_c1, FULL_DATA_SIZE * sizeof(int));
for (int i = 0; i < FULL_DATA_SIZE; i++) {
h_a_p[i] = rand() % 1000;
h_b_p[i] = rand() % 1000;
}
hipEventRecord(start, 0);
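// Pipeline the work in chunks of 2*N ints: each iteration issues async H2D copies, a kernel launch,
// and an async D2H copy on stream_0 and stream_1 so transfers and compute can overlap.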
for (int i = 0; i < FULL_DATA_SIZE; i += N * 2) {
hipMemcpyAsync(dev_a0, h_a_p + i, N * sizeof(int), hipMemcpyHostToDevice, stream_0);
hipMemcpyAsync(dev_a1, h_a_p + i + N, N * sizeof(int), hipMemcpyHostToDevice, stream_1);
hipMemcpyAsync(dev_b0, h_b_p + i, N * sizeof(int), hipMemcpyHostToDevice, stream_0);
hipMemcpyAsync(dev_b1, h_b_p + i + N, N * sizeof(int), hipMemcpyHostToDevice, stream_1);
hipLaunchKernelGGL(( add_vectors) , dim3(N / 256), dim3(256), 0, stream_0 , dev_a0, dev_b0, dev_c0);
hipLaunchKernelGGL(( add_vectors) , dim3(N / 256), dim3(256), 0, stream_1 , dev_a1, dev_b1, dev_c1);
hipMemcpyAsync(h_c_p + i, dev_c0, N * sizeof(int), hipMemcpyDeviceToHost, stream_0);
hipMemcpyAsync(h_c_p + i + N, dev_c1, N * sizeof(int), hipMemcpyDeviceToHost, stream_1);
}
hipStreamSynchronize(stream_0);
hipStreamSynchronize(stream_1);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time, start, stop);
printf("Elapsed time (add vectors): %f\n", elapsed_time);
for (int i = 0; i < FULL_DATA_SIZE; i++) {
h_c_p[i] = 0;
}
hipEventRecord(start, 0);
for (int i = 0; i < FULL_DATA_SIZE; i += N * 2) {
hipMemcpyAsync(dev_a0, h_a_p + i, N * sizeof(int), hipMemcpyHostToDevice, stream_0);
hipMemcpyAsync(dev_a1, h_a_p + i + N, N * sizeof(int), hipMemcpyHostToDevice, stream_1);
hipMemcpyAsync(dev_b0, h_b_p + i, N * sizeof(int), hipMemcpyHostToDevice, stream_0);
hipMemcpyAsync(dev_b1, h_b_p + i + N, N * sizeof(int), hipMemcpyHostToDevice, stream_1);
hipLaunchKernelGGL(( mul_vectors) , dim3(N / 256), dim3(256), 0, stream_0 , dev_a0, dev_b0, dev_c0);
hipLaunchKernelGGL(( mul_vectors) , dim3(N / 256), dim3(256), 0, stream_1 , dev_a1, dev_b1, dev_c1);
hipMemcpyAsync(h_c_p + i, dev_c0, N * sizeof(int), hipMemcpyDeviceToHost, stream_0);
hipMemcpyAsync(h_c_p + i + N, dev_c1, N * sizeof(int), hipMemcpyDeviceToHost, stream_1);
}
hipStreamSynchronize(stream_0);
hipStreamSynchronize(stream_1);
long long result = 0;
for (int i = 0; i < (N / 256); i++) result += h_c_p[i];
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time, start, stop);
printf("Elapsed time (mul vectors): %f\n", elapsed_time);
hipFree(dev_a0);
hipFree(dev_a1);
hipFree(dev_b0);
hipFree(dev_b1);
hipFree(dev_c0);
hipFree(dev_c1);
hipHostFree(h_a_p);
hipHostFree(h_b_p);
hipHostFree(h_c_p);
hipEventDestroy(start);
hipEventDestroy(stop);
hipStreamDestroy(stream_0);
hipStreamDestroy(stream_1);
return 0;
} | e06fc7ce90597fdc2f25d923820d964b9e24a885.cu | #include <stdlib.h>
#include <time.h>
#include <stdio.h>
#define N (896 * 896)
#define FULL_DATA_SIZE (1024 * 1024 * 10)
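// add_vectors: element-wise sum c[i] = a[i] + b[i]; the launch below uses exactly one thread per element.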
__global__ void add_vectors(int *a, int *b, int *c) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
c[i] = a[i] + b[i];
}
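// mul_vectors: per-block dot-product partial sums. Each thread accumulates products over a grid-stride
// loop into shared memory, a tree reduction combines them, and thread 0 writes the block sum to c[blockIdx.x].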
__global__ void mul_vectors(int *a, int *b, int *c) {
__shared__ float cache[256];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
int temp = 0;
while (tid < N) {
temp += a[tid] * b[tid];
tid += blockDim.x * gridDim.x;
}
cache[cacheIndex] = temp;
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s >>= 1) {
if (cacheIndex < s) {
cache[cacheIndex] += cache[cacheIndex + s];
}
__syncthreads();
}
if (cacheIndex == 0) c[blockIdx.x] = cache[0];
}
int main() {
srand(time(NULL));
float elapsed_time;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaStream_t stream_0, stream_1;
cudaStreamCreate(&stream_0);
cudaStreamCreate(&stream_1);
int *h_a_p, *h_b_p, *h_c_p;
int *dev_a0, *dev_b0, *dev_c0, *dev_a1, *dev_b1, *dev_c1;
cudaHostAlloc((void **) &h_a_p, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);
cudaHostAlloc((void **) &h_b_p, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);
cudaHostAlloc((void **) &h_c_p, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);
cudaMalloc((void **)&dev_a0, FULL_DATA_SIZE * sizeof(int));
cudaMalloc((void **)&dev_b0, FULL_DATA_SIZE * sizeof(int));
cudaMalloc((void **)&dev_c0, FULL_DATA_SIZE * sizeof(int));
cudaMalloc((void **)&dev_a1, FULL_DATA_SIZE * sizeof(int));
cudaMalloc((void **)&dev_b1, FULL_DATA_SIZE * sizeof(int));
cudaMalloc((void **)&dev_c1, FULL_DATA_SIZE * sizeof(int));
for (int i = 0; i < FULL_DATA_SIZE; i++) {
h_a_p[i] = rand() % 1000;
h_b_p[i] = rand() % 1000;
}
cudaEventRecord(start, 0);
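// Pipeline the work in chunks of 2*N ints: each iteration issues async H2D copies, a kernel launch,
// and an async D2H copy on stream_0 and stream_1 so transfers and compute can overlap.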
for (int i = 0; i < FULL_DATA_SIZE; i += N * 2) {
cudaMemcpyAsync(dev_a0, h_a_p + i, N * sizeof(int), cudaMemcpyHostToDevice, stream_0);
cudaMemcpyAsync(dev_a1, h_a_p + i + N, N * sizeof(int), cudaMemcpyHostToDevice, stream_1);
cudaMemcpyAsync(dev_b0, h_b_p + i, N * sizeof(int), cudaMemcpyHostToDevice, stream_0);
cudaMemcpyAsync(dev_b1, h_b_p + i + N, N * sizeof(int), cudaMemcpyHostToDevice, stream_1);
add_vectors <<< N / 256, 256, 0, stream_0 >>> (dev_a0, dev_b0, dev_c0);
add_vectors <<< N / 256, 256, 0, stream_1 >>> (dev_a1, dev_b1, dev_c1);
cudaMemcpyAsync(h_c_p + i, dev_c0, N * sizeof(int), cudaMemcpyDeviceToHost, stream_0);
cudaMemcpyAsync(h_c_p + i + N, dev_c1, N * sizeof(int), cudaMemcpyDeviceToHost, stream_1);
}
cudaStreamSynchronize(stream_0);
cudaStreamSynchronize(stream_1);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time, start, stop);
printf("Elapsed time (add vectors): %f\n", elapsed_time);
for (int i = 0; i < FULL_DATA_SIZE; i++) {
h_c_p[i] = 0;
}
cudaEventRecord(start, 0);
for (int i = 0; i < FULL_DATA_SIZE; i += N * 2) {
cudaMemcpyAsync(dev_a0, h_a_p + i, N * sizeof(int), cudaMemcpyHostToDevice, stream_0);
cudaMemcpyAsync(dev_a1, h_a_p + i + N, N * sizeof(int), cudaMemcpyHostToDevice, stream_1);
cudaMemcpyAsync(dev_b0, h_b_p + i, N * sizeof(int), cudaMemcpyHostToDevice, stream_0);
cudaMemcpyAsync(dev_b1, h_b_p + i + N, N * sizeof(int), cudaMemcpyHostToDevice, stream_1);
mul_vectors <<< N / 256, 256, 0, stream_0 >>> (dev_a0, dev_b0, dev_c0);
mul_vectors <<< N / 256, 256, 0, stream_1 >>> (dev_a1, dev_b1, dev_c1);
cudaMemcpyAsync(h_c_p + i, dev_c0, N * sizeof(int), cudaMemcpyDeviceToHost, stream_0);
cudaMemcpyAsync(h_c_p + i + N, dev_c1, N * sizeof(int), cudaMemcpyDeviceToHost, stream_1);
}
cudaStreamSynchronize(stream_0);
cudaStreamSynchronize(stream_1);
long long result = 0;
for (int i = 0; i < (N / 256); i++) result += h_c_p[i];
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time, start, stop);
printf("Elapsed time (mul vectors): %f\n", elapsed_time);
cudaFree(dev_a0);
cudaFree(dev_a1);
cudaFree(dev_b0);
cudaFree(dev_b1);
cudaFree(dev_c0);
cudaFree(dev_c1);
cudaFreeHost(h_a_p);
cudaFreeHost(h_b_p);
cudaFreeHost(h_c_p);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaStreamDestroy(stream_0);
cudaStreamDestroy(stream_1);
return 0;
} |
f9e20a464273e07c90818f1cfbf5005b4a006958.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
int c = blockDim.x * blockIdx.x + threadIdx.x;
int r = blockDim.y * blockIdx.y + threadIdx.y;
if((c < numCols) && (r < numRows)){
int idx = r * numCols + c;
//printf("%d \n", idx);
float result = 0.f;
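// Walk the filterWidth x filterWidth window centered on (r, c); neighbor coordinates are clamped
// to the image bounds before reading, and the weighted sum is accumulated in floating point.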
for(int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r){
for(int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c){
int filter_idx = (filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2;
float filter_value = filter[filter_idx];
int image_r = min( max((r + filter_r), 0), numRows - 1 );
int image_c = min( max((c + filter_c), 0), numCols - 1 );
int image_idx = image_r * numCols + image_c;
float image_value = static_cast<float>(inputChannel[image_idx]);
result += image_value * filter_value;
}
}
outputChannel[idx] = result;
}
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if((x < numCols) && (y < numRows)){
int idx = y * numCols + x;
unsigned char red = inputImageRGBA[idx].x;
unsigned char green = inputImageRGBA[idx].y;
unsigned char blue = inputImageRGBA[idx].z;
redChannel[idx] = red;
greenChannel[idx] = green;
blueChannel[idx] = blue;
}
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with hipMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc
checkCudaErrors(hipMalloc(&d_filter, filterWidth * filterWidth * sizeof(float)));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(hipMemcpy(d_filter, h_filter, filterWidth * filterWidth * sizeof(float), hipMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
const int DIM = 32;
int x = (numCols + DIM - 1)/DIM;
int y = (numRows + DIM - 1)/DIM;
const dim3 gridSize(x, y, 1);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
const dim3 blockSize(DIM, DIM, 1);
//TODO: Launch a kernel for separating the RGBA image into different color channels
hipLaunchKernelGGL(( separateChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
// Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize), dim3(blockSize), 0, 0, d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
// Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(hipFree(d_red));
checkCudaErrors(hipFree(d_green));
checkCudaErrors(hipFree(d_blue));
}
| f9e20a464273e07c90818f1cfbf5005b4a006958.cu | #include <stdio.h>
// Homework 2
// Image Blurring
//
// In this homework we are blurring an image. To do this, imagine that we have
// a square array of weight values. For each pixel in the image, imagine that we
// overlay this square array of weights on top of the image such that the center
// of the weight array is aligned with the current pixel. To compute a blurred
// pixel value, we multiply each pair of numbers that line up. In other words, we
// multiply each weight with the pixel underneath it. Finally, we add up all of the
// multiplied numbers and assign that value to our output for the current pixel.
// We repeat this process for all the pixels in the image.
// To help get you started, we have included some useful notes here.
//****************************************************************************
// For a color image that has multiple channels, we suggest separating
// the different color channels so that each color is stored contiguously
// instead of being interleaved. This will simplify your code.
// That is instead of RGBARGBARGBARGBA... we suggest transforming to three
// arrays (as in the previous homework we ignore the alpha channel again):
// 1) RRRRRRRR...
// 2) GGGGGGGG...
// 3) BBBBBBBB...
//
// The original layout is known as an Array of Structures (AoS) whereas the
// format we are converting to is known as a Structure of Arrays (SoA).
// As a warm-up, we will ask you to write the kernel that performs this
// separation. You should then write the "meat" of the assignment,
// which is the kernel that performs the actual blur. We provide code that
// re-combines your blurred results for each color channel.
//****************************************************************************
// You must fill in the gaussian_blur kernel to perform the blurring of the
// inputChannel, using the array of weights, and put the result in the outputChannel.
// Here is an example of computing a blur, using a weighted average, for a single
// pixel in a small image.
//
// Array of weights:
//
// 0.0 0.2 0.0
// 0.2 0.2 0.2
// 0.0 0.2 0.0
//
// Image (note that we align the array of weights to the center of the box):
//
// 1 2 5 2 0 3
// -------
// 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 +
// | |
// 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2
// | |
// 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3
// -------
// 9 6 5 0 3 9
//
// (1) (2) (3)
//
// A good starting place is to map each thread to a pixel as you have before.
// Then every thread can perform steps 2 and 3 in the diagram above
// completely independently of one another.
// Note that the array of weights is square, so its height is the same as its width.
// We refer to the array of weights as a filter, and we refer to its width with the
// variable filterWidth.
//****************************************************************************
// Your homework submission will be evaluated based on correctness and speed.
// We test each pixel against a reference solution. If any pixel differs by
// more than some small threshold value, the system will tell you that your
// solution is incorrect, and it will let you try again.
// Once you have gotten that working correctly, then you can think about using
// shared memory and having the threads cooperate to achieve better performance.
//****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//
// Writing code the safe way requires slightly more typing, but is very helpful for
// catching mistakes. If you write code the unsafe way and you make a mistake, then
// any subsequent kernels won't compute anything, and it will be hard to figure out
// why. Writing code the safe way will inform you as soon as you make a mistake.
// Finally, remember to free the memory you allocate at the end of the function.
//****************************************************************************
#include "utils.h"
__global__
void gaussian_blur(const unsigned char* const inputChannel,
unsigned char* const outputChannel,
int numRows, int numCols,
const float* const filter, const int filterWidth)
{
// TODO
// NOTE: Be sure to compute any intermediate results in floating point
// before storing the final result as unsigned char.
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
// NOTE: If a thread's absolute 2D position is within the image, but some of
// its neighbors are outside the image, then you will need to be extra careful. Instead
// of trying to read such a neighbor value from GPU memory (which won't work because
// the value is out of bounds), you should explicitly clamp the neighbor values you read
// to be within the bounds of the image. If this is not clear to you, then please refer
// to sequential reference solution for the exact clamping semantics you should follow.
int c = blockDim.x * blockIdx.x + threadIdx.x;
int r = blockDim.y * blockIdx.y + threadIdx.y;
if((c < numCols) && (r < numRows)){
int idx = r * numCols + c;
//printf("%d \n", idx);
float result = 0.f;
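// Walk the filterWidth x filterWidth window centered on (r, c); neighbor coordinates are clamped
// to the image bounds before reading, and the weighted sum is accumulated in floating point.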
for(int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r){
for(int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c){
int filter_idx = (filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2;
float filter_value = filter[filter_idx];
int image_r = min( max((r + filter_r), 0), numRows - 1 );
int image_c = min( max((c + filter_c), 0), numCols - 1 );
int image_idx = image_r * numCols + image_c;
float image_value = static_cast<float>(inputChannel[image_idx]);
result += image_value * filter_value;
}
}
outputChannel[idx] = result;
}
}
//This kernel takes in an image represented as a uchar4 and splits
//it into three images consisting of only one color channel each
__global__
void separateChannels(const uchar4* const inputImageRGBA,
int numRows,
int numCols,
unsigned char* const redChannel,
unsigned char* const greenChannel,
unsigned char* const blueChannel)
{
// TODO
//
// NOTE: Be careful not to try to access memory that is outside the bounds of
// the image. You'll want code that performs the following check before accessing
// GPU memory:
//
// if ( absolute_image_position_x >= numCols ||
// absolute_image_position_y >= numRows )
// {
// return;
// }
int x = blockDim.x * blockIdx.x + threadIdx.x;
int y = blockDim.y * blockIdx.y + threadIdx.y;
if((x < numCols) && (y < numRows)){
int idx = y * numCols + x;
unsigned char red = inputImageRGBA[idx].x;
unsigned char green = inputImageRGBA[idx].y;
unsigned char blue = inputImageRGBA[idx].z;
redChannel[idx] = red;
greenChannel[idx] = green;
blueChannel[idx] = blue;
}
}
//This kernel takes in three color channels and recombines them
//into one image. The alpha channel is set to 255 to represent
//that this image has no transparency.
__global__
void recombineChannels(const unsigned char* const redChannel,
const unsigned char* const greenChannel,
const unsigned char* const blueChannel,
uchar4* const outputImageRGBA,
int numRows,
int numCols)
{
const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
//make sure we don't try and access memory outside the image
//by having any threads mapped there return early
if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
return;
unsigned char red = redChannel[thread_1D_pos];
unsigned char green = greenChannel[thread_1D_pos];
unsigned char blue = blueChannel[thread_1D_pos];
//Alpha should be 255 for no transparency
uchar4 outputPixel = make_uchar4(red, green, blue, 255);
outputImageRGBA[thread_1D_pos] = outputPixel;
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage,
const float* const h_filter, const size_t filterWidth)
{
//allocate memory for the three different channels
//original
checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage));
checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage));
//TODO:
//Allocate memory for the filter on the GPU
//Use the pointer d_filter that we have already declared for you
//You need to allocate memory for the filter with cudaMalloc
//be sure to use checkCudaErrors like the above examples to
//be able to tell if anything goes wrong
//IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc
checkCudaErrors(cudaMalloc(&d_filter, filterWidth * filterWidth * sizeof(float)));
//TODO:
//Copy the filter on the host (h_filter) to the memory you just allocated
//on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice);
//Remember to use checkCudaErrors!
checkCudaErrors(cudaMemcpy(d_filter, h_filter, filterWidth * filterWidth * sizeof(float), cudaMemcpyHostToDevice));
}
void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA,
uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols,
unsigned char *d_redBlurred,
unsigned char *d_greenBlurred,
unsigned char *d_blueBlurred,
const int filterWidth)
{
//TODO: Set reasonable block size (i.e., number of threads per block)
const int DIM = 32;
int x = (numCols + DIM - 1)/DIM;
int y = (numRows + DIM - 1)/DIM;
const dim3 gridSize(x, y, 1);
//TODO:
//Compute correct grid size (i.e., number of blocks per kernel launch)
//from the image size and block size.
const dim3 blockSize(DIM, DIM, 1);
//TODO: Launch a kernel for separating the RGBA image into different color channels
separateChannels<<<gridSize, blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
// Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
//TODO: Call your convolution kernel here 3 times, once for each color channel.
gaussian_blur<<<gridSize, blockSize>>>(d_red, d_redBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_green, d_greenBlurred, numRows, numCols, d_filter, filterWidth);
gaussian_blur<<<gridSize, blockSize>>>(d_blue, d_blueBlurred, numRows, numCols, d_filter, filterWidth);
// Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after
// launching your kernel to make sure that you didn't make any mistakes.
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
// Now we recombine your results. We take care of launching this kernel for you.
//
// NOTE: This kernel launch depends on the gridSize and blockSize variables,
// which you must set yourself.
recombineChannels<<<gridSize, blockSize>>>(d_redBlurred,
d_greenBlurred,
d_blueBlurred,
d_outputImageRGBA,
numRows,
numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
//Free all the memory that we allocated
//TODO: make sure you free any arrays that you allocated
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
}
|
61c5e488dde6b464704d2eaf0ef66bf5b3af4f16.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected]), created on 27.08.2018
//
#include <ops/declarable/helpers/range.h>
#include "execution/cuda/LaunchDims.h"
namespace sd {
namespace ops {
namespace helpers {
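// Writes the arithmetic sequence start + i * delta into the output buffer using a grid-stride loop.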
template <typename T>
static SD_KERNEL void global_range(void* output, sd::LongType length, T start, T delta) {
auto buff = reinterpret_cast<T*>(output);
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
for (sd::LongType i = tid; i < length; i += step) {
buff[i] = static_cast<T>(start) + static_cast<T>(i) * static_cast<T>(delta);
}
}
//////////////////////////////////////////////////////////////////////////
// be careful: outVector must have c-order and ews = 1 !!!
template <typename T>
static void _range(sd::LaunchContext* context, const NDArray& start, const NDArray& delta, NDArray& outVector) {
dim3 launchDims = getLaunchDims("range");
hipLaunchKernelGGL(( global_range<T>), dim3(launchDims.y), dim3(launchDims.x), launchDims.z, *context->getCudaStream(), outVector.specialBuffer(), outVector.lengthOf(),
start.e<T>(0), delta.e<T>(0));
}
void range(sd::LaunchContext* context, const NDArray& start, const NDArray& delta, NDArray& outVector) {
NDArray::prepareSpecialUse({&outVector}, {&start, &delta});
BUILD_SINGLE_SELECTOR(outVector.dataType(), _range, (context, start, delta, outVector), SD_COMMON_TYPES);
NDArray::registerSpecialUse({&outVector}, {&start, &delta});
}
} // namespace helpers
} // namespace ops
} // namespace sd
| 61c5e488dde6b464704d2eaf0ef66bf5b3af4f16.cu | /* ******************************************************************************
*
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected]), created on 27.08.2018
//
#include <ops/declarable/helpers/range.h>
#include "execution/cuda/LaunchDims.h"
namespace sd {
namespace ops {
namespace helpers {
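// Writes the arithmetic sequence start + i * delta into the output buffer using a grid-stride loop.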
template <typename T>
static SD_KERNEL void global_range(void* output, sd::LongType length, T start, T delta) {
auto buff = reinterpret_cast<T*>(output);
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
for (sd::LongType i = tid; i < length; i += step) {
buff[i] = static_cast<T>(start) + static_cast<T>(i) * static_cast<T>(delta);
}
}
//////////////////////////////////////////////////////////////////////////
// be careful: outVector must have c-order and ews = 1 !!!
template <typename T>
static void _range(sd::LaunchContext* context, const NDArray& start, const NDArray& delta, NDArray& outVector) {
dim3 launchDims = getLaunchDims("range");
global_range<T><<<launchDims.y, launchDims.x, launchDims.z, *context->getCudaStream()>>>(outVector.specialBuffer(), outVector.lengthOf(),
start.e<T>(0), delta.e<T>(0));
}
void range(sd::LaunchContext* context, const NDArray& start, const NDArray& delta, NDArray& outVector) {
NDArray::prepareSpecialUse({&outVector}, {&start, &delta});
BUILD_SINGLE_SELECTOR(outVector.dataType(), _range, (context, start, delta, outVector), SD_COMMON_TYPES);
NDArray::registerSpecialUse({&outVector}, {&start, &delta});
}
} // namespace helpers
} // namespace ops
} // namespace sd
|
ca1d95a134d3e5d1ea4f5a1b1bcb00b231a37bf5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// Modified by Nuttiiya Seekhao to support volume rendering of float value
// from main memory
// Simple 3D volume renderer
#ifndef _VOLUMERENDER_KERNEL_CU_
#define _VOLUMERENDER_KERNEL_CU_
#include <helper_cuda.h>
#include <helper_math.h>
typedef unsigned int uint;
typedef unsigned char uchar;
hipArray *d_volumeArray = 0;
hipArray *d_transferFuncArray;
//typedef unsigned char VolumeType;
typedef float VolumeType;
texture<VolumeType, 3, hipReadModeElementType> tex;
//texture<VolumeType, 3, hipReadModeNormalizedFloat> tex; // 3D texture
texture<float4, 1, hipReadModeElementType> transferTex; // 1D transfer function texture
typedef struct
{
float4 m[3];
} float3x4;
__constant__ float3x4 c_invViewMatrix; // inverse view matrix
struct Ray
{
float3 o; // origin
float3 d; // direction
};
// intersect ray with a box
// http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm
__device__
int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar)
{
// compute intersection of ray with all six bbox planes
float3 invR = make_float3(1.0f) / r.d;
float3 tbot = invR * (boxmin - r.o);
float3 ttop = invR * (boxmax - r.o);
// re-order intersections to find smallest and largest on each axis
float3 tmin = fminf(ttop, tbot);
float3 tmax = fmaxf(ttop, tbot);
// find the largest tmin and the smallest tmax
float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z));
float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z));
*tnear = largest_tmin;
*tfar = smallest_tmax;
return smallest_tmax > largest_tmin;
}
// transform vector by matrix (no translation)
__device__
float3 mul(const float3x4 &M, const float3 &v)
{
float3 r;
r.x = dot(v, make_float3(M.m[0]));
r.y = dot(v, make_float3(M.m[1]));
r.z = dot(v, make_float3(M.m[2]));
return r;
}
// transform vector by matrix with translation
__device__
float4 mul(const float3x4 &M, const float4 &v)
{
float4 r;
r.x = dot(v, M.m[0]);
r.y = dot(v, M.m[1]);
r.z = dot(v, M.m[2]);
r.w = 1.0f;
return r;
}
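// Packs a float4 RGBA color (each channel clamped to [0, 1]) into a 32-bit integer,
// 8 bits per channel, with red in the low byte and alpha in the high byte.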
__device__ uint rgbaFloatToInt(float4 rgba)
{
rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0]
rgba.y = __saturatef(rgba.y);
rgba.z = __saturatef(rgba.z);
rgba.w = __saturatef(rgba.w);
return (uint(rgba.w*255)<<24) | (uint(rgba.z*255)<<16) | (uint(rgba.y*255)<<8) | uint(rgba.x*255);
}
__global__ void
d_render(uint *d_output, uint imageW, uint imageH,
float density, float brightness,
float transferOffset, float transferScale)
{
const int maxSteps = 500;
const float tstep = 0.01f;
const float opacityThreshold = 0.95f;
const float3 boxMin = make_float3(-1.0f, -1.0f, -1.0f);
const float3 boxMax = make_float3(1.0f, 1.0f, 1.0f);
uint x = blockIdx.x*blockDim.x + threadIdx.x;
uint y = blockIdx.y*blockDim.y + threadIdx.y;
if ((x >= imageW) || (y >= imageH)) return;
float u = (x / (float) imageW)*2.0f-1.0f;
float v = (y / (float) imageH)*2.0f-1.0f;
// calculate eye ray in world space
Ray eyeRay;
eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f)));
eyeRay.d = normalize(make_float3(u, v, -2.0f));
eyeRay.d = mul(c_invViewMatrix, eyeRay.d);
// find intersection with box
float tnear, tfar;
int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar);
if (!hit) return;
if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane
// march along ray from front to back, accumulating color
float4 sum = make_float4(0.0f);
float t = tnear;
float3 pos = eyeRay.o + eyeRay.d*tnear;
float3 step = eyeRay.d*tstep;
for (int i=0; i<maxSteps; i++)
{
// read from 3D texture
// remap position to [0, 1]
float sample = tex3D(tex, pos.x*0.5f+0.5f, pos.y*0.5f+0.5f, pos.z*0.5f+0.5f);
//sample *= 64.0f; // scale for 10-bit data
// lookup in transfer function texture
float4 col = tex1D(transferTex, (sample-transferOffset)*transferScale);
col.w *= density;
// "under" operator for back-to-front blending
//sum = lerp(sum, col, col.w);
// pre-multiply alpha
col.x *= col.w;
col.y *= col.w;
col.z *= col.w;
// "over" operator for front-to-back blending
sum = sum + col*(1.0f - sum.w);
// exit early if opaque
if (sum.w > opacityThreshold)
break;
t += tstep;
if (t > tfar) break;
pos += step;
}
sum *= brightness;
// write output color
d_output[y*imageW + x] = rgbaFloatToInt(sum);
}
__global__ void
d_render_dim(uint *d_output, uint nx, uint ny, uint nz, uint imageW, uint imageH,
float density, float brightness,
float transferOffset, float transferScale)
{
const int maxSteps = 500;
const float tstep = 0.01f;
const float opacityThreshold = 0.95f;
// Calculate box dimensions using largest dimension as reference
const float a = -1.0f;
const float b = +1.0f;
const float ref = (float) max(nx, max(ny, nz));
const float x_halfwidth = (((float) nx)/(2.0f * ref))*(b-a);
const float y_halfwidth = (((float) ny)/(2.0f * ref))*(b-a);
const float z_halfwidth = (((float) nz)/(2.0f * ref))*(b-a);
const float3 boxMin = make_float3(-1.0f*x_halfwidth, -1.0f*y_halfwidth, -1.0f*z_halfwidth);
const float3 boxMax = make_float3( 1.0f*x_halfwidth, 1.0f*y_halfwidth, 1.0f*z_halfwidth);
uint x = blockIdx.x*blockDim.x + threadIdx.x;
uint y = blockIdx.y*blockDim.y + threadIdx.y;
if ((x >= imageW) || (y >= imageH)) return;
float u = (x / (float) imageW)*2.0f-1.0f;
float v = (y / (float) imageH)*2.0f-1.0f;
// calculate eye ray in world space
Ray eyeRay;
eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f)));
eyeRay.d = normalize(make_float3(u, v, -2.0f));
eyeRay.d = mul(c_invViewMatrix, eyeRay.d);
// find intersection with box
float tnear, tfar;
int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar);
if (!hit) return;
if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane
// march along ray from front to back, accumulating color
float4 sum = make_float4(0.0f);
float t = tnear;
float3 pos = eyeRay.o + eyeRay.d*tnear;
float3 step = eyeRay.d*tstep;
for (int i=0; i<maxSteps; i++)
{
// read from 3D texture
// remap position to [0, 1]
float posx = (pos.x + x_halfwidth)/(2.0f*x_halfwidth);
float posy = (pos.y + y_halfwidth)/(2.0f*y_halfwidth);
float posz = (pos.z + z_halfwidth)/(2.0f*z_halfwidth);
float sample = tex3D(tex, posx, posy, posz);
//sample *= 64.0f; // scale for 10-bit data
// lookup in transfer function texture
float4 col = tex1D(transferTex, (sample-transferOffset)*transferScale);
col.w *= density;
// "under" operator for back-to-front blending
//sum = lerp(sum, col, col.w);
// pre-multiply alpha
col.x *= col.w;
col.y *= col.w;
col.z *= col.w;
// "over" operator for front-to-back blending
sum = sum + col*(1.0f - sum.w);
// exit early if opaque
if (sum.w > opacityThreshold)
break;
t += tstep;
if (t > tfar) break;
pos += step;
}
sum *= brightness;
// write output color
d_output[y*imageW + x] = rgbaFloatToInt(sum);
}
extern "C"
void setTextureFilterMode(bool bLinearFilter)
{
tex.filterMode = bLinearFilter ? hipFilterModeLinear : hipFilterModePoint;
}
extern "C"
void bufferECMmap(hipMemcpy3DParms copyParams)
{
checkCudaErrors(hipMemcpy3D(©Params));
}
extern "C"
void initCuda(void *h_volume, hipExtent volumeSize, hipMemcpy3DParms ©Params)
{
// create 3D array
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<VolumeType>();
checkCudaErrors(hipMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize));
// copy data to 3D array
// hipMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_hipPitchedPtr(h_volume, volumeSize.width*sizeof(VolumeType), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_volumeArray;
copyParams.extent = volumeSize;
copyParams.kind = hipMemcpyHostToDevice;
checkCudaErrors(hipMemcpy3D(©Params));
// set texture parameters
tex.normalized = true; // access with normalized texture coordinates
tex.filterMode = hipFilterModeLinear; // linear interpolation
tex.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates
tex.addressMode[1] = hipAddressModeClamp;
// bind array to 3D texture
checkCudaErrors(hipBindTextureToArray(tex, d_volumeArray, channelDesc));
// create transfer function texture
// float4 transferFunc[] =
// {
// { 0.0, 0.0, 0.0, 0.0, },
// {1.000, 0.412, 0.706, 0.2},
// { 1.0, 0.0, 0.0, 1.0, },
// { 1.0, 0.5, 0.0, 1.0, },
// { 1.0, 1.0, 0.0, 1.0, },
// { 0.0, 1.0, 0.0, 1.0, },
// { 0.0, 1.0, 1.0, 1.0, },
// { 0.0, 0.0, 1.0, 1.0, },
// { 1.0, 0.0, 1.0, 1.0, },
// { 0.0, 0.0, 1.0, 1.0, },
// };
float4 transferFunc[] =
{
{ 0.0, 0.0, 0.0, 0.0, },
// {1.000, 0.412, 0.706, 0.2}, // pink
{ 1.0, 0.5, 0.0, 1.0, },
{ 1.0, 0.0, 0.0, 1.0, }, // red
{ 1.0, 0.0, 0.0, 1.0, }, // red
};
hipChannelFormatDesc channelDesc2 = hipCreateChannelDesc<float4>();
hipArray *d_transferFuncArray;
checkCudaErrors(hipMallocArray(&d_transferFuncArray, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1));
checkCudaErrors(hipMemcpyToArray(d_transferFuncArray, 0, 0, transferFunc, sizeof(transferFunc), hipMemcpyHostToDevice));
transferTex.filterMode = hipFilterModeLinear;
transferTex.normalized = true; // access with normalized texture coordinates
transferTex.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates
// Bind the array to the texture
checkCudaErrors(hipBindTextureToArray(transferTex, d_transferFuncArray, channelDesc2));
}
extern "C"
void freeCudaBuffers()
{
checkCudaErrors(hipFreeArray(d_volumeArray));
checkCudaErrors(hipFreeArray(d_transferFuncArray));
}
extern "C"
void render_kernel(dim3 gridSize, dim3 blockSize, uint *d_output, uint imageW, uint imageH,
float density, float brightness, float transferOffset, float transferScale)
{
hipLaunchKernelGGL(( d_render), dim3(gridSize), dim3(blockSize), 0, 0, d_output, imageW, imageH, density,
brightness, transferOffset, transferScale);
}
extern "C"
void render_kernel_dim(dim3 gridSize, dim3 blockSize, uint *d_output, uint nx, uint ny, uint nz, uint imageW, uint imageH,
float density, float brightness, float transferOffset, float transferScale)
{
hipLaunchKernelGGL(( d_render_dim), dim3(gridSize), dim3(blockSize), 0, 0, d_output, nx, ny, nz, imageW, imageH, density,
brightness, transferOffset, transferScale);
}
extern "C"
void copyInvViewMatrix(float *invViewMatrix, size_t sizeofMatrix)
{
checkCudaErrors(hipMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeofMatrix));
}
#endif // #ifndef _VOLUMERENDER_KERNEL_CU_
| ca1d95a134d3e5d1ea4f5a1b1bcb00b231a37bf5.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// Modified by Nuttiiya Seekhao to support volume rendering of float value
// from main memory
// Simple 3D volume renderer
#ifndef _VOLUMERENDER_KERNEL_CU_
#define _VOLUMERENDER_KERNEL_CU_
#include <helper_cuda.h>
#include <helper_math.h>
typedef unsigned int uint;
typedef unsigned char uchar;
cudaArray *d_volumeArray = 0;
cudaArray *d_transferFuncArray;
//typedef unsigned char VolumeType;
typedef float VolumeType;
texture<VolumeType, 3, cudaReadModeElementType> tex;
//texture<VolumeType, 3, cudaReadModeNormalizedFloat> tex; // 3D texture
texture<float4, 1, cudaReadModeElementType> transferTex; // 1D transfer function texture
typedef struct
{
float4 m[3];
} float3x4;
__constant__ float3x4 c_invViewMatrix; // inverse view matrix
struct Ray
{
float3 o; // origin
float3 d; // direction
};
// intersect ray with a box
// http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm
__device__
int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar)
{
// compute intersection of ray with all six bbox planes
float3 invR = make_float3(1.0f) / r.d;
float3 tbot = invR * (boxmin - r.o);
float3 ttop = invR * (boxmax - r.o);
// re-order intersections to find smallest and largest on each axis
float3 tmin = fminf(ttop, tbot);
float3 tmax = fmaxf(ttop, tbot);
// find the largest tmin and the smallest tmax
float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z));
float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z));
*tnear = largest_tmin;
*tfar = smallest_tmax;
return smallest_tmax > largest_tmin;
}
// transform vector by matrix (no translation)
__device__
float3 mul(const float3x4 &M, const float3 &v)
{
float3 r;
r.x = dot(v, make_float3(M.m[0]));
r.y = dot(v, make_float3(M.m[1]));
r.z = dot(v, make_float3(M.m[2]));
return r;
}
// transform vector by matrix with translation
__device__
float4 mul(const float3x4 &M, const float4 &v)
{
float4 r;
r.x = dot(v, M.m[0]);
r.y = dot(v, M.m[1]);
r.z = dot(v, M.m[2]);
r.w = 1.0f;
return r;
}
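// Packs a float4 RGBA color (each channel clamped to [0, 1]) into a 32-bit integer,
// 8 bits per channel, with red in the low byte and alpha in the high byte.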
__device__ uint rgbaFloatToInt(float4 rgba)
{
rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0]
rgba.y = __saturatef(rgba.y);
rgba.z = __saturatef(rgba.z);
rgba.w = __saturatef(rgba.w);
return (uint(rgba.w*255)<<24) | (uint(rgba.z*255)<<16) | (uint(rgba.y*255)<<8) | uint(rgba.x*255);
}
__global__ void
d_render(uint *d_output, uint imageW, uint imageH,
float density, float brightness,
float transferOffset, float transferScale)
{
const int maxSteps = 500;
const float tstep = 0.01f;
const float opacityThreshold = 0.95f;
const float3 boxMin = make_float3(-1.0f, -1.0f, -1.0f);
const float3 boxMax = make_float3(1.0f, 1.0f, 1.0f);
uint x = blockIdx.x*blockDim.x + threadIdx.x;
uint y = blockIdx.y*blockDim.y + threadIdx.y;
if ((x >= imageW) || (y >= imageH)) return;
float u = (x / (float) imageW)*2.0f-1.0f;
float v = (y / (float) imageH)*2.0f-1.0f;
// calculate eye ray in world space
Ray eyeRay;
eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f)));
eyeRay.d = normalize(make_float3(u, v, -2.0f));
eyeRay.d = mul(c_invViewMatrix, eyeRay.d);
// find intersection with box
float tnear, tfar;
int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar);
if (!hit) return;
if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane
// march along ray from front to back, accumulating color
float4 sum = make_float4(0.0f);
float t = tnear;
float3 pos = eyeRay.o + eyeRay.d*tnear;
float3 step = eyeRay.d*tstep;
for (int i=0; i<maxSteps; i++)
{
// read from 3D texture
// remap position to [0, 1]
float sample = tex3D(tex, pos.x*0.5f+0.5f, pos.y*0.5f+0.5f, pos.z*0.5f+0.5f);
//sample *= 64.0f; // scale for 10-bit data
// lookup in transfer function texture
float4 col = tex1D(transferTex, (sample-transferOffset)*transferScale);
col.w *= density;
// "under" operator for back-to-front blending
//sum = lerp(sum, col, col.w);
// pre-multiply alpha
col.x *= col.w;
col.y *= col.w;
col.z *= col.w;
// "over" operator for front-to-back blending
sum = sum + col*(1.0f - sum.w);
// exit early if opaque
if (sum.w > opacityThreshold)
break;
t += tstep;
if (t > tfar) break;
pos += step;
}
sum *= brightness;
// write output color
d_output[y*imageW + x] = rgbaFloatToInt(sum);
}
__global__ void
d_render_dim(uint *d_output, uint nx, uint ny, uint nz, uint imageW, uint imageH,
float density, float brightness,
float transferOffset, float transferScale)
{
const int maxSteps = 500;
const float tstep = 0.01f;
const float opacityThreshold = 0.95f;
// Calculate box dimensions using largest dimension as reference
const float a = -1.0f;
const float b = +1.0f;
const float ref = (float) max(nx, max(ny, nz));
const float x_halfwidth = (((float) nx)/(2.0f * ref))*(b-a);
const float y_halfwidth = (((float) ny)/(2.0f * ref))*(b-a);
const float z_halfwidth = (((float) nz)/(2.0f * ref))*(b-a);
const float3 boxMin = make_float3(-1.0f*x_halfwidth, -1.0f*y_halfwidth, -1.0f*z_halfwidth);
const float3 boxMax = make_float3( 1.0f*x_halfwidth, 1.0f*y_halfwidth, 1.0f*z_halfwidth);
uint x = blockIdx.x*blockDim.x + threadIdx.x;
uint y = blockIdx.y*blockDim.y + threadIdx.y;
if ((x >= imageW) || (y >= imageH)) return;
float u = (x / (float) imageW)*2.0f-1.0f;
float v = (y / (float) imageH)*2.0f-1.0f;
// calculate eye ray in world space
Ray eyeRay;
eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f)));
eyeRay.d = normalize(make_float3(u, v, -2.0f));
eyeRay.d = mul(c_invViewMatrix, eyeRay.d);
// find intersection with box
float tnear, tfar;
int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar);
if (!hit) return;
if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane
// march along ray from front to back, accumulating color
float4 sum = make_float4(0.0f);
float t = tnear;
float3 pos = eyeRay.o + eyeRay.d*tnear;
float3 step = eyeRay.d*tstep;
for (int i=0; i<maxSteps; i++)
{
// read from 3D texture
// remap position to [0, 1]
float posx = (pos.x + x_halfwidth)/(2.0f*x_halfwidth);
float posy = (pos.y + y_halfwidth)/(2.0f*y_halfwidth);
float posz = (pos.z + z_halfwidth)/(2.0f*z_halfwidth);
float sample = tex3D(tex, posx, posy, posz);
//sample *= 64.0f; // scale for 10-bit data
// lookup in transfer function texture
float4 col = tex1D(transferTex, (sample-transferOffset)*transferScale);
col.w *= density;
// "under" operator for back-to-front blending
//sum = lerp(sum, col, col.w);
// pre-multiply alpha
col.x *= col.w;
col.y *= col.w;
col.z *= col.w;
// "over" operator for front-to-back blending
sum = sum + col*(1.0f - sum.w);
// exit early if opaque
if (sum.w > opacityThreshold)
break;
t += tstep;
if (t > tfar) break;
pos += step;
}
sum *= brightness;
// write output color
d_output[y*imageW + x] = rgbaFloatToInt(sum);
}
extern "C"
void setTextureFilterMode(bool bLinearFilter)
{
tex.filterMode = bLinearFilter ? cudaFilterModeLinear : cudaFilterModePoint;
}
extern "C"
void bufferECMmap(cudaMemcpy3DParms copyParams)
{
checkCudaErrors(cudaMemcpy3D(©Params));
}
extern "C"
void initCuda(void *h_volume, cudaExtent volumeSize, cudaMemcpy3DParms ©Params)
{
// create 3D array
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<VolumeType>();
checkCudaErrors(cudaMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize));
// copy data to 3D array
// cudaMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_cudaPitchedPtr(h_volume, volumeSize.width*sizeof(VolumeType), volumeSize.width, volumeSize.height);
copyParams.dstArray = d_volumeArray;
copyParams.extent = volumeSize;
copyParams.kind = cudaMemcpyHostToDevice;
checkCudaErrors(cudaMemcpy3D(©Params));
// set texture parameters
tex.normalized = true; // access with normalized texture coordinates
tex.filterMode = cudaFilterModeLinear; // linear interpolation
tex.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates
tex.addressMode[1] = cudaAddressModeClamp;
// bind array to 3D texture
checkCudaErrors(cudaBindTextureToArray(tex, d_volumeArray, channelDesc));
// create transfer function texture
// float4 transferFunc[] =
// {
// { 0.0, 0.0, 0.0, 0.0, },
// {1.000, 0.412, 0.706, 0.2},
// { 1.0, 0.0, 0.0, 1.0, },
// { 1.0, 0.5, 0.0, 1.0, },
// { 1.0, 1.0, 0.0, 1.0, },
// { 0.0, 1.0, 0.0, 1.0, },
// { 0.0, 1.0, 1.0, 1.0, },
// { 0.0, 0.0, 1.0, 1.0, },
// { 1.0, 0.0, 1.0, 1.0, },
// { 0.0, 0.0, 1.0, 1.0, },
// };
float4 transferFunc[] =
{
{ 0.0, 0.0, 0.0, 0.0, },
// {1.000, 0.412, 0.706, 0.2}, // pink
{ 1.0, 0.5, 0.0, 1.0, },
{ 1.0, 0.0, 0.0, 1.0, }, // red
{ 1.0, 0.0, 0.0, 1.0, }, // red
};
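// transferTex samples this array with linear filtering and normalized coordinates, so the
// scalar volume sample is mapped to a smooth ramp through these control points
// (transparent black -> orange -> red)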
cudaChannelFormatDesc channelDesc2 = cudaCreateChannelDesc<float4>();
cudaArray *d_transferFuncArray;
checkCudaErrors(cudaMallocArray(&d_transferFuncArray, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1));
checkCudaErrors(cudaMemcpyToArray(d_transferFuncArray, 0, 0, transferFunc, sizeof(transferFunc), cudaMemcpyHostToDevice));
transferTex.filterMode = cudaFilterModeLinear;
transferTex.normalized = true; // access with normalized texture coordinates
transferTex.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates
// Bind the array to the texture
checkCudaErrors(cudaBindTextureToArray(transferTex, d_transferFuncArray, channelDesc2));
}
extern "C"
void freeCudaBuffers()
{
checkCudaErrors(cudaFreeArray(d_volumeArray));
checkCudaErrors(cudaFreeArray(d_transferFuncArray));
}
extern "C"
void render_kernel(dim3 gridSize, dim3 blockSize, uint *d_output, uint imageW, uint imageH,
float density, float brightness, float transferOffset, float transferScale)
{
d_render<<<gridSize, blockSize>>>(d_output, imageW, imageH, density,
brightness, transferOffset, transferScale);
}
extern "C"
void render_kernel_dim(dim3 gridSize, dim3 blockSize, uint *d_output, uint nx, uint ny, uint nz, uint imageW, uint imageH,
float density, float brightness, float transferOffset, float transferScale)
{
d_render_dim<<<gridSize, blockSize>>>(d_output, nx, ny, nz, imageW, imageH, density,
brightness, transferOffset, transferScale);
}
extern "C"
void copyInvViewMatrix(float *invViewMatrix, size_t sizeofMatrix)
{
checkCudaErrors(cudaMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeofMatrix));
}
#endif // #ifndef _VOLUMERENDER_KERNEL_CU_
|
1ac17e5f49c16a88df7f8a1ee68dd53f3eef214e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <gauge_field_order.h>
namespace quda {
/**
Kernel argument struct
*/
template <typename OutOrder, typename InOrder>
struct CopyGaugeArg {
OutOrder out;
const InOrder in;
int volume;
int faceVolumeCB[QUDA_MAX_DIM];
int nDim;
int geometry;
CopyGaugeArg(const OutOrder &out, const InOrder &in, int volume,
const int *faceVolumeCB, int nDim, int geometry)
: out(out), in(in), volume(volume), nDim(nDim), geometry(geometry) {
for (int d=0; d<nDim; d++) this->faceVolumeCB[d] = faceVolumeCB[d];
}
};
/**
Generic CPU gauge reordering and packing
*/
template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder>
void copyGauge(CopyGaugeArg<OutOrder,InOrder> arg) {
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
for (int parity=0; parity<2; parity++) {
for (int d=0; d<arg.geometry; d++) {
for (int x=0; x<arg.volume/2; x++) {
RegTypeIn in[length];
RegTypeOut out[length];
arg.in.load(in, x, d, parity);
for (int i=0; i<length; i++) out[i] = in[i];
arg.out.save(out, x, d, parity);
}
}
}
}
/**
Generic CUDA gauge reordering and packing. Adopts a similar form as
the CPU version, using the same inlined functions.
*/
template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder>
__global__ void copyGaugeKernel(CopyGaugeArg<OutOrder,InOrder> arg) {
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
for (int parity=0; parity<2; parity++) {
for (int d=0; d<arg.geometry; d++) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x >= arg.volume/2) return;
RegTypeIn in[length];
RegTypeOut out[length];
arg.in.load(in, x, d, parity);
for (int i=0; i<length; i++) out[i] = in[i];
arg.out.save(out, x, d, parity);
}
}
}
/**
Generic CPU gauge ghost reordering and packing
*/
template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder>
void copyGhost(CopyGaugeArg<OutOrder,InOrder> arg) {
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
for (int parity=0; parity<2; parity++) {
for (int d=0; d<arg.nDim; d++) {
for (int x=0; x<arg.faceVolumeCB[d]; x++) {
RegTypeIn in[length];
RegTypeOut out[length];
arg.in.loadGhost(in, x, d, parity); // assumes we are loading
for (int i=0; i<length; i++) out[i] = in[i];
arg.out.saveGhost(out, x, d, parity);
}
}
}
}
/**
Generic CUDA kernel for copying the ghost zone. Adopts a similar form as
the CPU version, using the same inlined functions.
*/
template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder>
__global__ void copyGhostKernel(CopyGaugeArg<OutOrder,InOrder> arg) {
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
int x = blockIdx.x * blockDim.x + threadIdx.x;
for (int parity=0; parity<2; parity++) {
for (int d=0; d<arg.nDim; d++) {
if (x < arg.faceVolumeCB[d]) {
RegTypeIn in[length];
RegTypeOut out[length];
arg.in.loadGhost(in, x, d, parity); // assumes we are loading
for (int i=0; i<length; i++) out[i] = in[i];
arg.out.saveGhost(out, x, d, parity);
}
}
}
}
template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder, bool isGhost>
class CopyGauge : Tunable {
CopyGaugeArg<OutOrder,InOrder> arg;
int size;
private:
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; }
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return size; }
public:
CopyGauge(CopyGaugeArg<OutOrder,InOrder> &arg) : arg(arg) {
int faceMax = 0;
for (int d=0; d<arg.nDim; d++) {
faceMax = (arg.faceVolumeCB[d] > faceMax ) ? arg.faceVolumeCB[d] : faceMax;
}
size = isGhost ? faceMax : arg.volume/2;
sprintf(vol, "%d", arg.volume/2);
sprintf(aux, "out_stride=%d,in_stride=%d", arg.out.stride, arg.in.stride);
}
virtual ~CopyGauge() { ; }
void apply(const hipStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
if (!isGhost) {
hipLaunchKernelGGL(( copyGaugeKernel<FloatOut, FloatIn, length, OutOrder, InOrder>)
, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
} else {
hipLaunchKernelGGL(( copyGhostKernel<FloatOut, FloatIn, length, OutOrder, InOrder>)
, dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg);
}
}
TuneKey tuneKey() const { return TuneKey(vol, typeid(*this).name(), aux); }
std::string paramString(const TuneParam &param) const { // Don't bother printing the grid dim.
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), ";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const { return 0; }
long long bytes() const {
int sites = 4*arg.volume/2;
if (isGhost) {
sites = 0;
for (int d=0; d<4; d++) sites += arg.faceVolumeCB[d];
}
#if __COMPUTE_CAPABILITY__ >= 200
return 2 * sites * ( arg.in.Bytes() + arg.in.hasPhase*sizeof(FloatIn)
+ arg.out.Bytes() + arg.out.hasPhase*sizeof(FloatOut) );
#else
return 2 * sites * ( arg.in.Bytes() + arg.out.Bytes() );
#endif
}
};
template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder>
void copyGauge(OutOrder outOrder, const InOrder inOrder, int volume, const int *faceVolumeCB,
int nDim, int geometry, QudaFieldLocation location, int type) {
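// type selects what gets copied: 0 = body and ghost zone, 1 = ghost zone only, 2 = body only
// (this matches the branches below)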
CopyGaugeArg<OutOrder,InOrder> arg(outOrder, inOrder, volume, faceVolumeCB, nDim, geometry);
if (location == QUDA_CPU_FIELD_LOCATION) {
if (type == 0 || type == 2) {
copyGauge<FloatOut, FloatIn, length>(arg);
}
#ifdef MULTI_GPU // only copy the ghost zone if doing multi-gpu
if (type == 0 || type == 1) {
if (geometry == QUDA_VECTOR_GEOMETRY) copyGhost<FloatOut, FloatIn, length>(arg);
//else warningQuda("Cannot copy for %d geometry gauge field", geometry);
}
#endif
} else if (location == QUDA_CUDA_FIELD_LOCATION) {
// first copy body
if (type == 0 || type == 2) {
CopyGauge<FloatOut, FloatIn, length, OutOrder, InOrder, 0> gaugeCopier(arg);
gaugeCopier.apply(0);
}
#ifdef MULTI_GPU
if (type == 0 || type == 1) {
if (geometry == QUDA_VECTOR_GEOMETRY) {
// now copy ghost
CopyGauge<FloatOut, FloatIn, length, OutOrder, InOrder, 1> ghostCopier(arg);
ghostCopier.apply(0);
} else {
//warningQuda("Cannot copy for %d geometry gauge field", geometry);
}
}
#endif
} else {
errorQuda("Undefined field location %d for copyGauge", location);
}
}
template <typename FloatOut, typename FloatIn, int length, typename InOrder>
void copyGauge(const InOrder &inOrder, GaugeField &out, QudaFieldLocation location,
FloatOut *Out, FloatOut **outGhost, int type) {
int faceVolumeCB[QUDA_MAX_DIM];
for (int i=0; i<4; i++) faceVolumeCB[i] = out.SurfaceCB(i) * out.Nface();
if (out.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
if (out.Reconstruct() == QUDA_RECONSTRUCT_NO) {
if (typeid(FloatOut)==typeid(short) && out.LinkType() == QUDA_ASQTAD_FAT_LINKS) {
copyGauge<FloatOut,FloatIn,length>
(FloatNOrder<FloatOut,length,2,19>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), location, type);
} else {
copyGauge<FloatOut,FloatIn,length>
(FloatNOrder<FloatOut,length,2,18>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), location, type);
}
} else if (out.Reconstruct() == QUDA_RECONSTRUCT_12) {
copyGauge<FloatOut,FloatIn,length>
(FloatNOrder<FloatOut,length,2,12>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), location, type);
} else if (out.Reconstruct() == QUDA_RECONSTRUCT_8) {
copyGauge<FloatOut,FloatIn,length>
(FloatNOrder<FloatOut,length,2,8>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), location, type);
#if defined(GPU_STAGGERED_DIRAC) && __COMPUTE_CAPABILITY__ >= 200
} else if (out.Reconstruct() == QUDA_RECONSTRUCT_13) {
copyGauge<FloatOut,FloatIn,length>
(FloatNOrder<FloatOut,length,2,13>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), location, type);
} else if (out.Reconstruct() == QUDA_RECONSTRUCT_9) {
copyGauge<FloatOut,FloatIn,length>
(FloatNOrder<FloatOut,length,2,9>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), location, type);
#endif
} else {
errorQuda("Reconstruction %d and order %d not supported", out.Reconstruct(), out.Order());
}
} else if (out.Order() == QUDA_FLOAT4_GAUGE_ORDER) {
if (out.Reconstruct() == QUDA_RECONSTRUCT_12) {
copyGauge<FloatOut,FloatIn,length>
(FloatNOrder<FloatOut,length,4,12>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), location, type);
} else if (out.Reconstruct() == QUDA_RECONSTRUCT_8) {
copyGauge<FloatOut,FloatIn,length>
(FloatNOrder<FloatOut,length,4,8>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), location, type);
#if defined(GPU_STAGGERED_DIRAC) && __COMPUTE_CAPABILITY__ >= 200
} else if (out.Reconstruct() == QUDA_RECONSTRUCT_13) {
copyGauge<FloatOut,FloatIn,length>
(FloatNOrder<FloatOut,length,4,13>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), location, type);
} else if (out.Reconstruct() == QUDA_RECONSTRUCT_9) {
copyGauge<FloatOut,FloatIn,length>
(FloatNOrder<FloatOut,length,4,9>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), location, type);
#endif
} else {
errorQuda("Reconstruction %d and order %d not supported", out.Reconstruct(), out.Order());
}
} else if (out.Order() == QUDA_QDP_GAUGE_ORDER) {
#ifdef BUILD_QDP_INTERFACE
copyGauge<FloatOut,FloatIn,length>
(QDPOrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), location, type);
#else
errorQuda("QDP interface has not been built\n");
#endif
} else if (out.Order() == QUDA_QDPJIT_GAUGE_ORDER) {
#ifdef BUILD_QDPJIT_INTERFACE
copyGauge<FloatOut,FloatIn,length>
(QDPJITOrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), location, type);
#else
errorQuda("QDPJIT interface has not been built\n");
#endif
} else if (out.Order() == QUDA_CPS_WILSON_GAUGE_ORDER) {
#ifdef BUILD_CPS_INTERFACE
copyGauge<FloatOut,FloatIn,length>
(CPSOrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), location, type);
#else
errorQuda("CPS interface has not been built\n");
#endif
} else if (out.Order() == QUDA_MILC_GAUGE_ORDER) {
#ifdef BUILD_MILC_INTERFACE
copyGauge<FloatOut,FloatIn,length>
(MILCOrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), location, type);
#else
errorQuda("MILC interface has not been built\n");
#endif
} else if (out.Order() == QUDA_BQCD_GAUGE_ORDER) {
#ifdef BUILD_BQCD_INTERFACE
copyGauge<FloatOut,FloatIn,length>
(BQCDOrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), location, type);
#else
errorQuda("BQCD interface has not been built\n");
#endif
} else if (out.Order() == QUDA_TIFR_GAUGE_ORDER) {
#ifdef BUILD_TIFR_INTERFACE
copyGauge<FloatOut,FloatIn,length>
(TIFROrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), location, type);
#else
errorQuda("TIFR interface has not been built\n");
#endif
} else {
errorQuda("Gauge field %d order not supported", out.Order());
}
}
template <typename FloatOut, typename FloatIn, int length>
void copyGauge(GaugeField &out, const GaugeField &in, QudaFieldLocation location,
FloatOut *Out, FloatIn *In, FloatOut **outGhost, FloatIn **inGhost, int type) {
// reconstruction only supported on FloatN fields currently
if (in.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
if (in.Reconstruct() == QUDA_RECONSTRUCT_NO) {
if (typeid(FloatIn)==typeid(short) && in.LinkType() == QUDA_ASQTAD_FAT_LINKS) {
copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,2,19>(in, In, inGhost),
out, location, Out, outGhost, type);
} else {
copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,2,18>(in, In, inGhost),
out, location, Out, outGhost, type);
}
} else if (in.Reconstruct() == QUDA_RECONSTRUCT_12) {
copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,2,12>(in, In, inGhost),
out, location, Out, outGhost, type);
} else if (in.Reconstruct() == QUDA_RECONSTRUCT_8) {
copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,2,8>(in, In, inGhost),
out, location, Out, outGhost, type);
#if defined(GPU_STAGGERED_DIRAC) && __COMPUTE_CAPABILITY__ >= 200
} else if (in.Reconstruct() == QUDA_RECONSTRUCT_13) {
copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,2,13>(in, In, inGhost),
out, location, Out, outGhost, type);
} else if (in.Reconstruct() == QUDA_RECONSTRUCT_9) {
copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,2,9>(in, In, inGhost),
out, location, Out, outGhost, type);
#endif
} else {
errorQuda("Reconstruction %d and order %d not supported", in.Reconstruct(), in.Order());
}
} else if (in.Order() == QUDA_FLOAT4_GAUGE_ORDER) {
if (in.Reconstruct() == QUDA_RECONSTRUCT_12) {
copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,4,12>(in, In, inGhost),
out, location, Out, outGhost, type);
} else if (in.Reconstruct() == QUDA_RECONSTRUCT_8) {
copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,4,8>(in, In, inGhost),
out, location, Out, outGhost, type);
#if defined(GPU_STAGGERED_DIRAC) && __COMPUTE_CAPABILITY__ >= 200
} else if (in.Reconstruct() == QUDA_RECONSTRUCT_13) {
copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,4,13>(in, In, inGhost),
out, location, Out, outGhost, type);
} else if (in.Reconstruct() == QUDA_RECONSTRUCT_9) {
copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,4,9>(in, In, inGhost),
out, location, Out, outGhost, type);
#endif
} else {
errorQuda("Reconstruction %d and order %d not supported", in.Reconstruct(), in.Order());
}
} else if (in.Order() == QUDA_QDP_GAUGE_ORDER) {
#ifdef BUILD_QDP_INTERFACE
copyGauge<FloatOut,FloatIn,length>(QDPOrder<FloatIn,length>(in, In, inGhost),
out, location, Out, outGhost, type);
#else
errorQuda("QDP interface has not been built\n");
#endif
} else if (in.Order() == QUDA_QDPJIT_GAUGE_ORDER) {
#ifdef BUILD_QDPJIT_INTERFACE
copyGauge<FloatOut,FloatIn,length>(QDPJITOrder<FloatIn,length>(in, In, inGhost),
out, location, Out, outGhost, type);
#else
errorQuda("QDPJIT interface has not been built\n");
#endif
} else if (in.Order() == QUDA_CPS_WILSON_GAUGE_ORDER) {
#ifdef BUILD_CPS_INTERFACE
copyGauge<FloatOut,FloatIn,length>(CPSOrder<FloatIn,length>(in, In, inGhost),
out, location, Out, outGhost, type);
#else
errorQuda("CPS interface has not been built\n");
#endif
} else if (in.Order() == QUDA_MILC_GAUGE_ORDER) {
#ifdef BUILD_MILC_INTERFACE
copyGauge<FloatOut,FloatIn,length>(MILCOrder<FloatIn,length>(in, In, inGhost),
out, location, Out, outGhost, type);
#else
errorQuda("MILC interface has not been built\n");
#endif
} else if (in.Order() == QUDA_BQCD_GAUGE_ORDER) {
#ifdef BUILD_BQCD_INTERFACE
copyGauge<FloatOut,FloatIn,length>(BQCDOrder<FloatIn,length>(in, In, inGhost),
out, location, Out, outGhost, type);
#else
errorQuda("BQCD interface has not been built\n");
#endif
} else if (in.Order() == QUDA_TIFR_GAUGE_ORDER) {
#ifdef BUILD_TIFR_INTERFACE
copyGauge<FloatOut,FloatIn,length>(TIFROrder<FloatIn,length>(in, In, inGhost),
out, location, Out, outGhost, type);
#else
errorQuda("TIFR interface has not been built\n");
#endif
} else {
errorQuda("Gauge field %d order not supported", in.Order());
}
}
void checkMomOrder(const GaugeField &u) {
if (u.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
if (u.Reconstruct() != QUDA_RECONSTRUCT_10)
errorQuda("Unsuported order %d and reconstruct %d combination", u.Order(), u.Reconstruct());
} else if (u.Order() == QUDA_TIFR_GAUGE_ORDER) {
if (u.Reconstruct() != QUDA_RECONSTRUCT_NO)
errorQuda("Unsuported order %d and reconstruct %d combination", u.Order(), u.Reconstruct());
} else if (u.Order() == QUDA_MILC_GAUGE_ORDER) {
if (u.Reconstruct() != QUDA_RECONSTRUCT_10)
errorQuda("Unsuported order %d and reconstruct %d combination", u.Order(), u.Reconstruct());
} else {
errorQuda("Unsupported gauge field order %d", u.Order());
}
}
template <typename FloatOut, typename FloatIn>
void copyGauge(GaugeField &out, const GaugeField &in, QudaFieldLocation location, FloatOut *Out,
FloatIn *In, FloatOut **outGhost, FloatIn **inGhost, int type) {
if (in.Ncolor() != 3 && out.Ncolor() != 3) {
errorQuda("Unsupported number of colors; out.Nc=%d, in.Nc=%d", out.Ncolor(), in.Ncolor());
}
if (out.Geometry() != in.Geometry()) {
errorQuda("Field geometries %d %d do not match", out.Geometry(), in.Geometry());
}
#if __COMPUTE_CAPABILITY__ < 200
if (in.Reconstruct() == QUDA_RECONSTRUCT_13 || in.Reconstruct() == QUDA_RECONSTRUCT_9 ||
out.Reconstruct() == QUDA_RECONSTRUCT_13 || out.Reconstruct() == QUDA_RECONSTRUCT_9)
errorQuda("Reconstruct 9/13 not supported on pre-Fermi architecture");
#endif
if (in.LinkType() != QUDA_ASQTAD_MOM_LINKS && out.LinkType() != QUDA_ASQTAD_MOM_LINKS) {
// we are doing gauge field packing
copyGauge<FloatOut,FloatIn,18>(out, in, location, Out, In, outGhost, inGhost, type);
} else {
if (location != QUDA_CPU_FIELD_LOCATION) errorQuda("Location %d not supported", location);
if (out.Geometry() != QUDA_VECTOR_GEOMETRY) errorQuda("Unsupported geometry %d", out.Geometry());
checkMomOrder(in);
checkMomOrder(out);
int faceVolumeCB[QUDA_MAX_DIM];
for (int d=0; d<in.Ndim(); d++) faceVolumeCB[d] = in.SurfaceCB(d) * in.Nface();
// momentum is currently only supported on MILC (10), TIFR (18) and Float2 (10) fields
if (out.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
if (in.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
CopyGaugeArg<FloatNOrder<FloatOut,10,2,10>, FloatNOrder<FloatIn,10,2,10> >
arg(FloatNOrder<FloatOut,10,2,10>(out, Out),
FloatNOrder<FloatIn,10,2,10>(in, In), in.Volume(), faceVolumeCB, in.Ndim(), in.Geometry());
copyGauge<FloatOut,FloatIn,10>(arg);
} else if (in.Order() == QUDA_MILC_GAUGE_ORDER) {
#ifdef BUILD_MILC_INTERFACE
CopyGaugeArg<FloatNOrder<FloatOut,10,2,10>, MILCOrder<FloatIn,10> >
arg(FloatNOrder<FloatOut,10,2,10>(out, Out), MILCOrder<FloatIn,10>(in, In),
in.Volume(), faceVolumeCB, in.Ndim(), in.Geometry());
copyGauge<FloatOut,FloatIn,10>(arg);
#else
errorQuda("MILC interface has not been built\n");
#endif
} else if (in.Order() == QUDA_TIFR_GAUGE_ORDER) {
#ifdef BUILD_TIFR_INTERFACE
CopyGaugeArg<FloatNOrder<FloatOut,18,2,11>, TIFROrder<FloatIn,18> >
arg(FloatNOrder<FloatOut,18,2,11>(out, Out), TIFROrder<FloatIn,18>(in, In),
in.Volume(), faceVolumeCB, in.Ndim(), in.Geometry());
copyGauge<FloatOut,FloatIn,18>(arg);
#else
errorQuda("TIFR interface has not been built\n");
#endif
} else {
errorQuda("Gauge field orders %d not supported", in.Order());
}
} else if (out.Order() == QUDA_MILC_GAUGE_ORDER) {
#ifdef BUILD_MILC_INTERFACE
if (in.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
CopyGaugeArg<MILCOrder<FloatOut,10>, FloatNOrder<FloatIn,10,2,10> >
arg(MILCOrder<FloatOut,10>(out, Out), FloatNOrder<FloatIn,10,2,10>(in, In),
in.Volume(), faceVolumeCB, in.Ndim(), in.Geometry());
copyGauge<FloatOut,FloatIn,10>(arg);
} else if (in.Order() == QUDA_MILC_GAUGE_ORDER) {
CopyGaugeArg<MILCOrder<FloatOut,10>, MILCOrder<FloatIn,10> >
arg(MILCOrder<FloatOut,10>(out, Out), MILCOrder<FloatIn,10>(in, In),
in.Volume(), faceVolumeCB, in.Ndim(), in.Geometry());
copyGauge<FloatOut,FloatIn,10>(arg);
} else {
errorQuda("Gauge field orders %d not supported", in.Order());
}
#else
errorQuda("MILC interface has not been built\n");
#endif
} else if (out.Order() == QUDA_TIFR_GAUGE_ORDER) {
#ifdef BUILD_TIFR_INTERFACE
if (in.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
// FIX ME - 11 is a misnomer to avoid confusion in template instantiation
CopyGaugeArg<TIFROrder<FloatOut,18>, FloatNOrder<FloatIn,18,2,11> >
arg(TIFROrder<FloatOut,18>(out, Out), FloatNOrder<FloatIn,18,2,11>(in, In),
in.Volume(), faceVolumeCB, in.Ndim(), in.Geometry());
copyGauge<FloatOut,FloatIn,18>(arg);
} else if (in.Order() == QUDA_TIFR_GAUGE_ORDER) {
CopyGaugeArg<TIFROrder<FloatOut,18>, TIFROrder<FloatIn,18> >
arg(TIFROrder<FloatOut,18>(out, Out), TIFROrder<FloatIn,18>(in, In),
in.Volume(), faceVolumeCB, in.Ndim(), in.Geometry());
copyGauge<FloatOut,FloatIn,18>(arg);
} else {
errorQuda("Gauge field orders %d not supported", in.Order());
}
#else
errorQuda("TIFR interface has not been built\n");
#endif
} else {
errorQuda("Gauge field orders %d not supported", out.Order());
}
}
}
// this is the function that is actually called, from here on down we instantiate all required templates
void copyGenericGauge(GaugeField &out, const GaugeField &in, QudaFieldLocation location,
void *Out, void *In, void **ghostOut, void **ghostIn, int type) {
// do not copy the ghost zone if it does not exist
if (type == 0 && (in.GhostExchange() != QUDA_GHOST_EXCHANGE_PAD ||
out.GhostExchange() != QUDA_GHOST_EXCHANGE_PAD)) type = 2;
if (out.Precision() == QUDA_DOUBLE_PRECISION) {
if (in.Precision() == QUDA_DOUBLE_PRECISION) {
copyGauge(out, in, location, (double*)Out, (double*)In, (double**)ghostOut, (double**)ghostIn, type);
} else if (in.Precision() == QUDA_SINGLE_PRECISION) {
copyGauge(out, in, location, (double*)Out, (float*)In, (double**)ghostOut, (float**)ghostIn, type);
} else if (in.Precision() == QUDA_HALF_PRECISION) {
copyGauge(out, in, location, (double*)Out, (short*)In, (double**)ghostOut, (short**)ghostIn, type);
}
} else if (out.Precision() == QUDA_SINGLE_PRECISION) {
if (in.Precision() == QUDA_DOUBLE_PRECISION) {
copyGauge(out, in, location, (float*)Out, (double*)In, (float**)ghostOut, (double**)ghostIn, type);
} else if (in.Precision() == QUDA_SINGLE_PRECISION) {
copyGauge(out, in, location, (float*)Out, (float*)In, (float**)ghostOut, (float**)ghostIn, type);
} else if (in.Precision() == QUDA_HALF_PRECISION) {
copyGauge(out, in, location, (float*)Out, (short*)In, (float**)ghostOut, (short**)ghostIn, type);
}
} else if (out.Precision() == QUDA_HALF_PRECISION) {
if (in.Precision() == QUDA_DOUBLE_PRECISION){
copyGauge(out, in, location, (short*)Out, (double*)In, (short**)ghostOut, (double**)ghostIn, type);
} else if (in.Precision() == QUDA_SINGLE_PRECISION) {
copyGauge(out, in, location, (short*)Out, (float*)In, (short**)ghostOut, (float**)ghostIn, type);
} else if (in.Precision() == QUDA_HALF_PRECISION) {
copyGauge(out, in, location, (short*)Out, (short*)In, (short**)ghostOut, (short**)ghostIn, type);
}
}
}
} // namespace quda
| 1ac17e5f49c16a88df7f8a1ee68dd53f3eef214e.cu | #include <gauge_field_order.h>
namespace quda {
/**
Kernel argument struct
*/
template <typename OutOrder, typename InOrder>
struct CopyGaugeArg {
OutOrder out;
const InOrder in;
int volume;
int faceVolumeCB[QUDA_MAX_DIM];
int nDim;
int geometry;
CopyGaugeArg(const OutOrder &out, const InOrder &in, int volume,
const int *faceVolumeCB, int nDim, int geometry)
: out(out), in(in), volume(volume), nDim(nDim), geometry(geometry) {
for (int d=0; d<nDim; d++) this->faceVolumeCB[d] = faceVolumeCB[d];
}
};
/**
Generic CPU gauge reordering and packing
*/
template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder>
void copyGauge(CopyGaugeArg<OutOrder,InOrder> arg) {
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
for (int parity=0; parity<2; parity++) {
for (int d=0; d<arg.geometry; d++) {
for (int x=0; x<arg.volume/2; x++) {
RegTypeIn in[length];
RegTypeOut out[length];
arg.in.load(in, x, d, parity);
for (int i=0; i<length; i++) out[i] = in[i];
arg.out.save(out, x, d, parity);
}
}
}
}
/**
Generic CUDA gauge reordering and packing. Adopts a similar form as
the CPU version, using the same inlined functions.
*/
template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder>
__global__ void copyGaugeKernel(CopyGaugeArg<OutOrder,InOrder> arg) {
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
for (int parity=0; parity<2; parity++) {
for (int d=0; d<arg.geometry; d++) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x >= arg.volume/2) return;
RegTypeIn in[length];
RegTypeOut out[length];
arg.in.load(in, x, d, parity);
for (int i=0; i<length; i++) out[i] = in[i];
arg.out.save(out, x, d, parity);
}
}
}
/**
Generic CPU gauge ghost reordering and packing
*/
template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder>
void copyGhost(CopyGaugeArg<OutOrder,InOrder> arg) {
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
for (int parity=0; parity<2; parity++) {
for (int d=0; d<arg.nDim; d++) {
for (int x=0; x<arg.faceVolumeCB[d]; x++) {
RegTypeIn in[length];
RegTypeOut out[length];
arg.in.loadGhost(in, x, d, parity); // assumes we are loading
for (int i=0; i<length; i++) out[i] = in[i];
arg.out.saveGhost(out, x, d, parity);
}
}
}
}
/**
Generic CUDA kernel for copying the ghost zone. Adopts a similar form as
the CPU version, using the same inlined functions.
*/
template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder>
__global__ void copyGhostKernel(CopyGaugeArg<OutOrder,InOrder> arg) {
typedef typename mapper<FloatIn>::type RegTypeIn;
typedef typename mapper<FloatOut>::type RegTypeOut;
int x = blockIdx.x * blockDim.x + threadIdx.x;
for (int parity=0; parity<2; parity++) {
for (int d=0; d<arg.nDim; d++) {
if (x < arg.faceVolumeCB[d]) {
RegTypeIn in[length];
RegTypeOut out[length];
arg.in.loadGhost(in, x, d, parity); // assumes we are loading
for (int i=0; i<length; i++) out[i] = in[i];
arg.out.saveGhost(out, x, d, parity);
}
}
}
}
template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder, bool isGhost>
class CopyGauge : Tunable {
CopyGaugeArg<OutOrder,InOrder> arg;
int size;
private:
unsigned int sharedBytesPerThread() const { return 0; }
unsigned int sharedBytesPerBlock(const TuneParam &param) const { return 0; }
bool tuneGridDim() const { return false; } // Don't tune the grid dimensions.
unsigned int minThreads() const { return size; }
public:
CopyGauge(CopyGaugeArg<OutOrder,InOrder> &arg) : arg(arg) {
int faceMax = 0;
for (int d=0; d<arg.nDim; d++) {
faceMax = (arg.faceVolumeCB[d] > faceMax ) ? arg.faceVolumeCB[d] : faceMax;
}
size = isGhost ? faceMax : arg.volume/2;
sprintf(vol, "%d", arg.volume/2);
sprintf(aux, "out_stride=%d,in_stride=%d", arg.out.stride, arg.in.stride);
}
virtual ~CopyGauge() { ; }
void apply(const cudaStream_t &stream) {
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
if (!isGhost) {
copyGaugeKernel<FloatOut, FloatIn, length, OutOrder, InOrder>
<<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
} else {
copyGhostKernel<FloatOut, FloatIn, length, OutOrder, InOrder>
<<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg);
}
}
TuneKey tuneKey() const { return TuneKey(vol, typeid(*this).name(), aux); }
std::string paramString(const TuneParam &param) const { // Don't bother printing the grid dim.
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << "), ";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const { return 0; }
long long bytes() const {
int sites = 4*arg.volume/2;
if (isGhost) {
sites = 0;
for (int d=0; d<4; d++) sites += arg.faceVolumeCB[d];
}
#if __COMPUTE_CAPABILITY__ >= 200
return 2 * sites * ( arg.in.Bytes() + arg.in.hasPhase*sizeof(FloatIn)
+ arg.out.Bytes() + arg.out.hasPhase*sizeof(FloatOut) );
#else
return 2 * sites * ( arg.in.Bytes() + arg.out.Bytes() );
#endif
}
};
template <typename FloatOut, typename FloatIn, int length, typename OutOrder, typename InOrder>
void copyGauge(OutOrder outOrder, const InOrder inOrder, int volume, const int *faceVolumeCB,
int nDim, int geometry, QudaFieldLocation location, int type) {
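// type selects what gets copied: 0 = body and ghost zone, 1 = ghost zone only, 2 = body only
// (this matches the branches below)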
CopyGaugeArg<OutOrder,InOrder> arg(outOrder, inOrder, volume, faceVolumeCB, nDim, geometry);
if (location == QUDA_CPU_FIELD_LOCATION) {
if (type == 0 || type == 2) {
copyGauge<FloatOut, FloatIn, length>(arg);
}
#ifdef MULTI_GPU // only copy the ghost zone if doing multi-gpu
if (type == 0 || type == 1) {
if (geometry == QUDA_VECTOR_GEOMETRY) copyGhost<FloatOut, FloatIn, length>(arg);
//else warningQuda("Cannot copy for %d geometry gauge field", geometry);
}
#endif
} else if (location == QUDA_CUDA_FIELD_LOCATION) {
// first copy body
if (type == 0 || type == 2) {
CopyGauge<FloatOut, FloatIn, length, OutOrder, InOrder, 0> gaugeCopier(arg);
gaugeCopier.apply(0);
}
#ifdef MULTI_GPU
if (type == 0 || type == 1) {
if (geometry == QUDA_VECTOR_GEOMETRY) {
// now copy ghost
CopyGauge<FloatOut, FloatIn, length, OutOrder, InOrder, 1> ghostCopier(arg);
ghostCopier.apply(0);
} else {
//warningQuda("Cannot copy for %d geometry gauge field", geometry);
}
}
#endif
} else {
errorQuda("Undefined field location %d for copyGauge", location);
}
}
template <typename FloatOut, typename FloatIn, int length, typename InOrder>
void copyGauge(const InOrder &inOrder, GaugeField &out, QudaFieldLocation location,
FloatOut *Out, FloatOut **outGhost, int type) {
int faceVolumeCB[QUDA_MAX_DIM];
for (int i=0; i<4; i++) faceVolumeCB[i] = out.SurfaceCB(i) * out.Nface();
if (out.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
if (out.Reconstruct() == QUDA_RECONSTRUCT_NO) {
if (typeid(FloatOut)==typeid(short) && out.LinkType() == QUDA_ASQTAD_FAT_LINKS) {
copyGauge<FloatOut,FloatIn,length>
(FloatNOrder<FloatOut,length,2,19>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), location, type);
} else {
copyGauge<FloatOut,FloatIn,length>
(FloatNOrder<FloatOut,length,2,18>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), location, type);
}
} else if (out.Reconstruct() == QUDA_RECONSTRUCT_12) {
copyGauge<FloatOut,FloatIn,length>
(FloatNOrder<FloatOut,length,2,12>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), location, type);
} else if (out.Reconstruct() == QUDA_RECONSTRUCT_8) {
copyGauge<FloatOut,FloatIn,length>
(FloatNOrder<FloatOut,length,2,8>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), location, type);
#if defined(GPU_STAGGERED_DIRAC) && __COMPUTE_CAPABILITY__ >= 200
} else if (out.Reconstruct() == QUDA_RECONSTRUCT_13) {
copyGauge<FloatOut,FloatIn,length>
(FloatNOrder<FloatOut,length,2,13>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), location, type);
} else if (out.Reconstruct() == QUDA_RECONSTRUCT_9) {
copyGauge<FloatOut,FloatIn,length>
(FloatNOrder<FloatOut,length,2,9>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), location, type);
#endif
} else {
errorQuda("Reconstruction %d and order %d not supported", out.Reconstruct(), out.Order());
}
} else if (out.Order() == QUDA_FLOAT4_GAUGE_ORDER) {
if (out.Reconstruct() == QUDA_RECONSTRUCT_12) {
copyGauge<FloatOut,FloatIn,length>
(FloatNOrder<FloatOut,length,4,12>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), location, type);
} else if (out.Reconstruct() == QUDA_RECONSTRUCT_8) {
copyGauge<FloatOut,FloatIn,length>
(FloatNOrder<FloatOut,length,4,8>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), location, type);
#if defined(GPU_STAGGERED_DIRAC) && __COMPUTE_CAPABILITY__ >= 200
} else if (out.Reconstruct() == QUDA_RECONSTRUCT_13) {
copyGauge<FloatOut,FloatIn,length>
(FloatNOrder<FloatOut,length,4,13>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), location, type);
} else if (out.Reconstruct() == QUDA_RECONSTRUCT_9) {
copyGauge<FloatOut,FloatIn,length>
(FloatNOrder<FloatOut,length,4,9>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), location, type);
#endif
} else {
errorQuda("Reconstruction %d and order %d not supported", out.Reconstruct(), out.Order());
}
} else if (out.Order() == QUDA_QDP_GAUGE_ORDER) {
#ifdef BUILD_QDP_INTERFACE
copyGauge<FloatOut,FloatIn,length>
(QDPOrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), location, type);
#else
errorQuda("QDP interface has not been built\n");
#endif
} else if (out.Order() == QUDA_QDPJIT_GAUGE_ORDER) {
#ifdef BUILD_QDPJIT_INTERFACE
copyGauge<FloatOut,FloatIn,length>
(QDPJITOrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), location, type);
#else
errorQuda("QDPJIT interface has not been built\n");
#endif
} else if (out.Order() == QUDA_CPS_WILSON_GAUGE_ORDER) {
#ifdef BUILD_CPS_INTERFACE
copyGauge<FloatOut,FloatIn,length>
(CPSOrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), location, type);
#else
errorQuda("CPS interface has not been built\n");
#endif
} else if (out.Order() == QUDA_MILC_GAUGE_ORDER) {
#ifdef BUILD_MILC_INTERFACE
copyGauge<FloatOut,FloatIn,length>
(MILCOrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), location, type);
#else
errorQuda("MILC interface has not been built\n");
#endif
} else if (out.Order() == QUDA_BQCD_GAUGE_ORDER) {
#ifdef BUILD_BQCD_INTERFACE
copyGauge<FloatOut,FloatIn,length>
(BQCDOrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), location, type);
#else
errorQuda("BQCD interface has not been built\n");
#endif
} else if (out.Order() == QUDA_TIFR_GAUGE_ORDER) {
#ifdef BUILD_TIFR_INTERFACE
copyGauge<FloatOut,FloatIn,length>
(TIFROrder<FloatOut,length>(out, Out, outGhost), inOrder, out.Volume(),
faceVolumeCB, out.Ndim(), out.Geometry(), location, type);
#else
errorQuda("TIFR interface has not been built\n");
#endif
} else {
errorQuda("Gauge field %d order not supported", out.Order());
}
}
template <typename FloatOut, typename FloatIn, int length>
void copyGauge(GaugeField &out, const GaugeField &in, QudaFieldLocation location,
FloatOut *Out, FloatIn *In, FloatOut **outGhost, FloatIn **inGhost, int type) {
// reconstruction only supported on FloatN fields currently
if (in.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
if (in.Reconstruct() == QUDA_RECONSTRUCT_NO) {
if (typeid(FloatIn)==typeid(short) && in.LinkType() == QUDA_ASQTAD_FAT_LINKS) {
copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,2,19>(in, In, inGhost),
out, location, Out, outGhost, type);
} else {
copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,2,18>(in, In, inGhost),
out, location, Out, outGhost, type);
}
} else if (in.Reconstruct() == QUDA_RECONSTRUCT_12) {
copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,2,12>(in, In, inGhost),
out, location, Out, outGhost, type);
} else if (in.Reconstruct() == QUDA_RECONSTRUCT_8) {
copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,2,8>(in, In, inGhost),
out, location, Out, outGhost, type);
#if defined(GPU_STAGGERED_DIRAC) && __COMPUTE_CAPABILITY__ >= 200
} else if (in.Reconstruct() == QUDA_RECONSTRUCT_13) {
copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,2,13>(in, In, inGhost),
out, location, Out, outGhost, type);
} else if (in.Reconstruct() == QUDA_RECONSTRUCT_9) {
copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,2,9>(in, In, inGhost),
out, location, Out, outGhost, type);
#endif
} else {
errorQuda("Reconstruction %d and order %d not supported", in.Reconstruct(), in.Order());
}
} else if (in.Order() == QUDA_FLOAT4_GAUGE_ORDER) {
if (in.Reconstruct() == QUDA_RECONSTRUCT_12) {
copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,4,12>(in, In, inGhost),
out, location, Out, outGhost, type);
} else if (in.Reconstruct() == QUDA_RECONSTRUCT_8) {
copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,4,8>(in, In, inGhost),
out, location, Out, outGhost, type);
#if defined(GPU_STAGGERED_DIRAC) && __COMPUTE_CAPABILITY__ >= 200
} else if (in.Reconstruct() == QUDA_RECONSTRUCT_13) {
copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,4,13>(in, In, inGhost),
out, location, Out, outGhost, type);
} else if (in.Reconstruct() == QUDA_RECONSTRUCT_9) {
copyGauge<FloatOut,FloatIn,length> (FloatNOrder<FloatIn,length,4,9>(in, In, inGhost),
out, location, Out, outGhost, type);
#endif
} else {
errorQuda("Reconstruction %d and order %d not supported", in.Reconstruct(), in.Order());
}
} else if (in.Order() == QUDA_QDP_GAUGE_ORDER) {
#ifdef BUILD_QDP_INTERFACE
copyGauge<FloatOut,FloatIn,length>(QDPOrder<FloatIn,length>(in, In, inGhost),
out, location, Out, outGhost, type);
#else
errorQuda("QDP interface has not been built\n");
#endif
} else if (in.Order() == QUDA_QDPJIT_GAUGE_ORDER) {
#ifdef BUILD_QDPJIT_INTERFACE
copyGauge<FloatOut,FloatIn,length>(QDPJITOrder<FloatIn,length>(in, In, inGhost),
out, location, Out, outGhost, type);
#else
errorQuda("QDPJIT interface has not been built\n");
#endif
} else if (in.Order() == QUDA_CPS_WILSON_GAUGE_ORDER) {
#ifdef BUILD_CPS_INTERFACE
copyGauge<FloatOut,FloatIn,length>(CPSOrder<FloatIn,length>(in, In, inGhost),
out, location, Out, outGhost, type);
#else
errorQuda("CPS interface has not been built\n");
#endif
} else if (in.Order() == QUDA_MILC_GAUGE_ORDER) {
#ifdef BUILD_MILC_INTERFACE
copyGauge<FloatOut,FloatIn,length>(MILCOrder<FloatIn,length>(in, In, inGhost),
out, location, Out, outGhost, type);
#else
errorQuda("MILC interface has not been built\n");
#endif
} else if (in.Order() == QUDA_BQCD_GAUGE_ORDER) {
#ifdef BUILD_BQCD_INTERFACE
copyGauge<FloatOut,FloatIn,length>(BQCDOrder<FloatIn,length>(in, In, inGhost),
out, location, Out, outGhost, type);
#else
errorQuda("BQCD interface has not been built\n");
#endif
} else if (in.Order() == QUDA_TIFR_GAUGE_ORDER) {
#ifdef BUILD_TIFR_INTERFACE
copyGauge<FloatOut,FloatIn,length>(TIFROrder<FloatIn,length>(in, In, inGhost),
out, location, Out, outGhost, type);
#else
errorQuda("TIFR interface has not been built\n");
#endif
} else {
errorQuda("Gauge field %d order not supported", in.Order());
}
}
void checkMomOrder(const GaugeField &u) {
if (u.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
if (u.Reconstruct() != QUDA_RECONSTRUCT_10)
errorQuda("Unsuported order %d and reconstruct %d combination", u.Order(), u.Reconstruct());
} else if (u.Order() == QUDA_TIFR_GAUGE_ORDER) {
if (u.Reconstruct() != QUDA_RECONSTRUCT_NO)
errorQuda("Unsuported order %d and reconstruct %d combination", u.Order(), u.Reconstruct());
} else if (u.Order() == QUDA_MILC_GAUGE_ORDER) {
if (u.Reconstruct() != QUDA_RECONSTRUCT_10)
errorQuda("Unsuported order %d and reconstruct %d combination", u.Order(), u.Reconstruct());
} else {
errorQuda("Unsupported gauge field order %d", u.Order());
}
}
template <typename FloatOut, typename FloatIn>
void copyGauge(GaugeField &out, const GaugeField &in, QudaFieldLocation location, FloatOut *Out,
FloatIn *In, FloatOut **outGhost, FloatIn **inGhost, int type) {
if (in.Ncolor() != 3 && out.Ncolor() != 3) {
errorQuda("Unsupported number of colors; out.Nc=%d, in.Nc=%d", out.Ncolor(), in.Ncolor());
}
if (out.Geometry() != in.Geometry()) {
errorQuda("Field geometries %d %d do not match", out.Geometry(), in.Geometry());
}
#if __COMPUTE_CAPABILITY__ < 200
if (in.Reconstruct() == QUDA_RECONSTRUCT_13 || in.Reconstruct() == QUDA_RECONSTRUCT_9 ||
out.Reconstruct() == QUDA_RECONSTRUCT_13 || out.Reconstruct() == QUDA_RECONSTRUCT_9)
errorQuda("Reconstruct 9/13 not supported on pre-Fermi architecture");
#endif
if (in.LinkType() != QUDA_ASQTAD_MOM_LINKS && out.LinkType() != QUDA_ASQTAD_MOM_LINKS) {
// we are doing gauge field packing
copyGauge<FloatOut,FloatIn,18>(out, in, location, Out, In, outGhost, inGhost, type);
} else {
if (location != QUDA_CPU_FIELD_LOCATION) errorQuda("Location %d not supported", location);
if (out.Geometry() != QUDA_VECTOR_GEOMETRY) errorQuda("Unsupported geometry %d", out.Geometry());
checkMomOrder(in);
checkMomOrder(out);
int faceVolumeCB[QUDA_MAX_DIM];
for (int d=0; d<in.Ndim(); d++) faceVolumeCB[d] = in.SurfaceCB(d) * in.Nface();
// momentum is currently only supported on MILC (10), TIFR (18) and Float2 (10) fields
if (out.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
if (in.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
CopyGaugeArg<FloatNOrder<FloatOut,10,2,10>, FloatNOrder<FloatIn,10,2,10> >
arg(FloatNOrder<FloatOut,10,2,10>(out, Out),
FloatNOrder<FloatIn,10,2,10>(in, In), in.Volume(), faceVolumeCB, in.Ndim(), in.Geometry());
copyGauge<FloatOut,FloatIn,10>(arg);
} else if (in.Order() == QUDA_MILC_GAUGE_ORDER) {
#ifdef BUILD_MILC_INTERFACE
CopyGaugeArg<FloatNOrder<FloatOut,10,2,10>, MILCOrder<FloatIn,10> >
arg(FloatNOrder<FloatOut,10,2,10>(out, Out), MILCOrder<FloatIn,10>(in, In),
in.Volume(), faceVolumeCB, in.Ndim(), in.Geometry());
copyGauge<FloatOut,FloatIn,10>(arg);
#else
errorQuda("MILC interface has not been built\n");
#endif
} else if (in.Order() == QUDA_TIFR_GAUGE_ORDER) {
#ifdef BUILD_TIFR_INTERFACE
CopyGaugeArg<FloatNOrder<FloatOut,18,2,11>, TIFROrder<FloatIn,18> >
arg(FloatNOrder<FloatOut,18,2,11>(out, Out), TIFROrder<FloatIn,18>(in, In),
in.Volume(), faceVolumeCB, in.Ndim(), in.Geometry());
copyGauge<FloatOut,FloatIn,18>(arg);
#else
errorQuda("TIFR interface has not been built\n");
#endif
} else {
errorQuda("Gauge field orders %d not supported", in.Order());
}
} else if (out.Order() == QUDA_MILC_GAUGE_ORDER) {
#ifdef BUILD_MILC_INTERFACE
if (in.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
CopyGaugeArg<MILCOrder<FloatOut,10>, FloatNOrder<FloatIn,10,2,10> >
arg(MILCOrder<FloatOut,10>(out, Out), FloatNOrder<FloatIn,10,2,10>(in, In),
in.Volume(), faceVolumeCB, in.Ndim(), in.Geometry());
copyGauge<FloatOut,FloatIn,10>(arg);
} else if (in.Order() == QUDA_MILC_GAUGE_ORDER) {
CopyGaugeArg<MILCOrder<FloatOut,10>, MILCOrder<FloatIn,10> >
arg(MILCOrder<FloatOut,10>(out, Out), MILCOrder<FloatIn,10>(in, In),
in.Volume(), faceVolumeCB, in.Ndim(), in.Geometry());
copyGauge<FloatOut,FloatIn,10>(arg);
} else {
errorQuda("Gauge field orders %d not supported", in.Order());
}
#else
errorQuda("MILC interface has not been built\n");
#endif
} else if (out.Order() == QUDA_TIFR_GAUGE_ORDER) {
#ifdef BUILD_TIFR_INTERFACE
if (in.Order() == QUDA_FLOAT2_GAUGE_ORDER) {
// FIX ME - 11 is a misnomer to avoid confusion in template instantiation
CopyGaugeArg<TIFROrder<FloatOut,18>, FloatNOrder<FloatIn,18,2,11> >
arg(TIFROrder<FloatOut,18>(out, Out), FloatNOrder<FloatIn,18,2,11>(in, In),
in.Volume(), faceVolumeCB, in.Ndim(), in.Geometry());
copyGauge<FloatOut,FloatIn,18>(arg);
} else if (in.Order() == QUDA_TIFR_GAUGE_ORDER) {
CopyGaugeArg<TIFROrder<FloatOut,18>, TIFROrder<FloatIn,18> >
arg(TIFROrder<FloatOut,18>(out, Out), TIFROrder<FloatIn,18>(in, In),
in.Volume(), faceVolumeCB, in.Ndim(), in.Geometry());
copyGauge<FloatOut,FloatIn,18>(arg);
} else {
errorQuda("Gauge field orders %d not supported", in.Order());
}
#else
errorQuda("TIFR interface has not been built\n");
#endif
} else {
errorQuda("Gauge field orders %d not supported", out.Order());
}
}
}
// this is the function that is actually called, from here on down we instantiate all required templates
void copyGenericGauge(GaugeField &out, const GaugeField &in, QudaFieldLocation location,
void *Out, void *In, void **ghostOut, void **ghostIn, int type) {
// do not copy the ghost zone if it does not exist
if (type == 0 && (in.GhostExchange() != QUDA_GHOST_EXCHANGE_PAD ||
out.GhostExchange() != QUDA_GHOST_EXCHANGE_PAD)) type = 2;
if (out.Precision() == QUDA_DOUBLE_PRECISION) {
if (in.Precision() == QUDA_DOUBLE_PRECISION) {
copyGauge(out, in, location, (double*)Out, (double*)In, (double**)ghostOut, (double**)ghostIn, type);
} else if (in.Precision() == QUDA_SINGLE_PRECISION) {
copyGauge(out, in, location, (double*)Out, (float*)In, (double**)ghostOut, (float**)ghostIn, type);
} else if (in.Precision() == QUDA_HALF_PRECISION) {
copyGauge(out, in, location, (double*)Out, (short*)In, (double**)ghostOut, (short**)ghostIn, type);
}
} else if (out.Precision() == QUDA_SINGLE_PRECISION) {
if (in.Precision() == QUDA_DOUBLE_PRECISION) {
copyGauge(out, in, location, (float*)Out, (double*)In, (float**)ghostOut, (double**)ghostIn, type);
} else if (in.Precision() == QUDA_SINGLE_PRECISION) {
copyGauge(out, in, location, (float*)Out, (float*)In, (float**)ghostOut, (float**)ghostIn, type);
} else if (in.Precision() == QUDA_HALF_PRECISION) {
copyGauge(out, in, location, (float*)Out, (short*)In, (float**)ghostOut, (short**)ghostIn, type);
}
} else if (out.Precision() == QUDA_HALF_PRECISION) {
if (in.Precision() == QUDA_DOUBLE_PRECISION){
copyGauge(out, in, location, (short*)Out, (double*)In, (short**)ghostOut, (double**)ghostIn, type);
} else if (in.Precision() == QUDA_SINGLE_PRECISION) {
copyGauge(out, in, location, (short*)Out, (float*)In, (short**)ghostOut, (float**)ghostIn, type);
} else if (in.Precision() == QUDA_HALF_PRECISION) {
copyGauge(out, in, location, (short*)Out, (short*)In, (short**)ghostOut, (short**)ghostIn, type);
}
}
}
} // namespace quda
|
96448bc23bd4b79c84aab16dac06416b5ec95253.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
//-----------------------------------------------------
// Method to compute the Galerkin product: A_c=R*A*P
// The number of non-zeros per row is computed using Thrust
// The coarse matrix is created by custom kernel
// --------------------------------------------------------
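// For the unsmoothed (0/1) aggregation used here, the Galerkin product reduces to summing
// fine-level entries over aggregate pairs,
//   (A_c)_{IJ} = sum_{i in aggregate I} sum_{j in aggregate J} A_{ij},
// which is what the fill kernel below accumulates (diagonal blocks are kept separately).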
#include <aggregation/coarseAgenerators/hybrid_coarse_A_generator.h>
#include <thrust/system/detail/generic/reduce_by_key.h>
#include <cusp/detail/format_utils.h> //indices_to_offsets
#include <thrust/remove.h>
#include <thrust/extrema.h>
#include <thrust/adjacent_difference.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/unique.h>
#include <error.h>
#include <cutil.h>
#include <util.h>
#include <types.h>
namespace amgx
{
namespace aggregation
{
typedef thrust::tuple<int, int> tuple_t;
struct isDiagonal
{
__host__ __device__
bool operator()(tuple_t IJ)
{
return ( IJ.get<0>() == IJ.get<1>() );
}
};
// --------------------
// Kernels
// --------------------
// Kernel to store aggregate I of each fine point index i
template <typename IndexType>
__global__
void iToIKernel(const IndexType *row_offsets, const IndexType *aggregates, IndexType *I, const int num_rows)
{
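// grid-stride loop: each thread walks over fine rows and tags every nonzero of a row
// with that row's aggregate id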
for (int tid = blockDim.x * blockIdx.x + threadIdx.x; tid < num_rows; tid += gridDim.x * blockDim.x)
{
int agg = aggregates[tid];
for (int j = row_offsets[tid]; j < row_offsets[tid + 1]; j++)
{
I[j] = agg;
}
}
}
// Kernel to store aggregate J of each fine point index j
template <typename IndexType>
__global__
void jToJKernel(const IndexType *column_indices, const IndexType *aggregates, IndexType *J, const int num_entries)
{
for (int tid = blockDim.x * blockIdx.x + threadIdx.x; tid < num_entries; tid += gridDim.x * blockDim.x)
{
int j = column_indices[tid];
J[tid] = aggregates[j];
}
}
// Kernel to fill matrix Ac for 4x4 block matrices
template <typename IndexType, typename ValueType, int max_nonzeros_per_row, int threads_per_block, int bsize_sq, int log_bsize_sq>
__global__
void fillA_4by4Blocks_Thrust_Kernel(const IndexType *A_row_offsets, const IndexType *A_column_indices, const IndexType *A_dia_values, const ValueType *A_nonzero_values, const IndexType *R_row_offsets, const IndexType *R_column_indices, const IndexType *Ac_row_offsets, const IndexType *Ac_column_indices, IndexType *Ac_dia_values, ValueType *Ac_nonzero_values, const IndexType *aggregates, const int num_aggregates, const int num_threads)
{
const int tid = threadIdx.x;
const int I = blockIdx.x * threads_per_block + tid;
const int halfwarp_id = tid >> log_bsize_sq;
const int mat_entry_index = tid & (bsize_sq - 1);
volatile __shared__ IndexType current_cell_to_read[threads_per_block / bsize_sq];
volatile __shared__ IndexType current_cell_to_write[threads_per_block / bsize_sq];
volatile __shared__ IndexType max_nonzeros_i[threads_per_block];
volatile __shared__ IndexType max_nonzeros_j[threads_per_block];
const int size_hash_reg = (2. / 3.) * max_nonzeros_per_row; // 2/3 is arbitrary
const int size_hash_shared = max_nonzeros_per_row - size_hash_reg;
IndexType shared_offset = threadIdx.x * size_hash_shared;
volatile __shared__ IndexType hash_shared[size_hash_shared * threads_per_block];
IndexType hash_reg[size_hash_reg];
int Ac_offset_min;
int Ac_offset_max;
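// per-thread lookup table of row I's coarse column indices: the first size_hash_reg entries
// are kept in registers and the remainder spills into shared memory; it is filled from
// Ac_column_indices below and searched when scattering contributions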
#pragma unroll
for (int i = 0; i < size_hash_reg; i++)
{
hash_reg[i] = -1;
}
for (int i = 0; i < size_hash_shared; i++)
{
hash_shared[shared_offset + i] = -1;
}
// Find maximum number of nonzeros for threads in same halfwarp
int i_offset_min, i_offset_max;
if (I < num_aggregates)
{
i_offset_min = R_row_offsets[I];
i_offset_max = R_row_offsets[I + 1];
max_nonzeros_i[tid] = i_offset_max - i_offset_min;
Ac_offset_min = Ac_row_offsets[I];
Ac_offset_max = Ac_row_offsets[I + 1];
for (int k = Ac_offset_min, icount = 0; k < Ac_offset_max; k++, icount++)
{
if (icount < size_hash_reg)
{
hash_reg[icount] = Ac_column_indices[k];
}
else
{
hash_shared[shared_offset + icount - size_hash_reg] = Ac_column_indices[k];
}
}
}
else
{
i_offset_min = 0;
i_offset_max = 0;
max_nonzeros_i[tid] = 0;
}
int max_loop_i = 0;
for (int m = 0; m < bsize_sq; m++)
{
int max_nonzeros = max_nonzeros_i[halfwarp_id * bsize_sq + m];
if (max_nonzeros > max_loop_i)
{
max_loop_i = max_nonzeros;
}
}
// For all threads that could do useful work
if (I < num_threads)
{
for (int iloop = 0, i_offset = i_offset_min; iloop < max_loop_i; iloop++, i_offset++)
{
// Contribution from A_dia_values of fine point i
int i = (i_offset < i_offset_max) ? R_column_indices[i_offset] : -1 ;
// Have threads collaborate to load in coalesced fashion
for (int m = 0; m < bsize_sq; m++)
{
// Write which cell to load in shared memory buffer
if (mat_entry_index == m)
{
current_cell_to_read[halfwarp_id] = i;
current_cell_to_write[halfwarp_id] = I;
}
// All threads read from shared which cell to read and write
int cell_to_read = current_cell_to_read[halfwarp_id];
int cell_to_write = current_cell_to_write[halfwarp_id];
// Here all threads in half-warp will take same path
if (cell_to_read != -1)
{
Ac_nonzero_values[ bsize_sq * Ac_dia_values[cell_to_write] + mat_entry_index] += A_nonzero_values[bsize_sq * A_dia_values[cell_to_read] + mat_entry_index];
}
}
// Contribution from A_nonzero_values of fine point i
// Find maximum number of nonzeros for threads in same halfwarp_id
int j_offset_min, j_offset_max;
if (i != -1)
{
j_offset_min = A_row_offsets[i];
j_offset_max = A_row_offsets[i + 1];
max_nonzeros_j[tid] = j_offset_max - j_offset_min;
}
else
{
j_offset_min = 0;
j_offset_max = 0;
max_nonzeros_j[tid] = 0;
}
int max_loop_j = 0;
for (int m = 0; m < bsize_sq; m++)
{
int max_nonzeros = max_nonzeros_j[halfwarp_id * bsize_sq + m];
if (max_nonzeros > max_loop_j)
{
max_loop_j = max_nonzeros;
}
}
// Add contribution from nonzero_values of A
int j_offset_to_read;
for (int jloop = 0, j_offset = j_offset_min; jloop < max_loop_j; jloop++, j_offset++)
{
int J, j, k;
if (j_offset < j_offset_max)
{
j_offset_to_read = j_offset;
j = A_column_indices[j_offset];
J = aggregates[j];
// Find index k where to store the data and create A_column_indices
if (I != J)
{
// This weird construct is to allow loop unrolling and avoid register spilling (see original version below)
int found = 0;
k = Ac_offset_min;
#pragma unroll
for (int icount = 0; icount < max_nonzeros_per_row; icount++)
{
if (k < Ac_offset_max)
{
if (found == 0)
{
int Jtemp = (icount < size_hash_reg) ? hash_reg[icount] : hash_shared[shared_offset + icount - size_hash_reg];
if (J == Jtemp)
{
found = 1;
}
if (found == 0) { k++; }
}
}
}
} // if I != J
} // if j_offset < j_offset_max
else
{
j_offset_to_read = -1;
}
// Have threads collaborate to load in coalesced fashion
for (int m = 0; m < bsize_sq; m++)
{
// Write which cell to load in shared memory buffer
if (mat_entry_index == m)
{
current_cell_to_read[halfwarp_id] = j_offset_to_read;
if (I != J)
{
current_cell_to_write[halfwarp_id] = -k;
}
else
{
current_cell_to_write[halfwarp_id] = I + 1;
}
}
// All threads read from shared which cell to read and write
int cell_to_read = current_cell_to_read[halfwarp_id];
int cell_to_write = current_cell_to_write[halfwarp_id];
if (cell_to_read != -1)
{
if (cell_to_write <= 0)
{
Ac_nonzero_values[(-cell_to_write)*bsize_sq + mat_entry_index] += A_nonzero_values[cell_to_read * bsize_sq + mat_entry_index];
}
else
{
Ac_nonzero_values[ Ac_dia_values[ cell_to_write - 1 ] * bsize_sq + mat_entry_index] += A_nonzero_values[cell_to_read * bsize_sq + mat_entry_index];
}
}
}
} // j_offset_loop
} // i_offset_loop
} // if tid < num_threads
}
// Constructor
template<class T_Config>
HybridCoarseAGeneratorBase<T_Config>::HybridCoarseAGeneratorBase()
{
}
//-----------------------------------------------------
// Method to compute the Galerkin product: A_c=R*A*P
// The number of non-zeros per row is computed using Thrust
// The coarse matrix is created by custom kernel
//-----------------------------------------------------
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void HybridCoarseAGenerator<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeAOperator_4x4(const Matrix<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &A, Matrix<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &Ac, const typename Matrix_d::IVector &aggregates, const typename Matrix_d::IVector &R_row_offsets, const typename Matrix_d::IVector &R_column_indices, const int num_aggregates)
{
// Requires the outside (DIAG) diagonal storage property; matrices with the diagonal stored inside the rows are not supported yet
if (!A.hasProps(DIAG))
{
FatalError("Hybryd coarser does not support inside diagonal yet\n", AMGX_ERR_NOT_IMPLEMENTED);
}
if (A.get_num_nz() == 0)
{
FatalError("Hybryd coarser does not work correctly with diagonal matrices\n", AMGX_ERR_NOT_IMPLEMENTED);
}
typename Matrix_d::IVector I(A.get_num_nz(), -1);
typename Matrix_d::IVector J(A.get_num_nz(), -1);
typedef device_vector_alloc<IndexType> IntVector;
typedef typename IntVector::iterator IntIterator;
typedef thrust::tuple< IntIterator, IntIterator> IntIteratorTuple;
typedef thrust::zip_iterator<IntIteratorTuple> ZipIterator;
ZipIterator new_end;
const int block_size_I = 128;
const int block_size_J = 256;
const int num_blocks_I = ::min( AMGX_GRID_MAX_SIZE, (int) ((A.get_num_rows() - 1) / block_size_I + 1));
const int num_blocks_J = ::min( AMGX_GRID_MAX_SIZE, (int) ((A.get_num_nz() - 1) / block_size_J + 1));
const IndexType *A_row_offsets_ptr = A.row_offsets.raw();
const IndexType *A_column_indices_ptr = A.col_indices.raw();
const IndexType *A_dia_values_ptr = A.diag.raw();
const ValueType *A_nonzero_values_ptr = A.values.raw();
const IndexType *aggregates_ptr = aggregates.raw();
IndexType *I_ptr = I.raw();
IndexType *J_ptr = J.raw();
// Kernel to fill array I with aggregates number for fine points i
hipLaunchKernelGGL(( iToIKernel) , dim3(num_blocks_I), dim3(block_size_I), 0, 0, A_row_offsets_ptr, aggregates_ptr, I_ptr, (int) A.get_num_rows());
cudaCheckError();
// Kernel to fill array J with aggregates number for fine points j
hipLaunchKernelGGL(( jToJKernel) , dim3(num_blocks_J), dim3(block_size_J), 0, 0, A_column_indices_ptr, aggregates_ptr, J_ptr, (int) A.get_num_nz());
cudaCheckError();
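// The (I,J) pairs are now reduced to the coarse sparsity pattern entirely with Thrust:
// two stable sorts plus gathers order the pairs lexicographically by (I,J), unique()
// drops duplicate coarse entries, remove_if() drops the coarse diagonal (it is stored
// separately via Ac.diag), indices_to_offsets() turns the remaining I values into
// Ac.row_offsets, and adjacent_difference()/max_element() yield the largest number of
// nonzeros in any coarse row, which selects the kernel specialization further below.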
// Sort (I,J) by rows and columns (I,J)
IVector permutation(A.get_num_nz());
thrust::sequence(permutation.begin(), permutation.end());
cudaCheckError();
// compute permutation and sort by (I,J)
{
IVector temp(J);
thrust::stable_sort_by_key(temp.begin(), temp.end(), permutation.begin());
cudaCheckError();
temp = I;
//I = temp;
thrust_wrapper::gather(permutation.begin(), permutation.end(), temp.begin(), I.begin());
cudaCheckError();
thrust::stable_sort_by_key(I.begin(), I.end(), permutation.begin());
cudaCheckError();
temp = J;
//J = temp;
thrust_wrapper::gather(permutation.begin(), permutation.end(), temp.begin(), J.begin());
cudaCheckError();
}
// Remove duplicate tuples
new_end = thrust::unique(thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())),
thrust::make_zip_iterator(thrust::make_tuple(I.end(), J.end())), thrust::equal_to < thrust::tuple<IndexType, IndexType> >());
cudaCheckError();
IntIteratorTuple endTuple = new_end.get_iterator_tuple();
I.erase(thrust::get<0>(endTuple), I.end());
J.erase(thrust::get<1>(endTuple), J.end());
// Remove diagonal terms
new_end = thrust::remove_if(thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())),
thrust::make_zip_iterator(thrust::make_tuple(I.end(), J.end())), isDiagonal() );
cudaCheckError();
endTuple = new_end.get_iterator_tuple();
I.erase(thrust::get<0>(endTuple), I.end());
J.erase(thrust::get<1>(endTuple), J.end());
int nonzero_blocks = J.size();
// Resize Ac
Ac.addProps(CSR);
if (A.hasProps(DIAG)) { Ac.addProps(DIAG); }
Ac.resize(num_aggregates, num_aggregates, nonzero_blocks, A.get_block_dimy(), A.get_block_dimx());
//Ac.resize(num_aggregates,num_aggregates,nonzero_blocks,A.get_block_dimy(),A.get_block_dimx(), 1);
// Ac.column_indices
Ac.col_indices = J;
J.clear();
J.shrink_to_fit();
// Convert array new_row_indices to offsets
cusp::detail::indices_to_offsets(I, Ac.row_offsets);
cudaCheckError();
I.resize(Ac.row_offsets.size());
// Compute the maximum number of nonzeros
thrust::adjacent_difference(Ac.row_offsets.begin(), Ac.row_offsets.end(), I.begin());
cudaCheckError();
const IndexType max_nonzero_per_row = *thrust::max_element(I.begin() + 1, I.end());
cudaCheckError();
//std::cout << "max_nonzero_per_row" << max_nonzero_per_row << std::endl;
I.clear();
I.shrink_to_fit();
const IndexType *R_row_offsets_ptr = R_row_offsets.raw();
const IndexType *R_column_indices_ptr = R_column_indices.raw();
// Get max_nonzero_per_row
//const IndexType max_nonzero_per_row = 32;
const int threads_per_block = 128;
// Store the column_indices in a register
// Option to print the number of nonzeros distribution
//printNonzeroStats(Ac_row_offsets_temp,num_aggregates);
// Resize Ac and doing exclusive scan on Ac_row_offsets_temp
IndexType *Ac_row_offsets_ptr = Ac.row_offsets.raw();
IndexType *Ac_column_indices_ptr = Ac.col_indices.raw();
IndexType *Ac_dia_values_ptr = Ac.diag.raw();
ValueType *Ac_nonzero_values_ptr = Ac.values.raw();
// Now create Ac.dia_values and Ac.nonzero_values
//thrust::fill(Ac.diag.begin(),Ac.diag.end(),0.);
thrust::fill(Ac.values.begin(), Ac.values.end(), 0.);
cudaCheckError();
// Coalesced version of kernel to fill A
const int num_threads = ( ( num_aggregates + 15) / 16 ) * 16;
const int num_blocks2 = ( ( num_threads + threads_per_block - 1) / threads_per_block );
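// max_nonzeros_per_row is a compile-time template parameter so the column-search loop
// inside the kernel can be fully unrolled and the per-row hash fits in registers plus a
// small shared-memory spill area; the ladder below picks the smallest specialization
// that can hold the measured maximum row length (the < 512 branch also dispatches the
// 256-entry specialization).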
if (max_nonzero_per_row < 16)
{
hipLaunchKernelGGL(( fillA_4by4Blocks_Thrust_Kernel<IndexType, ValueType, 16, threads_per_block, 16, 4>) , dim3(num_blocks2), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_values_ptr, A_nonzero_values_ptr, R_row_offsets_ptr, R_column_indices_ptr, Ac_row_offsets_ptr, Ac_column_indices_ptr, Ac_dia_values_ptr, Ac_nonzero_values_ptr, aggregates_ptr, num_aggregates, num_threads);
}
else if (max_nonzero_per_row < 20)
{
hipLaunchKernelGGL(( fillA_4by4Blocks_Thrust_Kernel<IndexType, ValueType, 20, threads_per_block, 16, 4>) , dim3(num_blocks2), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_values_ptr, A_nonzero_values_ptr, R_row_offsets_ptr, R_column_indices_ptr, Ac_row_offsets_ptr, Ac_column_indices_ptr, Ac_dia_values_ptr, Ac_nonzero_values_ptr, aggregates_ptr, num_aggregates, num_threads);
}
else if (max_nonzero_per_row < 24)
{
hipLaunchKernelGGL(( fillA_4by4Blocks_Thrust_Kernel<IndexType, ValueType, 24, threads_per_block, 16, 4>) , dim3(num_blocks2), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_values_ptr, A_nonzero_values_ptr, R_row_offsets_ptr, R_column_indices_ptr, Ac_row_offsets_ptr, Ac_column_indices_ptr, Ac_dia_values_ptr, Ac_nonzero_values_ptr, aggregates_ptr, num_aggregates, num_threads);
}
else if (max_nonzero_per_row < 28)
{
hipLaunchKernelGGL(( fillA_4by4Blocks_Thrust_Kernel<IndexType, ValueType, 28, threads_per_block, 16, 4>) , dim3(num_blocks2), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_values_ptr, A_nonzero_values_ptr, R_row_offsets_ptr, R_column_indices_ptr, Ac_row_offsets_ptr, Ac_column_indices_ptr, Ac_dia_values_ptr, Ac_nonzero_values_ptr, aggregates_ptr, num_aggregates, num_threads);
}
else if (max_nonzero_per_row < 32)
{
hipLaunchKernelGGL(( fillA_4by4Blocks_Thrust_Kernel<IndexType, ValueType, 32, threads_per_block, 16, 4>) , dim3(num_blocks2), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_values_ptr, A_nonzero_values_ptr, R_row_offsets_ptr, R_column_indices_ptr, Ac_row_offsets_ptr, Ac_column_indices_ptr, Ac_dia_values_ptr, Ac_nonzero_values_ptr, aggregates_ptr, num_aggregates, num_threads);
}
else if (max_nonzero_per_row < 36)
{
hipLaunchKernelGGL(( fillA_4by4Blocks_Thrust_Kernel<IndexType, ValueType, 36, threads_per_block, 16, 4>) , dim3(num_blocks2), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_values_ptr, A_nonzero_values_ptr, R_row_offsets_ptr, R_column_indices_ptr, Ac_row_offsets_ptr, Ac_column_indices_ptr, Ac_dia_values_ptr, Ac_nonzero_values_ptr, aggregates_ptr, num_aggregates, num_threads);
}
else if (max_nonzero_per_row < 40)
{
hipLaunchKernelGGL(( fillA_4by4Blocks_Thrust_Kernel<IndexType, ValueType, 40, threads_per_block, 16, 4>) , dim3(num_blocks2), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_values_ptr, A_nonzero_values_ptr, R_row_offsets_ptr, R_column_indices_ptr, Ac_row_offsets_ptr, Ac_column_indices_ptr, Ac_dia_values_ptr, Ac_nonzero_values_ptr, aggregates_ptr, num_aggregates, num_threads);
}
else if (max_nonzero_per_row < 44)
{
hipLaunchKernelGGL(( fillA_4by4Blocks_Thrust_Kernel<IndexType, ValueType, 44, threads_per_block, 16, 4>) , dim3(num_blocks2), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_values_ptr, A_nonzero_values_ptr, R_row_offsets_ptr, R_column_indices_ptr, Ac_row_offsets_ptr, Ac_column_indices_ptr, Ac_dia_values_ptr, Ac_nonzero_values_ptr, aggregates_ptr, num_aggregates, num_threads);
}
else if (max_nonzero_per_row < 48)
{
hipLaunchKernelGGL(( fillA_4by4Blocks_Thrust_Kernel<IndexType, ValueType, 48, threads_per_block, 16, 4>) , dim3(num_blocks2), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_values_ptr, A_nonzero_values_ptr, R_row_offsets_ptr, R_column_indices_ptr, Ac_row_offsets_ptr, Ac_column_indices_ptr, Ac_dia_values_ptr, Ac_nonzero_values_ptr, aggregates_ptr, num_aggregates, num_threads);
}
else if (max_nonzero_per_row < 52)
{
hipLaunchKernelGGL(( fillA_4by4Blocks_Thrust_Kernel<IndexType, ValueType, 52, threads_per_block, 16, 4>) , dim3(num_blocks2), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_values_ptr, A_nonzero_values_ptr, R_row_offsets_ptr, R_column_indices_ptr, Ac_row_offsets_ptr, Ac_column_indices_ptr, Ac_dia_values_ptr, Ac_nonzero_values_ptr, aggregates_ptr, num_aggregates, num_threads);
}
else if (max_nonzero_per_row < 56)
{
hipLaunchKernelGGL(( fillA_4by4Blocks_Thrust_Kernel<IndexType, ValueType, 56, threads_per_block, 16, 4>) , dim3(num_blocks2), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_values_ptr, A_nonzero_values_ptr, R_row_offsets_ptr, R_column_indices_ptr, Ac_row_offsets_ptr, Ac_column_indices_ptr, Ac_dia_values_ptr, Ac_nonzero_values_ptr, aggregates_ptr, num_aggregates, num_threads);
}
else if (max_nonzero_per_row < 60)
{
hipLaunchKernelGGL(( fillA_4by4Blocks_Thrust_Kernel<IndexType, ValueType, 60, threads_per_block, 16, 4>) , dim3(num_blocks2), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_values_ptr, A_nonzero_values_ptr, R_row_offsets_ptr, R_column_indices_ptr, Ac_row_offsets_ptr, Ac_column_indices_ptr, Ac_dia_values_ptr, Ac_nonzero_values_ptr, aggregates_ptr, num_aggregates, num_threads);
}
else if (max_nonzero_per_row < 64)
{
hipLaunchKernelGGL(( fillA_4by4Blocks_Thrust_Kernel<IndexType, ValueType, 64, threads_per_block, 16, 4>) , dim3(num_blocks2), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_values_ptr, A_nonzero_values_ptr, R_row_offsets_ptr, R_column_indices_ptr, Ac_row_offsets_ptr, Ac_column_indices_ptr, Ac_dia_values_ptr, Ac_nonzero_values_ptr, aggregates_ptr, num_aggregates, num_threads);
}
else if (max_nonzero_per_row < 128)
{
hipLaunchKernelGGL(( fillA_4by4Blocks_Thrust_Kernel<IndexType, ValueType, 128, threads_per_block, 16, 4>) , dim3(num_blocks2), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_values_ptr, A_nonzero_values_ptr, R_row_offsets_ptr, R_column_indices_ptr, Ac_row_offsets_ptr, Ac_column_indices_ptr, Ac_dia_values_ptr, Ac_nonzero_values_ptr, aggregates_ptr, num_aggregates, num_threads);
}
else if (max_nonzero_per_row < 256)
{
hipLaunchKernelGGL(( fillA_4by4Blocks_Thrust_Kernel<IndexType, ValueType, 256, threads_per_block, 16, 4>) , dim3(num_blocks2), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_values_ptr, A_nonzero_values_ptr, R_row_offsets_ptr, R_column_indices_ptr, Ac_row_offsets_ptr, Ac_column_indices_ptr, Ac_dia_values_ptr, Ac_nonzero_values_ptr, aggregates_ptr, num_aggregates, num_threads);
}
else if (max_nonzero_per_row < 512)
{
hipLaunchKernelGGL(( fillA_4by4Blocks_Thrust_Kernel<IndexType, ValueType, 256, threads_per_block, 16, 4>) , dim3(num_blocks2), dim3(threads_per_block), 0, 0, A_row_offsets_ptr, A_column_indices_ptr, A_dia_values_ptr, A_nonzero_values_ptr, R_row_offsets_ptr, R_column_indices_ptr, Ac_row_offsets_ptr, Ac_column_indices_ptr, Ac_dia_values_ptr, Ac_nonzero_values_ptr, aggregates_ptr, num_aggregates, num_threads);
}
else
{
FatalError("Maximum number of nonzeros is too large", AMGX_ERR_BAD_PARAMETERS);
}
cudaCheckError();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void HybridCoarseAGenerator<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeAOperator_4x4(const Matrix<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &A, Matrix<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &Ac, const IVector &aggregates, const IVector &R_row_offsets, const IVector &R_column_indices, const int num_aggregates)
{
FatalError("Host is unsupported for HybridCoarseAGenerator", AMGX_ERR_NOT_SUPPORTED_TARGET);
}
// ------------------------------------------------
template <class T_Config>
void HybridCoarseAGeneratorBase<T_Config>::computeAOperator(const Matrix<T_Config> &A, Matrix<T_Config> &Ac, const IVector &aggregates, const IVector &R_row_offsets, const IVector &R_column_indices, const int num_aggregates)
{
Ac.set_initialized(0);
if (A.get_block_dimx() == 4 && A.get_block_dimy() == 4)
{
computeAOperator_4x4( A, Ac, aggregates, R_row_offsets, R_column_indices, num_aggregates );
}
else
{
FatalError("Unsupported block size for HybridCoarseAGenerator", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
Ac.set_initialized(1);
}
// ---------------------------
// Explicit instantiations
// ---------------------------
#define AMGX_CASE_LINE(CASE) template class HybridCoarseAGeneratorBase<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template class HybridCoarseAGenerator<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
}
}
| 96448bc23bd4b79c84aab16dac06416b5ec95253.cu | /* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
//-----------------------------------------------------
// Method to compute the Galerkin product: A_c=R*A*P
// The number of non-zeros per row is computed using Thrust
// The coarse matrix is created by custom kernel
// --------------------------------------------------------
#include <aggregation/coarseAgenerators/hybrid_coarse_A_generator.h>
#include <thrust/system/detail/generic/reduce_by_key.h>
#include <cusp/detail/format_utils.h> //indices_to_offsets
#include <thrust/remove.h>
#include <thrust/extrema.h>
#include <thrust/adjacent_difference.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/unique.h>
#include <error.h>
#include <cutil.h>
#include <util.h>
#include <types.h>
namespace amgx
{
namespace aggregation
{
typedef thrust::tuple<int, int> tuple_t;
struct isDiagonal
{
__host__ __device__
bool operator()(tuple_t IJ)
{
return ( IJ.get<0>() == IJ.get<1>() );
}
};
// --------------------
// Kernels
// --------------------
// Kernel to store aggregate I of each fine point index i
template <typename IndexType>
__global__
void iToIKernel(const IndexType *row_offsets, const IndexType *aggregates, IndexType *I, const int num_rows)
{
for (int tid = blockDim.x * blockIdx.x + threadIdx.x; tid < num_rows; tid += gridDim.x * blockDim.x)
{
int agg = aggregates[tid];
for (int j = row_offsets[tid]; j < row_offsets[tid + 1]; j++)
{
I[j] = agg;
}
}
}
// Kernel to store aggregate J of each fine point index j
template <typename IndexType>
__global__
void jToJKernel(const IndexType *column_indices, const IndexType *aggregates, IndexType *J, const int num_entries)
{
for (int tid = blockDim.x * blockIdx.x + threadIdx.x; tid < num_entries; tid += gridDim.x * blockDim.x)
{
int j = column_indices[tid];
J[tid] = aggregates[j];
}
}
// Kernel to fill matrix Ac for 4x4 block matrices
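// Overall strategy: each thread owns one coarse row I and caches that row's column
// indices from Ac_column_indices in registers (hash_reg), with any overflow spilled to
// shared memory (hash_shared), so the position k of a coarse column J can be found by a
// fully unrolled linear search. For every fine point i belonging to aggregate I (via the
// R rows) the thread accumulates A's diagonal block into Ac's diagonal block, and for
// every neighbour j of i it accumulates A(i,j) into Ac(I, aggregates[j]) -- or into Ac's
// diagonal block when both points fall in the same aggregate. The 16 threads of a
// half-warp exchange work through shared memory so each 4x4 block is moved with
// coalesced accesses.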
template <typename IndexType, typename ValueType, int max_nonzeros_per_row, int threads_per_block, int bsize_sq, int log_bsize_sq>
__global__
void fillA_4by4Blocks_Thrust_Kernel(const IndexType *A_row_offsets, const IndexType *A_column_indices, const IndexType *A_dia_values, const ValueType *A_nonzero_values, const IndexType *R_row_offsets, const IndexType *R_column_indices, const IndexType *Ac_row_offsets, const IndexType *Ac_column_indices, IndexType *Ac_dia_values, ValueType *Ac_nonzero_values, const IndexType *aggregates, const int num_aggregates, const int num_threads)
{
const int tid = threadIdx.x;
const int I = blockIdx.x * threads_per_block + tid;
const int halfwarp_id = tid >> log_bsize_sq;
const int mat_entry_index = tid & (bsize_sq - 1);
volatile __shared__ IndexType current_cell_to_read[threads_per_block / bsize_sq];
volatile __shared__ IndexType current_cell_to_write[threads_per_block / bsize_sq];
volatile __shared__ IndexType max_nonzeros_i[threads_per_block];
volatile __shared__ IndexType max_nonzeros_j[threads_per_block];
const int size_hash_reg = (2. / 3.) * max_nonzeros_per_row; // 2/3 is arbitrary
const int size_hash_shared = max_nonzeros_per_row - size_hash_reg;
IndexType shared_offset = threadIdx.x * size_hash_shared;
volatile __shared__ IndexType hash_shared[size_hash_shared * threads_per_block];
IndexType hash_reg[size_hash_reg];
int Ac_offset_min;
int Ac_offset_max;
#pragma unroll
for (int i = 0; i < size_hash_reg; i++)
{
hash_reg[i] = -1;
}
for (int i = 0; i < size_hash_shared; i++)
{
hash_shared[shared_offset + i] = -1;
}
// Find maximum number of nonzeros for threads in same halfwarp
int i_offset_min, i_offset_max;
if (I < num_aggregates)
{
i_offset_min = R_row_offsets[I];
i_offset_max = R_row_offsets[I + 1];
max_nonzeros_i[tid] = i_offset_max - i_offset_min;
Ac_offset_min = Ac_row_offsets[I];
Ac_offset_max = Ac_row_offsets[I + 1];
for (int k = Ac_offset_min, icount = 0; k < Ac_offset_max; k++, icount++)
{
if (icount < size_hash_reg)
{
hash_reg[icount] = Ac_column_indices[k];
}
else
{
hash_shared[shared_offset + icount - size_hash_reg] = Ac_column_indices[k];
}
}
}
else
{
i_offset_min = 0;
i_offset_max = 0;
max_nonzeros_i[tid] = 0;
}
int max_loop_i = 0;
for (int m = 0; m < bsize_sq; m++)
{
int max_nonzeros = max_nonzeros_i[halfwarp_id * bsize_sq + m];
if (max_nonzeros > max_loop_i)
{
max_loop_i = max_nonzeros;
}
}
// For all threads that could do useful work
if (I < num_threads)
{
for (int iloop = 0, i_offset = i_offset_min; iloop < max_loop_i; iloop++, i_offset++)
{
// Contribution from A_dia_values of fine point i
int i = (i_offset < i_offset_max) ? R_column_indices[i_offset] : -1 ;
// Have threads collaborate to load in coalesced fashion
for (int m = 0; m < bsize_sq; m++)
{
// Write which cell to load in shared memory buffer
if (mat_entry_index == m)
{
current_cell_to_read[halfwarp_id] = i;
current_cell_to_write[halfwarp_id] = I;
}
// All threads read from shared which cell to read and write
int cell_to_read = current_cell_to_read[halfwarp_id];
int cell_to_write = current_cell_to_write[halfwarp_id];
// Here all threads in half-warp will take same path
if (cell_to_read != -1)
{
Ac_nonzero_values[ bsize_sq * Ac_dia_values[cell_to_write] + mat_entry_index] += A_nonzero_values[bsize_sq * A_dia_values[cell_to_read] + mat_entry_index];
}
}
// Contribution from A_nonzero_values of fine point i
// Find maximum number of nonzeros for threads in same halfwarp_id
int j_offset_min, j_offset_max;
if (i != -1)
{
j_offset_min = A_row_offsets[i];
j_offset_max = A_row_offsets[i + 1];
max_nonzeros_j[tid] = j_offset_max - j_offset_min;
}
else
{
j_offset_min = 0;
j_offset_max = 0;
max_nonzeros_j[tid] = 0;
}
int max_loop_j = 0;
for (int m = 0; m < bsize_sq; m++)
{
int max_nonzeros = max_nonzeros_j[halfwarp_id * bsize_sq + m];
if (max_nonzeros > max_loop_j)
{
max_loop_j = max_nonzeros;
}
}
// Add contribution from nonzero_values of A
int j_offset_to_read;
for (int jloop = 0, j_offset = j_offset_min; jloop < max_loop_j; jloop++, j_offset++)
{
int J, j, k;
if (j_offset < j_offset_max)
{
j_offset_to_read = j_offset;
j = A_column_indices[j_offset];
J = aggregates[j];
// Find index k where to store the data and create A_column_indices
if (I != J)
{
// This weird construct is to allow loop unrolling and avoid register spilling (see original version below)
int found = 0;
k = Ac_offset_min;
#pragma unroll
for (int icount = 0; icount < max_nonzeros_per_row; icount++)
{
if (k < Ac_offset_max)
{
if (found == 0)
{
int Jtemp = (icount < size_hash_reg) ? hash_reg[icount] : hash_shared[shared_offset + icount - size_hash_reg];
if (J == Jtemp)
{
found = 1;
}
if (found == 0) { k++; }
}
}
}
} // if I != J
} // if j_offset < j_offset_max
else
{
j_offset_to_read = -1;
}
// Have threads collaborate to load in coalesced fashion
for (int m = 0; m < bsize_sq; m++)
{
// Write which cell to load in shared memory buffer
if (mat_entry_index == m)
{
current_cell_to_read[halfwarp_id] = j_offset_to_read;
if (I != J)
{
current_cell_to_write[halfwarp_id] = -k;
}
else
{
current_cell_to_write[halfwarp_id] = I + 1;
}
}
// All threads read from shared which cell to read and write
int cell_to_read = current_cell_to_read[halfwarp_id];
int cell_to_write = current_cell_to_write[halfwarp_id];
if (cell_to_read != -1)
{
if (cell_to_write <= 0)
{
Ac_nonzero_values[(-cell_to_write)*bsize_sq + mat_entry_index] += A_nonzero_values[cell_to_read * bsize_sq + mat_entry_index];
}
else
{
Ac_nonzero_values[ Ac_dia_values[ cell_to_write - 1 ] * bsize_sq + mat_entry_index] += A_nonzero_values[cell_to_read * bsize_sq + mat_entry_index];
}
}
}
} // j_offset_loop
} // i_offset_loop
} // if tid < num_threads
}
// Constructor
template<class T_Config>
HybridCoarseAGeneratorBase<T_Config>::HybridCoarseAGeneratorBase()
{
}
//-----------------------------------------------------
// Method to compute the Galerkin product: A_c=R*A*P
// The number of non-zeros per row is computed using Thrust
// The coarse matrix is created by custom kernel
//-----------------------------------------------------
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void HybridCoarseAGenerator<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::computeAOperator_4x4(const Matrix<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &A, Matrix<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> > &Ac, const typename Matrix_d::IVector &aggregates, const typename Matrix_d::IVector &R_row_offsets, const typename Matrix_d::IVector &R_column_indices, const int num_aggregates)
{
// Requires the outside (DIAG) diagonal storage property; matrices with the diagonal stored inside the rows are not supported yet
if (!A.hasProps(DIAG))
{
FatalError("Hybryd coarser does not support inside diagonal yet\n", AMGX_ERR_NOT_IMPLEMENTED);
}
if (A.get_num_nz() == 0)
{
FatalError("Hybryd coarser does not work correctly with diagonal matrices\n", AMGX_ERR_NOT_IMPLEMENTED);
}
typename Matrix_d::IVector I(A.get_num_nz(), -1);
typename Matrix_d::IVector J(A.get_num_nz(), -1);
typedef device_vector_alloc<IndexType> IntVector;
typedef typename IntVector::iterator IntIterator;
typedef thrust::tuple< IntIterator, IntIterator> IntIteratorTuple;
typedef thrust::zip_iterator<IntIteratorTuple> ZipIterator;
ZipIterator new_end;
const int block_size_I = 128;
const int block_size_J = 256;
const int num_blocks_I = std::min( AMGX_GRID_MAX_SIZE, (int) ((A.get_num_rows() - 1) / block_size_I + 1));
const int num_blocks_J = std::min( AMGX_GRID_MAX_SIZE, (int) ((A.get_num_nz() - 1) / block_size_J + 1));
const IndexType *A_row_offsets_ptr = A.row_offsets.raw();
const IndexType *A_column_indices_ptr = A.col_indices.raw();
const IndexType *A_dia_values_ptr = A.diag.raw();
const ValueType *A_nonzero_values_ptr = A.values.raw();
const IndexType *aggregates_ptr = aggregates.raw();
IndexType *I_ptr = I.raw();
IndexType *J_ptr = J.raw();
// Kernel to fill array I with aggregates number for fine points i
iToIKernel <<< num_blocks_I, block_size_I>>>(A_row_offsets_ptr, aggregates_ptr, I_ptr, (int) A.get_num_rows());
cudaCheckError();
// Kernel to fill array J with aggregates number for fine points j
jToJKernel <<< num_blocks_J, block_size_J>>>(A_column_indices_ptr, aggregates_ptr, J_ptr, (int) A.get_num_nz());
cudaCheckError();
// Sort (I,J) by rows and columns (I,J)
IVector permutation(A.get_num_nz());
thrust::sequence(permutation.begin(), permutation.end());
cudaCheckError();
// compute permutation and sort by (I,J)
{
IVector temp(J);
thrust::stable_sort_by_key(temp.begin(), temp.end(), permutation.begin());
cudaCheckError();
temp = I;
//I = temp;
thrust_wrapper::gather(permutation.begin(), permutation.end(), temp.begin(), I.begin());
cudaCheckError();
thrust::stable_sort_by_key(I.begin(), I.end(), permutation.begin());
cudaCheckError();
temp = J;
//J = temp;
thrust_wrapper::gather(permutation.begin(), permutation.end(), temp.begin(), J.begin());
cudaCheckError();
}
// Remove duplicate tuples
new_end = thrust::unique(thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())),
thrust::make_zip_iterator(thrust::make_tuple(I.end(), J.end())), thrust::equal_to < thrust::tuple<IndexType, IndexType> >());
cudaCheckError();
IntIteratorTuple endTuple = new_end.get_iterator_tuple();
I.erase(thrust::get<0>(endTuple), I.end());
J.erase(thrust::get<1>(endTuple), J.end());
// Remove diagonal terms
new_end = thrust::remove_if(thrust::make_zip_iterator(thrust::make_tuple(I.begin(), J.begin())),
thrust::make_zip_iterator(thrust::make_tuple(I.end(), J.end())), isDiagonal() );
cudaCheckError();
endTuple = new_end.get_iterator_tuple();
I.erase(thrust::get<0>(endTuple), I.end());
J.erase(thrust::get<1>(endTuple), J.end());
int nonzero_blocks = J.size();
// Resize Ac
Ac.addProps(CSR);
if (A.hasProps(DIAG)) { Ac.addProps(DIAG); }
Ac.resize(num_aggregates, num_aggregates, nonzero_blocks, A.get_block_dimy(), A.get_block_dimx());
//Ac.resize(num_aggregates,num_aggregates,nonzero_blocks,A.get_block_dimy(),A.get_block_dimx(), 1);
// Ac.column_indices
Ac.col_indices = J;
J.clear();
J.shrink_to_fit();
// Convert array new_row_indices to offsets
cusp::detail::indices_to_offsets(I, Ac.row_offsets);
cudaCheckError();
I.resize(Ac.row_offsets.size());
// Compute the maximum number of nonzeros
thrust::adjacent_difference(Ac.row_offsets.begin(), Ac.row_offsets.end(), I.begin());
cudaCheckError();
const IndexType max_nonzero_per_row = *thrust::max_element(I.begin() + 1, I.end());
cudaCheckError();
//std::cout << "max_nonzero_per_row" << max_nonzero_per_row << std::endl;
I.clear();
I.shrink_to_fit();
const IndexType *R_row_offsets_ptr = R_row_offsets.raw();
const IndexType *R_column_indices_ptr = R_column_indices.raw();
// Get max_nonzero_per_row
//const IndexType max_nonzero_per_row = 32;
const int threads_per_block = 128;
// Store the column_indices in a register
// Option to print the number of nonzeros distribution
//printNonzeroStats(Ac_row_offsets_temp,num_aggregates);
// Resize Ac and doing exclusive scan on Ac_row_offsets_temp
IndexType *Ac_row_offsets_ptr = Ac.row_offsets.raw();
IndexType *Ac_column_indices_ptr = Ac.col_indices.raw();
IndexType *Ac_dia_values_ptr = Ac.diag.raw();
ValueType *Ac_nonzero_values_ptr = Ac.values.raw();
// Now create Ac.dia_values and Ac.nonzero_values
//thrust::fill(Ac.diag.begin(),Ac.diag.end(),0.);
thrust::fill(Ac.values.begin(), Ac.values.end(), 0.);
cudaCheckError();
// Coalesced version of kernel to fill A
const int num_threads = ( ( num_aggregates + 15) / 16 ) * 16;
const int num_blocks2 = ( ( num_threads + threads_per_block - 1) / threads_per_block );
if (max_nonzero_per_row < 16)
{
fillA_4by4Blocks_Thrust_Kernel<IndexType, ValueType, 16, threads_per_block, 16, 4> <<< num_blocks2, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_values_ptr, A_nonzero_values_ptr, R_row_offsets_ptr, R_column_indices_ptr, Ac_row_offsets_ptr, Ac_column_indices_ptr, Ac_dia_values_ptr, Ac_nonzero_values_ptr, aggregates_ptr, num_aggregates, num_threads);
}
else if (max_nonzero_per_row < 20)
{
fillA_4by4Blocks_Thrust_Kernel<IndexType, ValueType, 20, threads_per_block, 16, 4> <<< num_blocks2, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_values_ptr, A_nonzero_values_ptr, R_row_offsets_ptr, R_column_indices_ptr, Ac_row_offsets_ptr, Ac_column_indices_ptr, Ac_dia_values_ptr, Ac_nonzero_values_ptr, aggregates_ptr, num_aggregates, num_threads);
}
else if (max_nonzero_per_row < 24)
{
fillA_4by4Blocks_Thrust_Kernel<IndexType, ValueType, 24, threads_per_block, 16, 4> <<< num_blocks2, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_values_ptr, A_nonzero_values_ptr, R_row_offsets_ptr, R_column_indices_ptr, Ac_row_offsets_ptr, Ac_column_indices_ptr, Ac_dia_values_ptr, Ac_nonzero_values_ptr, aggregates_ptr, num_aggregates, num_threads);
}
else if (max_nonzero_per_row < 28)
{
fillA_4by4Blocks_Thrust_Kernel<IndexType, ValueType, 28, threads_per_block, 16, 4> <<< num_blocks2, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_values_ptr, A_nonzero_values_ptr, R_row_offsets_ptr, R_column_indices_ptr, Ac_row_offsets_ptr, Ac_column_indices_ptr, Ac_dia_values_ptr, Ac_nonzero_values_ptr, aggregates_ptr, num_aggregates, num_threads);
}
else if (max_nonzero_per_row < 32)
{
fillA_4by4Blocks_Thrust_Kernel<IndexType, ValueType, 32, threads_per_block, 16, 4> <<< num_blocks2, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_values_ptr, A_nonzero_values_ptr, R_row_offsets_ptr, R_column_indices_ptr, Ac_row_offsets_ptr, Ac_column_indices_ptr, Ac_dia_values_ptr, Ac_nonzero_values_ptr, aggregates_ptr, num_aggregates, num_threads);
}
else if (max_nonzero_per_row < 36)
{
fillA_4by4Blocks_Thrust_Kernel<IndexType, ValueType, 36, threads_per_block, 16, 4> <<< num_blocks2, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_values_ptr, A_nonzero_values_ptr, R_row_offsets_ptr, R_column_indices_ptr, Ac_row_offsets_ptr, Ac_column_indices_ptr, Ac_dia_values_ptr, Ac_nonzero_values_ptr, aggregates_ptr, num_aggregates, num_threads);
}
else if (max_nonzero_per_row < 40)
{
fillA_4by4Blocks_Thrust_Kernel<IndexType, ValueType, 40, threads_per_block, 16, 4> <<< num_blocks2, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_values_ptr, A_nonzero_values_ptr, R_row_offsets_ptr, R_column_indices_ptr, Ac_row_offsets_ptr, Ac_column_indices_ptr, Ac_dia_values_ptr, Ac_nonzero_values_ptr, aggregates_ptr, num_aggregates, num_threads);
}
else if (max_nonzero_per_row < 44)
{
fillA_4by4Blocks_Thrust_Kernel<IndexType, ValueType, 44, threads_per_block, 16, 4> <<< num_blocks2, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_values_ptr, A_nonzero_values_ptr, R_row_offsets_ptr, R_column_indices_ptr, Ac_row_offsets_ptr, Ac_column_indices_ptr, Ac_dia_values_ptr, Ac_nonzero_values_ptr, aggregates_ptr, num_aggregates, num_threads);
}
else if (max_nonzero_per_row < 48)
{
fillA_4by4Blocks_Thrust_Kernel<IndexType, ValueType, 48, threads_per_block, 16, 4> <<< num_blocks2, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_values_ptr, A_nonzero_values_ptr, R_row_offsets_ptr, R_column_indices_ptr, Ac_row_offsets_ptr, Ac_column_indices_ptr, Ac_dia_values_ptr, Ac_nonzero_values_ptr, aggregates_ptr, num_aggregates, num_threads);
}
else if (max_nonzero_per_row < 52)
{
fillA_4by4Blocks_Thrust_Kernel<IndexType, ValueType, 52, threads_per_block, 16, 4> <<< num_blocks2, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_values_ptr, A_nonzero_values_ptr, R_row_offsets_ptr, R_column_indices_ptr, Ac_row_offsets_ptr, Ac_column_indices_ptr, Ac_dia_values_ptr, Ac_nonzero_values_ptr, aggregates_ptr, num_aggregates, num_threads);
}
else if (max_nonzero_per_row < 56)
{
fillA_4by4Blocks_Thrust_Kernel<IndexType, ValueType, 56, threads_per_block, 16, 4> <<< num_blocks2, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_values_ptr, A_nonzero_values_ptr, R_row_offsets_ptr, R_column_indices_ptr, Ac_row_offsets_ptr, Ac_column_indices_ptr, Ac_dia_values_ptr, Ac_nonzero_values_ptr, aggregates_ptr, num_aggregates, num_threads);
}
else if (max_nonzero_per_row < 60)
{
fillA_4by4Blocks_Thrust_Kernel<IndexType, ValueType, 60, threads_per_block, 16, 4> <<< num_blocks2, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_values_ptr, A_nonzero_values_ptr, R_row_offsets_ptr, R_column_indices_ptr, Ac_row_offsets_ptr, Ac_column_indices_ptr, Ac_dia_values_ptr, Ac_nonzero_values_ptr, aggregates_ptr, num_aggregates, num_threads);
}
else if (max_nonzero_per_row < 64)
{
fillA_4by4Blocks_Thrust_Kernel<IndexType, ValueType, 64, threads_per_block, 16, 4> <<< num_blocks2, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_values_ptr, A_nonzero_values_ptr, R_row_offsets_ptr, R_column_indices_ptr, Ac_row_offsets_ptr, Ac_column_indices_ptr, Ac_dia_values_ptr, Ac_nonzero_values_ptr, aggregates_ptr, num_aggregates, num_threads);
}
else if (max_nonzero_per_row < 128)
{
fillA_4by4Blocks_Thrust_Kernel<IndexType, ValueType, 128, threads_per_block, 16, 4> <<< num_blocks2, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_values_ptr, A_nonzero_values_ptr, R_row_offsets_ptr, R_column_indices_ptr, Ac_row_offsets_ptr, Ac_column_indices_ptr, Ac_dia_values_ptr, Ac_nonzero_values_ptr, aggregates_ptr, num_aggregates, num_threads);
}
else if (max_nonzero_per_row < 256)
{
fillA_4by4Blocks_Thrust_Kernel<IndexType, ValueType, 256, threads_per_block, 16, 4> <<< num_blocks2, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_values_ptr, A_nonzero_values_ptr, R_row_offsets_ptr, R_column_indices_ptr, Ac_row_offsets_ptr, Ac_column_indices_ptr, Ac_dia_values_ptr, Ac_nonzero_values_ptr, aggregates_ptr, num_aggregates, num_threads);
}
else if (max_nonzero_per_row < 512)
{
fillA_4by4Blocks_Thrust_Kernel<IndexType, ValueType, 256, threads_per_block, 16, 4> <<< num_blocks2, threads_per_block>>>(A_row_offsets_ptr, A_column_indices_ptr, A_dia_values_ptr, A_nonzero_values_ptr, R_row_offsets_ptr, R_column_indices_ptr, Ac_row_offsets_ptr, Ac_column_indices_ptr, Ac_dia_values_ptr, Ac_nonzero_values_ptr, aggregates_ptr, num_aggregates, num_threads);
}
else
{
FatalError("Maximum number of nonzeros is too large", AMGX_ERR_BAD_PARAMETERS);
}
cudaCheckError();
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void HybridCoarseAGenerator<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::computeAOperator_4x4(const Matrix<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &A, Matrix<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> > &Ac, const IVector &aggregates, const IVector &R_row_offsets, const IVector &R_column_indices, const int num_aggregates)
{
FatalError("Host is unsupported for HybridCoarseAGenerator", AMGX_ERR_NOT_SUPPORTED_TARGET);
}
// ------------------------------------------------
template <class T_Config>
void HybridCoarseAGeneratorBase<T_Config>::computeAOperator(const Matrix<T_Config> &A, Matrix<T_Config> &Ac, const IVector &aggregates, const IVector &R_row_offsets, const IVector &R_column_indices, const int num_aggregates)
{
Ac.set_initialized(0);
if (A.get_block_dimx() == 4 && A.get_block_dimy() == 4)
{
computeAOperator_4x4( A, Ac, aggregates, R_row_offsets, R_column_indices, num_aggregates );
}
else
{
FatalError("Unsupported block size for HybridCoarseAGenerator", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
Ac.set_initialized(1);
}
// ---------------------------
// Explicit instantiations
// ---------------------------
#define AMGX_CASE_LINE(CASE) template class HybridCoarseAGeneratorBase<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template class HybridCoarseAGenerator<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
}
}
|
7936e534f28554e8e1761c15c052e399ee785411.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../inc/RayTracer_DynamicSkeleton.cuh"
#include "../inc/RayTracer_Dynamic_Camera.cuh"
// Define
// ...
// Typedef
// ...
// Static Function Prototype
// skeleton
Dynamic_CUDA_constructTypeSkeleton(camera_default, Camera, Camera);
// table
// ...
// cuda linker function
// __global__ static void camera_setAll (Camera *camera);
__global__ static void camera_setLookFrom (Camera *camera, fp_t v_0, fp_t v_1, fp_t v_2);
__global__ static void camera_setLookAt (Camera *camera, fp_t v_0, fp_t v_1, fp_t v_2);
__global__ static void camera_setUpDirection (Camera *camera, fp_t v_0, fp_t v_1, fp_t v_2);
__global__ static void camera_setFOV (Camera *camera, fp_t value);
__global__ static void camera_setAspectRatio (Camera *camera, fp_t value);
// Static Data
// ...
// Operation Handling
__host__ void RayTracer_Dynamic_Camera_init(std::vector<Dynamic_ContainerType*> *type_list) {
// table
// ...
// create type
Dynamic_ContainerType *type;
Dynamic_CUDA_addType(camera_default, camera_default, type_list);
}
__host__ void RayTracer_Dynamic_Camera_info() {
}
__host__ void RayTracer_Dynamic_Camera_del() {
}
// cuda linker function
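// Each host wrapper below launches a single-thread kernel so that the corresponding
// Camera setter runs on the device, where the Camera object itself lives; the Camera*
// argument is therefore expected to be a device pointer. The launches are asynchronous
// and no error checking is performed here.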
// __host__ void Dynamic_Camera_setAll(Camera *camera) {
// }
__host__ error_t Dynamic_Camera_setLookFrom(Camera *camera, const Vec3f look_from) {
hipLaunchKernelGGL(( camera_setLookFrom) , dim3(1), dim3(1) , 0, 0, camera, look_from[0], look_from[1], look_from[2]);
return ERROR_NO;
}
__host__ error_t Dynamic_Camera_setLookAt(Camera *camera, const Vec3f look_at) {
hipLaunchKernelGGL(( camera_setLookAt) , dim3(1), dim3(1) , 0, 0, camera, look_at[0], look_at[1], look_at[2]);
return ERROR_NO;
}
__host__ error_t Dynamic_Camera_setUpDirection(Camera *camera, const Vec3f up) {
hipLaunchKernelGGL(( camera_setUpDirection) , dim3(1), dim3(1) , 0, 0, camera, up[0], up[1], up[2]);
return ERROR_NO;
}
__host__ error_t Dynamic_Camera_setFOV(Camera *camera, fp_t value) {
hipLaunchKernelGGL(( camera_setFOV) , dim3(1), dim3(1) , 0, 0, camera, value);
return ERROR_NO;
}
__host__ error_t Dynamic_Camera_setAspectRatio(Camera *camera, fp_t value) {
hipLaunchKernelGGL(( camera_setAspectRatio) , dim3(1), dim3(1) , 0, 0, camera, value);
return ERROR_NO;
}
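// Example host-side usage (a sketch only; it assumes `d_camera` is a Camera allocated in
// device memory by the surrounding framework, and that the caller synchronizes before
// rendering, since the setter kernels above are asynchronous):
//
// Camera *d_camera = /* device allocation owned by the dynamic container */;
// Dynamic_Camera_setLookFrom (d_camera, Vec3f(0.0, 0.0, -5.0));
// Dynamic_Camera_setLookAt (d_camera, Vec3f(0.0, 0.0, 0.0));
// Dynamic_Camera_setFOV (d_camera, 45.0);
// Dynamic_Camera_setAspectRatio(d_camera, 16.0 / 9.0);
// hipDeviceSynchronize();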
// Static Function Implementation
// table
// ...
// cuda linker function
// __global__ static void camera_setAll(Camera *camera) {
// }
__global__ static void camera_setLookFrom(Camera *camera, fp_t v_0, fp_t v_1, fp_t v_2) {
camera->setLookFrom(Vec3f(v_0, v_1, v_2));
}
__global__ static void camera_setLookAt(Camera *camera, fp_t v_0, fp_t v_1, fp_t v_2) {
camera->setLookAt(Vec3f(v_0, v_1, v_2));
}
__global__ static void camera_setUpDirection(Camera *camera, fp_t v_0, fp_t v_1, fp_t v_2) {
camera->setUpDirection(Vec3f(v_0, v_1, v_2));
}
__global__ static void camera_setFOV(Camera *camera, fp_t value) {
camera->setFOV(value);
}
__global__ static void camera_setAspectRatio(Camera *camera, fp_t value) {
camera->setAspectRatio(value);
}
| 7936e534f28554e8e1761c15c052e399ee785411.cu | #include "../inc/RayTracer_DynamicSkeleton.cuh"
#include "../inc/RayTracer_Dynamic_Camera.cuh"
// Define
// ...
// Typedef
// ...
// Static Function Prototype
// skeleton
Dynamic_CUDA_constructTypeSkeleton(camera_default, Camera, Camera);
// table
// ...
// cuda linker function
// __global__ static void camera_setAll (Camera *camera);
__global__ static void camera_setLookFrom (Camera *camera, fp_t v_0, fp_t v_1, fp_t v_2);
__global__ static void camera_setLookAt (Camera *camera, fp_t v_0, fp_t v_1, fp_t v_2);
__global__ static void camera_setUpDirection (Camera *camera, fp_t v_0, fp_t v_1, fp_t v_2);
__global__ static void camera_setFOV (Camera *camera, fp_t value);
__global__ static void camera_setAspectRatio (Camera *camera, fp_t value);
// Static Data
// ...
// Operation Handling
__host__ void RayTracer_Dynamic_Camera_init(std::vector<Dynamic_ContainerType*> *type_list) {
// table
// ...
// create type
Dynamic_ContainerType *type;
Dynamic_CUDA_addType(camera_default, camera_default, type_list);
}
__host__ void RayTracer_Dynamic_Camera_info() {
}
__host__ void RayTracer_Dynamic_Camera_del() {
}
// cuda linker function
// __host__ void Dynamic_Camera_setAll(Camera *camera) {
// }
__host__ error_t Dynamic_Camera_setLookFrom(Camera *camera, const Vec3f look_from) {
camera_setLookFrom <<< 1, 1 >>> (camera, look_from[0], look_from[1], look_from[2]);
return ERROR_NO;
}
__host__ error_t Dynamic_Camera_setLookAt(Camera *camera, const Vec3f look_at) {
camera_setLookAt <<< 1, 1 >>> (camera, look_at[0], look_at[1], look_at[2]);
return ERROR_NO;
}
__host__ error_t Dynamic_Camera_setUpDirection(Camera *camera, const Vec3f up) {
camera_setUpDirection <<< 1, 1 >>> (camera, up[0], up[1], up[2]);
return ERROR_NO;
}
__host__ error_t Dynamic_Camera_setFOV(Camera *camera, fp_t value) {
camera_setFOV <<< 1, 1 >>> (camera, value);
return ERROR_NO;
}
__host__ error_t Dynamic_Camera_setAspectRatio(Camera *camera, fp_t value) {
camera_setAspectRatio <<< 1, 1 >>> (camera, value);
return ERROR_NO;
}
// Static Function Implementation
// table
// ...
// cuda linker function
// __global__ static void camera_setAll(Camera *camera) {
// }
__global__ static void camera_setLookFrom(Camera *camera, fp_t v_0, fp_t v_1, fp_t v_2) {
camera->setLookFrom(Vec3f(v_0, v_1, v_2));
}
__global__ static void camera_setLookAt(Camera *camera, fp_t v_0, fp_t v_1, fp_t v_2) {
camera->setLookAt(Vec3f(v_0, v_1, v_2));
}
__global__ static void camera_setUpDirection(Camera *camera, fp_t v_0, fp_t v_1, fp_t v_2) {
camera->setUpDirection(Vec3f(v_0, v_1, v_2));
}
__global__ static void camera_setFOV(Camera *camera, fp_t value) {
camera->setFOV(value);
}
__global__ static void camera_setAspectRatio(Camera *camera, fp_t value) {
camera->setAspectRatio(value);
}
|
b8330bee82b6a85a932b7b203b3b33908006d731.hip | // !!! This is a file automatically generated by hipify!!!
/*
benchmark timing of different posit-to-float conversion implementations;
testing performance only, not correctness
(correct output will be debugged and verified later for any candidate that shows high performance)
*/
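/*
Measurement pattern shared by the p2f* kernels below: threads first align on a bar.sync,
read the %clock cycle counter, perform one 8-bit -> float conversion (computed,
constant-memory lookup, shared-memory lookup or texture lookup), read %clock again, and
write the start/stop counts to global memory; the host then averages (stop - start) over
all threads. The dummy kernels at the end provide copy/FMA baselines timed with events
instead of per-thread clocks.
*/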
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>
#include <hip/hip_runtime_api.h>
#define FP_TYPE float
#define POSIT_TYPE uint8_t
#include "posit_cuda.cuh"
texture<float, 1, hipReadModeElementType> t_features;
__constant__ uint16_t table_[256];
__global__ void p2f(POSIT_TYPE in[], FP_TYPE out[], int n, uint32_t startClk[], uint32_t stopClk[]) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
float sink =0;
if (i < n) {
fp16 temp = in[i] << 8;
asm volatile ("bar.sync 0;");
uint32_t start = 0;
// start timing
asm volatile ("mov.u32 %0, %%clock;" : "=r"(start) :: "memory");
sink = fp16tofp32_gpu(temp);
uint32_t stop = 0;
asm volatile ("mov.u32 %0, %%clock;" : "=r"(stop) :: "memory");
// write time and data back to memory
startClk[i] = start;
stopClk[i] = stop;
out [i] = sink;
}
}
__global__ void p2f_lookup(POSIT_TYPE in[], FP_TYPE out[], int n, uint32_t startClk[], uint32_t stopClk[]) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
float sink = 0;
if (i < n) {
short index = in[i];
asm volatile ("bar.sync 0;");
uint32_t start = 0;
// start timing
asm volatile ("mov.u32 %0, %%clock;" : "=r"(start) :: "memory");
uint32_t temp = table_[index]<<16;
sink = *((float*)&temp);
uint32_t stop = 0;
asm volatile ("mov.u32 %0, %%clock;" : "=r"(stop) :: "memory");
// write time and data back to memory
startClk[i] = start;
stopClk[i] = stop;
out [i] = sink;
}
}
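// Note: in the shared-memory variant below, staging the 256-entry table into shared
// memory happens inside the timed region, so its measured latency includes that one-time
// copy and the __syncthreads() barrier, not just the table lookup itself.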
__global__ void p2f_lookup_shared_mem(POSIT_TYPE in[], FP_TYPE out[], int n, uint32_t startClk[], uint32_t stopClk[]) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
float sink = 0;
if (i < n) {
// prepare shared mem data
short index = in[i] ;
asm volatile ("bar.sync 0;");
uint32_t start = 0;
// start timing
asm volatile ("mov.u32 %0, %%clock;" : "=r"(start) :: "memory");
__shared__ uint16_t table_shared[256];
table_shared[threadIdx.x] = table_[threadIdx.x];
__syncthreads();
uint32_t temp = table_shared[index]<<16;
sink = *((float*)&temp);
uint32_t stop = 0;
asm volatile ("mov.u32 %0, %%clock;" : "=r"(stop) :: "memory");
// write time and data back to memory
startClk[i] = start;
stopClk[i] = stop;
out [i] = sink;
}
}
__global__ void p2f_lookup_texture(POSIT_TYPE in[], FP_TYPE out[], int n, uint32_t startClk[], uint32_t stopClk[]) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
float sink = 0;
if (i < n) {
short index = in[i] ;
asm volatile ("bar.sync 0;");
uint32_t start = 0;
// start timing
asm volatile ("mov.u32 %0, %%clock;" : "=r"(start) :: "memory");
sink = tex1Dfetch(t_features,index);
uint32_t stop = 0;
asm volatile ("mov.u32 %0, %%clock;" : "=r"(stop) :: "memory");
// write time and data back to memory
startClk[i] = start;
stopClk[i] = stop;
out [i]= sink;
//out [i] = *((float*)&temp);
}
}
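// Baseline kernels: a single multiply-add, a float copy and a uint8_t copy. They are
// timed with event elapsed time in main() to provide rough baseline costs for comparison
// with the conversion kernels above.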
__global__ void p2f_dummy_1op(FP_TYPE in[], FP_TYPE out[], int n) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) {
out [i]+= 0.2*in[i];
}
}
__global__ void p2f_dummy_coppy(FP_TYPE in[], FP_TYPE out[], int n) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) {
out [i] = in[i];
}
}
__global__ void p2f_dummy_coppy_uint8(POSIT_TYPE in[], POSIT_TYPE out[], int n) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) {
out [i] = in[i];
}
}
/* Host code */
int main(int argc, char* argv[]) {
int n, i;
POSIT_TYPE *h_in, *d_in;
FP_TYPE *h_out, *d_out;
FP_TYPE *table_global;
int threads_per_block;
int block_count;
//size_t size;
hipEvent_t start, stop;
float elapsedTime;
/* Get number of components in vector */
if (argc != 2) {
fprintf(stderr, "usage: %s <vector order>\n", argv[0]);
exit(0);
}
n = strtol(argv[1], NULL, 10);
uint16_t lookup_table[256] = {0x0, 0x3380, 0x3580, 0x3680, 0x3780, 0x3800, 0x3880, 0x3900, 0x3980, 0x39c0, 0x3a00, 0x3a40, 0x3a80, 0x3ac0, 0x3b00, 0x3b40, 0x3b80, 0x3ba0, 0x3bc0, 0x3be0, 0x3c00, 0x3c20, 0x3c40, 0x3c60, 0x3c80, 0x3ca0, 0x3cc0, 0x3ce0, 0x3d00, 0x3d20, 0x3d40, 0x3d60, 0x3d80, 0x3d90, 0x3da0, 0x3db0, 0x3dc0, 0x3dd0, 0x3de0, 0x3df0, 0x3e00, 0x3e10, 0x3e20, 0x3e30, 0x3e40, 0x3e50, 0x3e60, 0x3e70, 0x3e80, 0x3e90, 0x3ea0, 0x3eb0, 0x3ec0, 0x3ed0, 0x3ee0, 0x3ef0, 0x3f00, 0x3f10, 0x3f20, 0x3f30, 0x3f40, 0x3f50, 0x3f60, 0x3f70, 0x3f80, 0x3f90, 0x3fa0, 0x3fb0, 0x3fc0, 0x3fd0, 0x3fe0, 0x3ff0, 0x4000, 0x4010, 0x4020, 0x4030, 0x4040, 0x4050, 0x4060, 0x4070, 0x4080, 0x4090, 0x40a0, 0x40b0, 0x40c0, 0x40d0, 0x40e0, 0x40f0, 0x4100, 0x4110, 0x4120, 0x4130, 0x4140, 0x4150, 0x4160, 0x4170, 0x4180, 0x41a0, 0x41c0, 0x41e0, 0x4200, 0x4220, 0x4240, 0x4260, 0x4280, 0x42a0, 0x42c0, 0x42e0, 0x4300, 0x4320, 0x4340, 0x4360, 0x4380, 0x43c0, 0x4400, 0x4440, 0x4480, 0x44c0, 0x4500, 0x4540, 0x4580, 0x4600, 0x4680, 0x4700, 0x4780, 0x4880, 0x4980, 0x4b80, 0xff80, 0xcb80, 0xc980, 0xc880, 0xc780, 0xc700, 0xc680, 0xc600, 0xc580, 0xc540, 0xc500, 0xc4c0, 0xc480, 0xc440, 0xc400, 0xc3c0, 0xc380, 0xc360, 0xc340, 0xc320, 0xc300, 0xc2e0, 0xc2c0, 0xc2a0, 0xc280, 0xc260, 0xc240, 0xc220, 0xc200, 0xc1e0, 0xc1c0, 0xc1a0, 0xc180, 0xc170, 0xc160, 0xc150, 0xc140, 0xc130, 0xc120, 0xc110, 0xc100, 0xc0f0, 0xc0e0, 0xc0d0, 0xc0c0, 0xc0b0, 0xc0a0, 0xc090, 0xc080, 0xc070, 0xc060, 0xc050, 0xc040, 0xc030, 0xc020, 0xc010, 0xc000, 0xbff0, 0xbfe0, 0xbfd0, 0xbfc0, 0xbfb0, 0xbfa0, 0xbf90, 0xbf80, 0xbf70, 0xbf60, 0xbf50, 0xbf40, 0xbf30, 0xbf20, 0xbf10, 0xbf00, 0xbef0, 0xbee0, 0xbed0, 0xbec0, 0xbeb0, 0xbea0, 0xbe90, 0xbe80, 0xbe70, 0xbe60, 0xbe50, 0xbe40, 0xbe30, 0xbe20, 0xbe10, 0xbe00, 0xbdf0, 0xbde0, 0xbdd0, 0xbdc0, 0xbdb0, 0xbda0, 0xbd90, 0xbd80, 0xbd60, 0xbd40, 0xbd20, 0xbd00, 0xbce0, 0xbcc0, 0xbca0, 0xbc80, 0xbc60, 0xbc40, 0xbc20, 0xbc00, 0xbbe0, 0xbbc0, 0xbba0, 0xbb80, 0xbb40, 0xbb00, 0xbac0, 0xba80, 0xba40, 0xba00, 0xb9c0, 0xb980, 0xb900, 0xb880, 0xb800, 0xb780, 0xb680, 0xb580, 0xb380 };
hipMemcpyToSymbol(table_, lookup_table, 256*sizeof(uint16_t));
uint32_t lookup_table_int[256];
for (int j =0; j<256; j ++)
lookup_table_int[j] = lookup_table[j] <<16;
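// Each 16-bit entry of lookup_table is the upper half of the IEEE-754 float32 bit
// pattern that the corresponding 8-bit input maps to (e.g. 0x3f80 -> 1.0f), so shifting
// left by 16 reconstructs the full float bits. The 16-bit table is copied to __constant__
// memory for the constant- and shared-memory kernels, while the widened 32-bit copy
// feeds the global buffer that is later bound to the texture.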
/* Allocate input vectors in host memory */
h_in = (POSIT_TYPE*) malloc(n*sizeof(POSIT_TYPE));
h_out = (FP_TYPE*) malloc(n*sizeof(FP_TYPE));
srand(0); // Initialization, should only be called once.
/* Initialize input vectors */
for (i = 0; i < n; i++) {
h_in[i] = rand()%256;
}
uint32_t *h_startClk, *h_stopClk;
uint32_t *d_startClk, *d_stopClk;
int size_clock = n*sizeof(uint32_t);
h_startClk = (uint32_t*) malloc(size_clock);
h_stopClk = (uint32_t*) malloc(size_clock);
hipMalloc(&d_stopClk, size_clock);
hipMalloc(&d_startClk, size_clock);
/* Allocate vectors in device memory */
hipMalloc(&d_in, n*sizeof(POSIT_TYPE));
hipMalloc(&d_out, n*sizeof(FP_TYPE));
hipMalloc(&table_global, 256*sizeof(FP_TYPE));
/* Copy vectors from host memory to device memory */
hipMemcpy(d_in, h_in, n*sizeof(POSIT_TYPE), hipMemcpyHostToDevice);
hipMemcpy(table_global, lookup_table_int, 256*sizeof(FP_TYPE), hipMemcpyHostToDevice);
// this won't give the correct answer because FP_TYPE is 4 bytes while lookup_table holds 2 bytes per element; widening each element of lookup_table by 16 zero bits (as lookup_table_int does) fixes this
//hipMemcpy(d_out, h_out, n*sizeof(FP_TYPE), hipMemcpyHostToDevice);
/* Define block size */
threads_per_block = 256;
hipDeviceSynchronize();
// check for error
hipError_t error = hipGetLastError();
if(error != hipSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", hipGetErrorString(error));
exit(-1);
}
block_count = (n + threads_per_block - 1)/threads_per_block;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start,0);
hipLaunchKernelGGL(( p2f), dim3(block_count), dim3(threads_per_block), 0, 0, d_in, d_out, n, d_startClk, d_stopClk);
//hipDeviceSynchronize();
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start,stop);
hipMemcpy(h_out, d_out, n*sizeof(FP_TYPE), hipMemcpyDeviceToHost);
hipMemcpy(h_startClk, d_startClk, size_clock, hipMemcpyDeviceToHost);
hipMemcpy(h_stopClk, d_stopClk, size_clock, hipMemcpyDeviceToHost);
uint32_t sum = 0;
printf("clk cycles spent on each thread \n");
for (i = 0; i < n; i++) {
if (i%1000==0)
printf("%u,", h_stopClk[i] - h_startClk[i]);
sum+=h_stopClk[i] - h_startClk[i];
}
printf("\n -------- \n average latency (cycles) %f \n",float(sum)/n);
//printf("Elapsed time himeshi's implementation: %f ms\n" ,elapsedTime);
//hipDeviceSynchronize();
hipEventRecord(start,0);
hipLaunchKernelGGL(( p2f_lookup), dim3(block_count), dim3(threads_per_block), 0, 0, d_in, d_out, n, d_startClk, d_stopClk);
//hipDeviceSynchronize();
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipMemcpy(h_out, d_out, n*sizeof(FP_TYPE), hipMemcpyDeviceToHost);
hipEventElapsedTime(&elapsedTime, start,stop);
//printf("Elapsed time lookup constant mem: %f ms\n" ,elapsedTime);
hipMemcpy(h_startClk, d_startClk, size_clock, hipMemcpyDeviceToHost);
hipMemcpy(h_stopClk, d_stopClk, size_clock, hipMemcpyDeviceToHost);
sum = 0;
printf("clk cycles spent on each thread \n");
for (i = 0; i < n; i++) {
if (i%1000==0)
printf("%u,", h_stopClk[i] - h_startClk[i]);
sum+=h_stopClk[i] - h_startClk[i];
}
printf("\n -------- \n Lookup average latency (cycles) %f \n",float(sum)/n);
//hipDeviceSynchronize();
hipEventRecord(start,0);
hipLaunchKernelGGL(( p2f_lookup_shared_mem), dim3(block_count), dim3(threads_per_block), 0, 0, d_in, d_out, n, d_startClk, d_stopClk );
//hipDeviceSynchronize();
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipMemcpy(h_out, d_out, n*sizeof(FP_TYPE), hipMemcpyDeviceToHost);
hipEventElapsedTime(&elapsedTime, start,stop);
// printf("Elapsed time lookup shared mem: %f ms\n" ,elapsedTime);
hipMemcpy(h_startClk, d_startClk, size_clock, hipMemcpyDeviceToHost);
hipMemcpy(h_stopClk, d_stopClk, size_clock, hipMemcpyDeviceToHost);
sum = 0;
printf("clk cycles spent on each thread \n");
for (i = 0; i < n; i++) {
if (i%1000==0)
printf("%u,", h_stopClk[i] - h_startClk[i]);
sum+=h_stopClk[i] - h_startClk[i];
}
printf("\n -------- \n Lookup sharedmem latency (cycles) %f \n",float(sum)/n);
/*
for debugging
for (int k = 0; k <16; k++)
printf("%d %f\n", h_in[k], h_out[k]);
*/
hipChannelFormatDesc chDesc0 = hipCreateChannelDesc<int>();
t_features.filterMode = hipFilterModePoint;
t_features.normalized = false;
t_features.channelDesc = chDesc0;
if(hipBindTexture(NULL, &t_features, table_global, &chDesc0, n*sizeof(uint32_t)) != hipSuccess)
printf("Couldn't bind features array to texture!\n");
//hipDeviceSynchronize();
hipEventRecord(start,0);
hipLaunchKernelGGL(( p2f_lookup_texture), dim3(block_count), dim3(threads_per_block), 0, 0, d_in, d_out, n, d_startClk, d_stopClk);
//hipDeviceSynchronize();
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipMemcpy(h_out, d_out, n*sizeof(FP_TYPE), hipMemcpyDeviceToHost);
hipEventElapsedTime(&elapsedTime, start,stop);
//printf("Elapsed time texture lookup: %f ms\n" ,elapsedTime);
hipMemcpy(h_startClk, d_startClk, size_clock, hipMemcpyDeviceToHost);
hipMemcpy(h_stopClk, d_stopClk, size_clock, hipMemcpyDeviceToHost);
sum = 0;
printf("clk cycles spent on each thread \n");
for (i = 0; i < n; i++) {
if (i%1000==0)
printf("%u,", h_stopClk[i] - h_startClk[i]);
sum+=h_stopClk[i] - h_startClk[i];
}
printf("\n -------- \n Lookup texture latency (cycles) %f \n",float(sum)/n);
exit(0);
FP_TYPE * d_out_dummy ;
hipMalloc(&d_out_dummy, n*sizeof(FP_TYPE));
//hipDeviceSynchronize();
hipEventRecord(start,0);
hipLaunchKernelGGL(( p2f_dummy_1op), dim3(block_count), dim3(threads_per_block), 0, 0, d_out, d_out_dummy, n);
//hipDeviceSynchronize();
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipMemcpy(h_out, d_out, n*sizeof(FP_TYPE), hipMemcpyDeviceToHost);
hipEventElapsedTime(&elapsedTime, start,stop);
printf("Elapsed time 1 MAC float : %f ms\n" ,elapsedTime);
// hipDeviceSynchronize();
hipEventRecord(start,0);
hipLaunchKernelGGL(( p2f_dummy_coppy), dim3(block_count), dim3(threads_per_block), 0, 0, d_out, d_out_dummy, n);
//hipDeviceSynchronize();
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipMemcpy(h_out, d_out, n*sizeof(FP_TYPE), hipMemcpyDeviceToHost);
hipEventElapsedTime(&elapsedTime, start,stop);
printf("Elapsed time 1 copy fp32 : %f ms\n" ,elapsedTime);
POSIT_TYPE * d_in_dummy ;
hipMalloc(&d_in_dummy, n*sizeof(POSIT_TYPE));
// hipDeviceSynchronize();
hipEventRecord(start,0);
hipLaunchKernelGGL(( p2f_dummy_coppy_uint8), dim3(block_count), dim3(threads_per_block), 0, 0, d_in, d_in_dummy, n);
// hipDeviceSynchronize();
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipMemcpy(h_out, d_out, n*sizeof(FP_TYPE), hipMemcpyDeviceToHost);
hipEventElapsedTime(&elapsedTime, start,stop);
printf("Elapsed time 1 copy uint_8 : %f ms\n" ,elapsedTime);
/*
for (int k = 0; k <16; k++)
printf("%d %f\n", h_in[k], h_out[k]);
*/
/* Free device memory */
hipFree(d_out);
hipFree(d_in);
/* Free host memory */
free(h_out);
free(h_in);
return 0;
} /* main */
| b8330bee82b6a85a932b7b203b3b33908006d731.cu | /*
benchmark timing of different posit-to-float conversion implementations;
testing performance only, not correctness
(correct output will be debugged and verified later for any candidate that shows high performance)
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>
#include <cuda_runtime_api.h>
#define FP_TYPE float
#define POSIT_TYPE uint8_t
#include "posit_cuda.cuh"
texture<float, 1, cudaReadModeElementType> t_features;
__constant__ uint16_t table_[256];
__global__ void p2f(POSIT_TYPE in[], FP_TYPE out[], int n, uint32_t startClk[], uint32_t stopClk[]) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
float sink =0;
if (i < n) {
fp16 temp = in[i] << 8;
asm volatile ("bar.sync 0;");
uint32_t start = 0;
// start timing
asm volatile ("mov.u32 %0, %%clock;" : "=r"(start) :: "memory");
sink = fp16tofp32_gpu(temp);
uint32_t stop = 0;
asm volatile ("mov.u32 %0, %%clock;" : "=r"(stop) :: "memory");
// write time and data back to memory
startClk[i] = start;
stopClk[i] = stop;
out [i] = sink;
}
}
__global__ void p2f_lookup(POSIT_TYPE in[], FP_TYPE out[], int n, uint32_t startClk[], uint32_t stopClk[]) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
float sink = 0;
if (i < n) {
short index = in[i];
asm volatile ("bar.sync 0;");
uint32_t start = 0;
// start timing
asm volatile ("mov.u32 %0, %%clock;" : "=r"(start) :: "memory");
uint32_t temp = table_[index]<<16;
sink = *((float*)&temp);
uint32_t stop = 0;
asm volatile ("mov.u32 %0, %%clock;" : "=r"(stop) :: "memory");
// write time and data back to memory
startClk[i] = start;
stopClk[i] = stop;
out [i] = sink;
}
}
__global__ void p2f_lookup_shared_mem(POSIT_TYPE in[], FP_TYPE out[], int n, uint32_t startClk[], uint32_t stopClk[]) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
float sink = 0;
if (i < n) {
// prepare shared mem data
short index = in[i] ;
asm volatile ("bar.sync 0;");
uint32_t start = 0;
// start timing
asm volatile ("mov.u32 %0, %%clock;" : "=r"(start) :: "memory");
__shared__ uint16_t table_shared[256];
table_shared[threadIdx.x] = table_[threadIdx.x];
__syncthreads();
uint32_t temp = table_shared[index]<<16;
sink = *((float*)&temp);
uint32_t stop = 0;
asm volatile ("mov.u32 %0, %%clock;" : "=r"(stop) :: "memory");
// write time and data back to memory
startClk[i] = start;
stopClk[i] = stop;
out [i] = sink;
}
}
__global__ void p2f_lookup_texture(POSIT_TYPE in[], FP_TYPE out[], int n, uint32_t startClk[], uint32_t stopClk[]) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
float sink = 0;
if (i < n) {
short index = in[i] ;
asm volatile ("bar.sync 0;");
uint32_t start = 0;
// start timing
asm volatile ("mov.u32 %0, %%clock;" : "=r"(start) :: "memory");
sink = tex1Dfetch(t_features,index);
uint32_t stop = 0;
asm volatile ("mov.u32 %0, %%clock;" : "=r"(stop) :: "memory");
// write time and data back to memory
startClk[i] = start;
stopClk[i] = stop;
out [i]= sink;
//out [i] = *((float*)&temp);
}
}
__global__ void p2f_dummy_1op(FP_TYPE in[], FP_TYPE out[], int n) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) {
out [i]+= 0.2*in[i];
}
}
__global__ void p2f_dummy_coppy(FP_TYPE in[], FP_TYPE out[], int n) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) {
out [i] = in[i];
}
}
__global__ void p2f_dummy_coppy_uint8(POSIT_TYPE in[], POSIT_TYPE out[], int n) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) {
out [i] = in[i];
}
}
/* Host code */
int main(int argc, char* argv[]) {
int n, i;
POSIT_TYPE *h_in, *d_in;
FP_TYPE *h_out, *d_out;
FP_TYPE *table_global;
int threads_per_block;
int block_count;
//size_t size;
cudaEvent_t start, stop;
float elapsedTime;
/* Get number of components in vector */
if (argc != 2) {
fprintf(stderr, "usage: %s <vector order>\n", argv[0]);
exit(0);
}
n = strtol(argv[1], NULL, 10);
uint16_t lookup_table[256] = {0x0, 0x3380, 0x3580, 0x3680, 0x3780, 0x3800, 0x3880, 0x3900, 0x3980, 0x39c0, 0x3a00, 0x3a40, 0x3a80, 0x3ac0, 0x3b00, 0x3b40, 0x3b80, 0x3ba0, 0x3bc0, 0x3be0, 0x3c00, 0x3c20, 0x3c40, 0x3c60, 0x3c80, 0x3ca0, 0x3cc0, 0x3ce0, 0x3d00, 0x3d20, 0x3d40, 0x3d60, 0x3d80, 0x3d90, 0x3da0, 0x3db0, 0x3dc0, 0x3dd0, 0x3de0, 0x3df0, 0x3e00, 0x3e10, 0x3e20, 0x3e30, 0x3e40, 0x3e50, 0x3e60, 0x3e70, 0x3e80, 0x3e90, 0x3ea0, 0x3eb0, 0x3ec0, 0x3ed0, 0x3ee0, 0x3ef0, 0x3f00, 0x3f10, 0x3f20, 0x3f30, 0x3f40, 0x3f50, 0x3f60, 0x3f70, 0x3f80, 0x3f90, 0x3fa0, 0x3fb0, 0x3fc0, 0x3fd0, 0x3fe0, 0x3ff0, 0x4000, 0x4010, 0x4020, 0x4030, 0x4040, 0x4050, 0x4060, 0x4070, 0x4080, 0x4090, 0x40a0, 0x40b0, 0x40c0, 0x40d0, 0x40e0, 0x40f0, 0x4100, 0x4110, 0x4120, 0x4130, 0x4140, 0x4150, 0x4160, 0x4170, 0x4180, 0x41a0, 0x41c0, 0x41e0, 0x4200, 0x4220, 0x4240, 0x4260, 0x4280, 0x42a0, 0x42c0, 0x42e0, 0x4300, 0x4320, 0x4340, 0x4360, 0x4380, 0x43c0, 0x4400, 0x4440, 0x4480, 0x44c0, 0x4500, 0x4540, 0x4580, 0x4600, 0x4680, 0x4700, 0x4780, 0x4880, 0x4980, 0x4b80, 0xff80, 0xcb80, 0xc980, 0xc880, 0xc780, 0xc700, 0xc680, 0xc600, 0xc580, 0xc540, 0xc500, 0xc4c0, 0xc480, 0xc440, 0xc400, 0xc3c0, 0xc380, 0xc360, 0xc340, 0xc320, 0xc300, 0xc2e0, 0xc2c0, 0xc2a0, 0xc280, 0xc260, 0xc240, 0xc220, 0xc200, 0xc1e0, 0xc1c0, 0xc1a0, 0xc180, 0xc170, 0xc160, 0xc150, 0xc140, 0xc130, 0xc120, 0xc110, 0xc100, 0xc0f0, 0xc0e0, 0xc0d0, 0xc0c0, 0xc0b0, 0xc0a0, 0xc090, 0xc080, 0xc070, 0xc060, 0xc050, 0xc040, 0xc030, 0xc020, 0xc010, 0xc000, 0xbff0, 0xbfe0, 0xbfd0, 0xbfc0, 0xbfb0, 0xbfa0, 0xbf90, 0xbf80, 0xbf70, 0xbf60, 0xbf50, 0xbf40, 0xbf30, 0xbf20, 0xbf10, 0xbf00, 0xbef0, 0xbee0, 0xbed0, 0xbec0, 0xbeb0, 0xbea0, 0xbe90, 0xbe80, 0xbe70, 0xbe60, 0xbe50, 0xbe40, 0xbe30, 0xbe20, 0xbe10, 0xbe00, 0xbdf0, 0xbde0, 0xbdd0, 0xbdc0, 0xbdb0, 0xbda0, 0xbd90, 0xbd80, 0xbd60, 0xbd40, 0xbd20, 0xbd00, 0xbce0, 0xbcc0, 0xbca0, 0xbc80, 0xbc60, 0xbc40, 0xbc20, 0xbc00, 0xbbe0, 0xbbc0, 0xbba0, 0xbb80, 0xbb40, 0xbb00, 0xbac0, 0xba80, 0xba40, 0xba00, 0xb9c0, 0xb980, 0xb900, 0xb880, 0xb800, 0xb780, 0xb680, 0xb580, 0xb380 };
cudaMemcpyToSymbol(table_, lookup_table, 256*sizeof(uint16_t));
uint32_t lookup_table_int[256];
for (int j =0; j<256; j ++)
lookup_table_int[j] = lookup_table[j] <<16;
/* Allocate input vectors in host memory */
h_in = (POSIT_TYPE*) malloc(n*sizeof(POSIT_TYPE));
h_out = (FP_TYPE*) malloc(n*sizeof(FP_TYPE));
srand(0); // Initialization, should only be called once.
/* Initialize input vectors */
for (i = 0; i < n; i++) {
h_in[i] = rand()%256;
}
uint32_t *h_startClk, *h_stopClk;
uint32_t *d_startClk, *d_stopClk;
int size_clock = n*sizeof(uint32_t);
h_startClk = (uint32_t*) malloc(size_clock);
h_stopClk = (uint32_t*) malloc(size_clock);
cudaMalloc(&d_stopClk, size_clock);
cudaMalloc(&d_startClk, size_clock);
/* Allocate vectors in device memory */
cudaMalloc(&d_in, n*sizeof(POSIT_TYPE));
cudaMalloc(&d_out, n*sizeof(FP_TYPE));
cudaMalloc(&table_global, 256*sizeof(FP_TYPE));
/* Copy vectors from host memory to device memory */
cudaMemcpy(d_in, h_in, n*sizeof(POSIT_TYPE), cudaMemcpyHostToDevice);
cudaMemcpy(table_global, lookup_table_int, 256*sizeof(FP_TYPE), cudaMemcpyHostToDevice);
// this won't give the correct answer because FP_TYPE is 4 bytes while lookup_table holds 2 bytes per element; appending 0x00 to each element of lookup_table fixes this
//cudaMemcpy(d_out, h_out, n*sizeof(FP_TYPE), cudaMemcpyHostToDevice);
/* Define block size */
threads_per_block = 256;
cudaDeviceSynchronize();
// check for error
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess)
{
// print the CUDA error message and exit
printf("CUDA error: %s\n", cudaGetErrorString(error));
exit(-1);
}
block_count = (n + threads_per_block - 1)/threads_per_block;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
p2f<<<block_count, threads_per_block>>>(d_in, d_out, n, d_startClk, d_stopClk);
//cudaDeviceSynchronize();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start,stop);
cudaMemcpy(h_out, d_out, n*sizeof(FP_TYPE), cudaMemcpyDeviceToHost);
cudaMemcpy(h_startClk, d_startClk, size_clock, cudaMemcpyDeviceToHost);
cudaMemcpy(h_stopClk, d_stopClk, size_clock, cudaMemcpyDeviceToHost);
uint32_t sum = 0;
printf("clk cycles spent on each thread \n");
for (i = 0; i < n; i++) {
if (i%1000==0)
printf("%u,", h_stopClk[i] - h_startClk[i]);
sum+=h_stopClk[i] - h_startClk[i];
}
printf("\n -------- \n average latency (cycles) %f \n",float(sum)/n);
//printf("Elapsed time himeshi's implementation: %f ms\n" ,elapsedTime);
//cudaDeviceSynchronize();
cudaEventRecord(start,0);
p2f_lookup<<<block_count, threads_per_block>>>(d_in, d_out, n, d_startClk, d_stopClk);
//cudaDeviceSynchronize();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaMemcpy(h_out, d_out, n*sizeof(FP_TYPE), cudaMemcpyDeviceToHost);
cudaEventElapsedTime(&elapsedTime, start,stop);
//printf("Elapsed time lookup constant mem: %f ms\n" ,elapsedTime);
cudaMemcpy(h_startClk, d_startClk, size_clock, cudaMemcpyDeviceToHost);
cudaMemcpy(h_stopClk, d_stopClk, size_clock, cudaMemcpyDeviceToHost);
sum = 0;
printf("clk cycles spent on each thread \n");
for (i = 0; i < n; i++) {
if (i%1000==0)
printf("%u,", h_stopClk[i] - h_startClk[i]);
sum+=h_stopClk[i] - h_startClk[i];
}
printf("\n -------- \n Lookup average latency (cycles) %f \n",float(sum)/n);
//cudaDeviceSynchronize();
cudaEventRecord(start,0);
p2f_lookup_shared_mem<<<block_count, threads_per_block>>>(d_in, d_out, n, d_startClk, d_stopClk );
//cudaDeviceSynchronize();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaMemcpy(h_out, d_out, n*sizeof(FP_TYPE), cudaMemcpyDeviceToHost);
cudaEventElapsedTime(&elapsedTime, start,stop);
// printf("Elapsed time lookup shared mem: %f ms\n" ,elapsedTime);
cudaMemcpy(h_startClk, d_startClk, size_clock, cudaMemcpyDeviceToHost);
cudaMemcpy(h_stopClk, d_stopClk, size_clock, cudaMemcpyDeviceToHost);
sum = 0;
printf("clk cycles spent on each thread \n");
for (i = 0; i < n; i++) {
if (i%1000==0)
printf("%u,", h_stopClk[i] - h_startClk[i]);
sum+=h_stopClk[i] - h_startClk[i];
}
printf("\n -------- \n Lookup sharedmem latency (cycles) %f \n",float(sum)/n);
/*
for debugging
for (int k = 0; k <16; k++)
printf("%d %f\n", h_in[k], h_out[k]);
*/
cudaChannelFormatDesc chDesc0 = cudaCreateChannelDesc<int>();
t_features.filterMode = cudaFilterModePoint;
t_features.normalized = false;
t_features.channelDesc = chDesc0;
if(cudaBindTexture(NULL, &t_features, table_global, &chDesc0, n*sizeof(uint32_t)) != CUDA_SUCCESS)
printf("Couldn't bind features array to texture!\n");
//cudaDeviceSynchronize();
cudaEventRecord(start,0);
p2f_lookup_texture<<<block_count, threads_per_block>>>(d_in, d_out, n, d_startClk, d_stopClk);
//cudaDeviceSynchronize();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaMemcpy(h_out, d_out, n*sizeof(FP_TYPE), cudaMemcpyDeviceToHost);
cudaEventElapsedTime(&elapsedTime, start,stop);
//printf("Elapsed time texture lookup: %f ms\n" ,elapsedTime);
cudaMemcpy(h_startClk, d_startClk, size_clock, cudaMemcpyDeviceToHost);
cudaMemcpy(h_stopClk, d_stopClk, size_clock, cudaMemcpyDeviceToHost);
sum = 0;
printf("clk cycles spent on each thread \n");
for (i = 0; i < n; i++) {
if (i%1000==0)
printf("%u,", h_stopClk[i] - h_startClk[i]);
sum+=h_stopClk[i] - h_startClk[i];
}
printf("\n -------- \n Lookup texture latency (cycles) %f \n",float(sum)/n);
exit(0);
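// NOTE: execution stops at the exit(0) above, so the dummy MAC/copy benchmarks below never run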
FP_TYPE * d_out_dummy ;
cudaMalloc(&d_out_dummy, n*sizeof(FP_TYPE));
//cudaDeviceSynchronize();
cudaEventRecord(start,0);
p2f_dummy_1op<<<block_count, threads_per_block>>>(d_out, d_out_dummy, n);
//cudaDeviceSynchronize();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaMemcpy(h_out, d_out, n*sizeof(FP_TYPE), cudaMemcpyDeviceToHost);
cudaEventElapsedTime(&elapsedTime, start,stop);
printf("Elapsed time 1 MAC float : %f ms\n" ,elapsedTime);
// cudaDeviceSynchronize();
cudaEventRecord(start,0);
p2f_dummy_coppy<<<block_count, threads_per_block>>>(d_out, d_out_dummy, n);
//cudaDeviceSynchronize();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaMemcpy(h_out, d_out, n*sizeof(FP_TYPE), cudaMemcpyDeviceToHost);
cudaEventElapsedTime(&elapsedTime, start,stop);
printf("Elapsed time 1 copy fp32 : %f ms\n" ,elapsedTime);
POSIT_TYPE * d_in_dummy ;
cudaMalloc(&d_in_dummy, n*sizeof(POSIT_TYPE));
// cudaDeviceSynchronize();
cudaEventRecord(start,0);
p2f_dummy_coppy_uint8<<<block_count, threads_per_block>>>(d_in, d_in_dummy, n);
// cudaDeviceSynchronize();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaMemcpy(h_out, d_out, n*sizeof(FP_TYPE), cudaMemcpyDeviceToHost);
cudaEventElapsedTime(&elapsedTime, start,stop);
printf("Elapsed time 1 copy uint_8 : %f ms\n" ,elapsedTime);
/*
for (int k = 0; k <16; k++)
printf("%d %f\n", h_in[k], h_out[k]);
*/
/* Free device memory */
cudaFree(d_out);
cudaFree(d_in);
/* Free host memory */
free(h_out);
free(h_in);
return 0;
} /* main */
|
d1193e14bd82a286fd4ee6a2211ef3e62361c25a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cuda.h>
#include<stdio.h>
#include<math.h>
#define TILEWIDTH 32
__global__
void vecMulMatrixKernel(float* A, float* B, float* C, int n){
// Each block loads the corresponding row of tiles of A and column of tiles of B, one tile at a time, computes the partial product for that tile, and accumulates the partial products.
// each thread loads 2 elements one from A and one from B in each phase
// there are total gridDim.x phases
// the element loaded is the element at the same position as this thread but in a different block
// threads launched beyond the matrix bounds do nothing
int tx=threadIdx.x; int ty=threadIdx.y;
int bx=blockIdx.x; int by=blockIdx.y;
int row=by*blockDim.y+ty;
int col=bx*blockDim.x+tx;
__shared__ float Ads[TILEWIDTH][TILEWIDTH];
__shared__ float Bds[TILEWIDTH][TILEWIDTH];
if(row<n && col <n){
int i; float val=0.0;
for(i=0;i<gridDim.x-1;i++){
Ads[ty][tx] = A[ row*n + i*TILEWIDTH + tx];
Bds[ty][tx] = B[ (i*TILEWIDTH + ty)*n + col];
__syncthreads();
for(int k=0;k<TILEWIDTH;k++){
val+= Ads[ty][k]*Bds[k][tx];
}
__syncthreads();
}
if(i*TILEWIDTH + tx <n ) // this guard would not be needed if n were a multiple of TILEWIDTH
Ads[ty][tx] = A[ row*n + i*TILEWIDTH + tx];
if(i*TILEWIDTH + ty <n )
Bds[ty][tx] = B[ (i*TILEWIDTH + ty)*n + col];
__syncthreads();
int m =n%TILEWIDTH;
if(m==0)
m=TILEWIDTH;
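// m is the number of valid columns in the final, partial tile of A (and valid rows of the final tile of B)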
for(int k=0;k<m;k++){//printf("add");
val+= Ads[ty][k]*Bds[k][tx];
}
__syncthreads();
C[row*n + col]= val;
}
}
int min2Power(int x){
int res=1;
while(res<x){
res*=2;
}
return res/2;
}
__host__
void vecMulMatrix(float* A,float* B,float* C, int n){
int size = n * n * sizeof(float);
float *d_A, *d_B, *d_C;
//Allocate device memory for A,B,C
hipMalloc((void**)&d_A, size);
hipMalloc((void**)&d_B, size);
hipMalloc((void**)&d_C, size);
//copy A,B to device memory
hipMemcpy(d_A, A, size, hipMemcpyHostToDevice);
hipMemcpy(d_B, B, size, hipMemcpyHostToDevice);
//call the kernel function that calculates the product and stores it in C
dim3 dimBlock(TILEWIDTH,TILEWIDTH,1);
dim3 dimGrid(ceil(n/(float)TILEWIDTH),ceil(n/(float)TILEWIDTH),1);
hipLaunchKernelGGL(( vecMulMatrixKernel), dim3(dimGrid),dim3(dimBlock) , 0, 0, d_A,d_B,d_C,n);
//copy C from device memory
hipMemcpy(C, d_C, size, hipMemcpyDeviceToHost);
//free device memories
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
//Host code: sets up the matrices, calls vecMulMatrix, and prints the result
int main(){
int n=10;
int i,j;
float A[n][n],C[n][n],B[n][n];
for(i=0;i<n;i++){
for(j=0;j<n;j++){
A[i][j]=i+j;
B[i][j]=i*j;
}
}
vecMulMatrix(&A[0][0],&B[0][0],&C[0][0],n);
for(i=0;i<n;i++){
for(j=0;j<n;j++){
printf("%.3f ",A[i][j]);
}
printf("\n");
}
printf("---\n");
for(i=0;i<n;i++){
for(j=0;j<n;j++){
printf("%.3f ",B[i][j]);
}
printf("\n");
}
printf("---\n");
for(i=0;i<n;i++){
for(j=0;j<n;j++){
printf("%.3f ",C[i][j]);
}
printf("\n");
}
return 0;
}
| d1193e14bd82a286fd4ee6a2211ef3e62361c25a.cu | #include<cuda.h>
#include<stdio.h>
#include<math.h>
#define TILEWIDTH 32
__global__
void vecMulMatrixKernel(float* A, float* B, float* C, int n){
// Each block loads the corresponding row of tiles of A and column of tiles of B, one tile at a time, computes the partial product for that tile, and accumulates the partial products.
// each thread loads 2 elements one from A and one from B in each phase
// there are total gridDim.x phases
// the element loaded is the element at the same position as this thread but in a different block
// threads launched beyond the matrix bounds do nothing
int tx=threadIdx.x; int ty=threadIdx.y;
int bx=blockIdx.x; int by=blockIdx.y;
int row=by*blockDim.y+ty;
int col=bx*blockDim.x+tx;
__shared__ float Ads[TILEWIDTH][TILEWIDTH];
__shared__ float Bds[TILEWIDTH][TILEWIDTH];
if(row<n && col <n){
int i; float val=0.0;
for(i=0;i<gridDim.x-1;i++){
Ads[ty][tx] = A[ row*n + i*TILEWIDTH + tx];
Bds[ty][tx] = B[ (i*TILEWIDTH + ty)*n + col];
__syncthreads();
for(int k=0;k<TILEWIDTH;k++){
val+= Ads[ty][k]*Bds[k][tx];
}
__syncthreads();
}
if(i*TILEWIDTH + tx <n ) // this guard would not be needed if n were a multiple of TILEWIDTH
Ads[ty][tx] = A[ row*n + i*TILEWIDTH + tx];
if(i*TILEWIDTH + ty <n )
Bds[ty][tx] = B[ (i*TILEWIDTH + ty)*n + col];
__syncthreads();
int m =n%TILEWIDTH;
if(m==0)
m=TILEWIDTH;
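// m is the number of valid columns in the final, partial tile of A (and valid rows of the final tile of B)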
for(int k=0;k<m;k++){//printf("add");
val+= Ads[ty][k]*Bds[k][tx];
}
__syncthreads();
C[row*n + col]= val;
}
}
int min2Power(int x){
int res=1;
while(res<x){
res*=2;
}
return res/2;
}
__host__
void vecMulMatrix(float* A,float* B,float* C, int n){
int size = n * n * sizeof(float);
float *d_A, *d_B, *d_C;
//Allocate device memory for A,B,C
cudaMalloc((void**)&d_A, size);
cudaMalloc((void**)&d_B, size);
cudaMalloc((void**)&d_C, size);
//copy A,B to device memory
cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
//call the kernel function that calculates the product and stores it in C
dim3 dimBlock(TILEWIDTH,TILEWIDTH,1);
dim3 dimGrid(ceil(n/(float)TILEWIDTH),ceil(n/(float)TILEWIDTH),1);
vecMulMatrixKernel<<<dimGrid,dimBlock >>>(d_A,d_B,d_C,n);
//copy C from device memory
cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);
//free device memories
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
//Host code: sets up the matrices, calls vecMulMatrix, and prints the result
int main(){
int n=10;
int i,j;
float A[n][n],C[n][n],B[n][n];
for(i=0;i<n;i++){
for(j=0;j<n;j++){
A[i][j]=i+j;
B[i][j]=i*j;
}
}
vecMulMatrix(&A[0][0],&B[0][0],&C[0][0],n);
for(i=0;i<n;i++){
for(j=0;j<n;j++){
printf("%.3f ",A[i][j]);
}
printf("\n");
}
printf("---\n");
for(i=0;i<n;i++){
for(j=0;j<n;j++){
printf("%.3f ",B[i][j]);
}
printf("\n");
}
printf("---\n");
for(i=0;i<n;i++){
for(j=0;j<n;j++){
printf("%.3f ",C[i][j]);
}
printf("\n");
}
return 0;
}
|
fd2c7e7e11bf20890c71bbf45ec234c0f28c4090.hip | // !!! This is a file automatically generated by hipify!!!
#include "CUAPI.h"
#include "CUFLU.h"
#ifdef GPU
// *******************************************
// ** CUDA stream objects are declared here **
hipStream_t *Stream;
// *******************************************
extern real (*d_Flu_Array_F_In )[FLU_NIN ][ CUBE(FLU_NXT) ];
extern real (*d_Flu_Array_F_Out)[FLU_NOUT][ CUBE(PS2) ];
extern real (*d_Flux_Array)[9][NFLUX_TOTAL][ SQR(PS2) ];
#ifdef UNSPLIT_GRAVITY
extern real (*d_Pot_Array_USG_F)[ CUBE(USG_NXT_F) ];
extern double (*d_Corner_Array_F)[3];
#endif
#ifdef DUAL_ENERGY
extern char (*d_DE_Array_F_Out)[ CUBE(PS2) ];
#endif
#ifdef MHD
extern real (*d_Mag_Array_F_In )[NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ];
extern real (*d_Mag_Array_F_Out)[NCOMP_MAG][ PS2P1*SQR(PS2) ];
extern real (*d_Ele_Array )[9][NCOMP_ELE][ PS2P1*PS2 ];
extern real (*d_Mag_Array_T)[NCOMP_MAG][ PS1P1*SQR(PS1) ];
#endif
extern real *d_dt_Array_T;
extern real (*d_Flu_Array_T)[NCOMP_FLUID][ CUBE(PS1) ];
#if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU )
extern real (*d_PriVar) [NCOMP_TOTAL_PLUS_MAG][ CUBE(FLU_NXT) ];
extern real (*d_Slope_PPM)[3][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_SLOPE_PPM) ];
extern real (*d_FC_Var) [6][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ];
extern real (*d_FC_Flux) [3][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ];
#ifdef MHD
extern real (*d_FC_Mag_Half)[NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ];
extern real (*d_EC_Ele )[NCOMP_MAG][ CUBE(N_EC_ELE) ];
#endif
#endif // FLU_SCHEME
#if ( MODEL != HYDRO && MODEL != ELBDM )
# warning : DO YOU WANT TO ADD SOMETHING HERE FOR THE NEW MODEL ??
#endif
//-------------------------------------------------------------------------------------------------------
// Function : CUAPI_MemAllocate_Fluid
// Description : Allocate GPU and CPU memory for the fluid solver
//
// Parameter : Flu_NPG : Number of patch groups evaluated simultaneously by GPU for the fluid solver
// Pot_NPG : Number of patch groups evaluated simultaneously by GPU for the gravity solver
// --> Here it is used only for the dt solver
// GPU_NStream : Number of CUDA stream objects
//-------------------------------------------------------------------------------------------------------
void CUAPI_MemAllocate_Fluid( const int Flu_NPG, const int Pot_NPG, const int GPU_NStream )
{
// size of the global memory arrays in all models
const int Flu_NP = 8*Flu_NPG;
# ifdef GRAVITY
const int Pot_NP = 8*Pot_NPG;
# endif
const long Flu_MemSize_F_In = sizeof(real )*Flu_NPG*FLU_NIN *CUBE(FLU_NXT);
const long Flu_MemSize_F_Out = sizeof(real )*Flu_NPG*FLU_NOUT*CUBE(PS2);
const long Flux_MemSize = sizeof(real )*Flu_NPG*9*NFLUX_TOTAL*SQR(PS2);
# ifdef UNSPLIT_GRAVITY
const long Pot_MemSize_USG_F = sizeof(real )*Flu_NPG*CUBE(USG_NXT_F);
const long Corner_MemSize = sizeof(double)*Flu_NPG*3;
# endif
# ifdef DUAL_ENERGY
const long DE_MemSize_F_Out = sizeof(char )*Flu_NPG*CUBE(PS2);
# endif
# ifdef MHD
const long Mag_MemSize_F_In = sizeof(real )*Flu_NPG*NCOMP_MAG*FLU_NXT_P1*SQR(FLU_NXT);
const long Mag_MemSize_F_Out = sizeof(real )*Flu_NPG*NCOMP_MAG*PS2P1*SQR(PS2);
const long Ele_MemSize = sizeof(real )*Flu_NPG*9*NCOMP_ELE*PS2P1*PS2;
const long Mag_MemSize_T = sizeof(real )*Flu_NP*NCOMP_MAG*PS1P1*SQR(PS1);
# endif
# ifdef GRAVITY
const long dt_MemSize_T = sizeof(real )*MAX( Flu_NP, Pot_NP ); // dt_Array_T is used for both DT_FLU_SOLVER and DT_GRA_SOLVER
# else
const long dt_MemSize_T = sizeof(real )*Flu_NP;
# endif
const long Flu_MemSize_T = sizeof(real )*Flu_NP*NCOMP_FLUID*CUBE(PS1);
// the size of the global memory arrays in different models
# if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU )
const long PriVar_MemSize = sizeof(real )*Flu_NPG *NCOMP_TOTAL_PLUS_MAG*CUBE(FLU_NXT);
const long FC_Var_MemSize = sizeof(real )*Flu_NPG*6*NCOMP_TOTAL_PLUS_MAG*CUBE(N_FC_VAR);
const long FC_Flux_MemSize = sizeof(real )*Flu_NPG*3*NCOMP_TOTAL_PLUS_MAG*CUBE(N_FC_FLUX);
# if ( LR_SCHEME == PPM )
const long Slope_PPM_MemSize = sizeof(real )*Flu_NPG*3*NCOMP_TOTAL_PLUS_MAG*CUBE(N_SLOPE_PPM);
# endif
# ifdef MHD
const long FC_Mag_Half_MemSize = sizeof(real )*Flu_NPG *NCOMP_MAG*FLU_NXT_P1*SQR(FLU_NXT);
const long EC_Ele_MemSize = sizeof(real )*Flu_NPG *NCOMP_MAG*CUBE(N_EC_ELE);
# endif
# endif // FLU_SCHEME
# if ( MODEL != HYDRO && MODEL != ELBDM )
# warning : DO YOU WANT TO ADD SOMETHING HERE FOR THE NEW MODEL ??
# endif
// output the total memory requirement
long TotalSize = Flu_MemSize_F_In + Flu_MemSize_F_Out + dt_MemSize_T + Flu_MemSize_T;
if ( amr->WithFlux )
TotalSize += Flux_MemSize;
# ifdef UNSPLIT_GRAVITY
TotalSize += Pot_MemSize_USG_F;
if ( OPT__GRAVITY_TYPE == GRAVITY_EXTERNAL || OPT__GRAVITY_TYPE == GRAVITY_BOTH || OPT__EXTERNAL_POT )
TotalSize += Corner_MemSize;
# endif
# ifdef DUAL_ENERGY
TotalSize += DE_MemSize_F_Out;
# endif
# ifdef MHD
TotalSize += Mag_MemSize_F_In + Mag_MemSize_F_Out + Mag_MemSize_T;
if ( amr->WithElectric )
TotalSize += Ele_MemSize;
# endif
# if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU )
TotalSize += PriVar_MemSize + FC_Var_MemSize + FC_Flux_MemSize;
# if ( LR_SCHEME == PPM )
TotalSize += Slope_PPM_MemSize;
# endif
# ifdef MHD
TotalSize += FC_Mag_Half_MemSize + EC_Ele_MemSize;
# endif
# endif // MHM/MHM_RP/CTU
# if ( MODEL != HYDRO && MODEL != ELBDM )
# warning : DO YOU WANT TO ADD SOMETHING HERE FOR THE NEW MODEL ??
# endif
if ( MPI_Rank == 0 )
Aux_Message( stdout, "NOTE : total memory requirement in GPU fluid solver = %ld MB\n", TotalSize/(1<<20) );
// allocate the device memory
CUDA_CHECK_ERROR( hipMalloc( (void**) &d_Flu_Array_F_In, Flu_MemSize_F_In ) );
CUDA_CHECK_ERROR( hipMalloc( (void**) &d_Flu_Array_F_Out, Flu_MemSize_F_Out ) );
if ( amr->WithFlux )
CUDA_CHECK_ERROR( hipMalloc( (void**) &d_Flux_Array, Flux_MemSize ) );
# ifdef UNSPLIT_GRAVITY
CUDA_CHECK_ERROR( hipMalloc( (void**) &d_Pot_Array_USG_F, Pot_MemSize_USG_F ) );
if ( OPT__GRAVITY_TYPE == GRAVITY_EXTERNAL || OPT__GRAVITY_TYPE == GRAVITY_BOTH || OPT__EXTERNAL_POT )
CUDA_CHECK_ERROR( hipMalloc( (void**) &d_Corner_Array_F, Corner_MemSize ) );
# endif
# ifdef DUAL_ENERGY
CUDA_CHECK_ERROR( hipMalloc( (void**) &d_DE_Array_F_Out, DE_MemSize_F_Out ) );
# endif
# ifdef MHD
CUDA_CHECK_ERROR( hipMalloc( (void**) &d_Mag_Array_F_In, Mag_MemSize_F_In ) );
CUDA_CHECK_ERROR( hipMalloc( (void**) &d_Mag_Array_F_Out, Mag_MemSize_F_Out ) );
if ( amr->WithElectric )
CUDA_CHECK_ERROR( hipMalloc( (void**) &d_Ele_Array, Ele_MemSize ) );
CUDA_CHECK_ERROR( hipMalloc( (void**) &d_Mag_Array_T, Mag_MemSize_T ) );
# endif
CUDA_CHECK_ERROR( hipMalloc( (void**) &d_dt_Array_T, dt_MemSize_T ) );
CUDA_CHECK_ERROR( hipMalloc( (void**) &d_Flu_Array_T, Flu_MemSize_T ) );
# if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU )
CUDA_CHECK_ERROR( hipMalloc( (void**) &d_FC_Var, FC_Var_MemSize ) );
CUDA_CHECK_ERROR( hipMalloc( (void**) &d_FC_Flux, FC_Flux_MemSize ) );
CUDA_CHECK_ERROR( hipMalloc( (void**) &d_PriVar, PriVar_MemSize ) );
# if ( LR_SCHEME == PPM )
CUDA_CHECK_ERROR( hipMalloc( (void**) &d_Slope_PPM, Slope_PPM_MemSize ) );
# endif
# ifdef MHD
CUDA_CHECK_ERROR( hipMalloc( (void**) &d_FC_Mag_Half, FC_Mag_Half_MemSize ) );
CUDA_CHECK_ERROR( hipMalloc( (void**) &d_EC_Ele, EC_Ele_MemSize ) );
# endif
# endif // #if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU )
# if ( MODEL != HYDRO && MODEL != ELBDM )
# warning : DO YOU WANT TO ADD SOMETHING HERE FOR THE NEW MODEL ??
# endif
// allocate the host memory by CUDA
for (int t=0; t<2; t++)
{
CUDA_CHECK_ERROR( hipHostMalloc( (void**) &h_Flu_Array_F_In [t], Flu_MemSize_F_In ) );
CUDA_CHECK_ERROR( hipHostMalloc( (void**) &h_Flu_Array_F_Out[t], Flu_MemSize_F_Out ) );
if ( amr->WithFlux )
CUDA_CHECK_ERROR( hipHostMalloc( (void**) &h_Flux_Array [t], Flux_MemSize ) );
# ifdef UNSPLIT_GRAVITY
CUDA_CHECK_ERROR( hipHostMalloc( (void**) &h_Pot_Array_USG_F[t], Pot_MemSize_USG_F ) );
if ( OPT__GRAVITY_TYPE == GRAVITY_EXTERNAL || OPT__GRAVITY_TYPE == GRAVITY_BOTH || OPT__EXTERNAL_POT )
CUDA_CHECK_ERROR( hipHostMalloc( (void**) &h_Corner_Array_F [t], Corner_MemSize ) );
# endif
# ifdef DUAL_ENERGY
CUDA_CHECK_ERROR( hipHostMalloc( (void**) &h_DE_Array_F_Out [t], DE_MemSize_F_Out ) );
# endif
# ifdef MHD
CUDA_CHECK_ERROR( hipHostMalloc( (void**) &h_Mag_Array_F_In [t], Mag_MemSize_F_In ) );
CUDA_CHECK_ERROR( hipHostMalloc( (void**) &h_Mag_Array_F_Out[t], Mag_MemSize_F_Out ) );
if ( amr->WithElectric )
CUDA_CHECK_ERROR( hipHostMalloc( (void**) &h_Ele_Array [t], Ele_MemSize ) );
CUDA_CHECK_ERROR( hipHostMalloc( (void**) &h_Mag_Array_T [t], Mag_MemSize_T ) );
# endif
CUDA_CHECK_ERROR( hipHostMalloc( (void**) &h_dt_Array_T [t], dt_MemSize_T ) );
CUDA_CHECK_ERROR( hipHostMalloc( (void**) &h_Flu_Array_T [t], Flu_MemSize_T ) );
} // for (int t=0; t<2; t++)
// create streams
Stream = new hipStream_t [GPU_NStream];
for (int s=0; s<GPU_NStream; s++) CUDA_CHECK_ERROR( hipStreamCreate( &Stream[s] ) );
} // FUNCTION : CUAPI_MemAllocate_Fluid
#endif // #ifdef GPU
| fd2c7e7e11bf20890c71bbf45ec234c0f28c4090.cu | #include "CUAPI.h"
#include "CUFLU.h"
#ifdef GPU
// *******************************************
// ** CUDA stream objects are declared here **
cudaStream_t *Stream;
// *******************************************
extern real (*d_Flu_Array_F_In )[FLU_NIN ][ CUBE(FLU_NXT) ];
extern real (*d_Flu_Array_F_Out)[FLU_NOUT][ CUBE(PS2) ];
extern real (*d_Flux_Array)[9][NFLUX_TOTAL][ SQR(PS2) ];
#ifdef UNSPLIT_GRAVITY
extern real (*d_Pot_Array_USG_F)[ CUBE(USG_NXT_F) ];
extern double (*d_Corner_Array_F)[3];
#endif
#ifdef DUAL_ENERGY
extern char (*d_DE_Array_F_Out)[ CUBE(PS2) ];
#endif
#ifdef MHD
extern real (*d_Mag_Array_F_In )[NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ];
extern real (*d_Mag_Array_F_Out)[NCOMP_MAG][ PS2P1*SQR(PS2) ];
extern real (*d_Ele_Array )[9][NCOMP_ELE][ PS2P1*PS2 ];
extern real (*d_Mag_Array_T)[NCOMP_MAG][ PS1P1*SQR(PS1) ];
#endif
extern real *d_dt_Array_T;
extern real (*d_Flu_Array_T)[NCOMP_FLUID][ CUBE(PS1) ];
#if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU )
extern real (*d_PriVar) [NCOMP_TOTAL_PLUS_MAG][ CUBE(FLU_NXT) ];
extern real (*d_Slope_PPM)[3][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_SLOPE_PPM) ];
extern real (*d_FC_Var) [6][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_VAR) ];
extern real (*d_FC_Flux) [3][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ];
#ifdef MHD
extern real (*d_FC_Mag_Half)[NCOMP_MAG][ FLU_NXT_P1*SQR(FLU_NXT) ];
extern real (*d_EC_Ele )[NCOMP_MAG][ CUBE(N_EC_ELE) ];
#endif
#endif // FLU_SCHEME
#if ( MODEL != HYDRO && MODEL != ELBDM )
# warning : DO YOU WANT TO ADD SOMETHING HERE FOR THE NEW MODEL ??
#endif
//-------------------------------------------------------------------------------------------------------
// Function : CUAPI_MemAllocate_Fluid
// Description : Allocate GPU and CPU memory for the fluid solver
//
// Parameter : Flu_NPG : Number of patch groups evaluated simultaneously by GPU for the fluid solver
// Pot_NPG : Number of patch groups evaluated simultaneously by GPU for the gravity solver
// --> Here it is used only for the dt solver
// GPU_NStream : Number of CUDA stream objects
//-------------------------------------------------------------------------------------------------------
void CUAPI_MemAllocate_Fluid( const int Flu_NPG, const int Pot_NPG, const int GPU_NStream )
{
// size of the global memory arrays in all models
const int Flu_NP = 8*Flu_NPG;
# ifdef GRAVITY
const int Pot_NP = 8*Pot_NPG;
# endif
const long Flu_MemSize_F_In = sizeof(real )*Flu_NPG*FLU_NIN *CUBE(FLU_NXT);
const long Flu_MemSize_F_Out = sizeof(real )*Flu_NPG*FLU_NOUT*CUBE(PS2);
const long Flux_MemSize = sizeof(real )*Flu_NPG*9*NFLUX_TOTAL*SQR(PS2);
# ifdef UNSPLIT_GRAVITY
const long Pot_MemSize_USG_F = sizeof(real )*Flu_NPG*CUBE(USG_NXT_F);
const long Corner_MemSize = sizeof(double)*Flu_NPG*3;
# endif
# ifdef DUAL_ENERGY
const long DE_MemSize_F_Out = sizeof(char )*Flu_NPG*CUBE(PS2);
# endif
# ifdef MHD
const long Mag_MemSize_F_In = sizeof(real )*Flu_NPG*NCOMP_MAG*FLU_NXT_P1*SQR(FLU_NXT);
const long Mag_MemSize_F_Out = sizeof(real )*Flu_NPG*NCOMP_MAG*PS2P1*SQR(PS2);
const long Ele_MemSize = sizeof(real )*Flu_NPG*9*NCOMP_ELE*PS2P1*PS2;
const long Mag_MemSize_T = sizeof(real )*Flu_NP*NCOMP_MAG*PS1P1*SQR(PS1);
# endif
# ifdef GRAVITY
const long dt_MemSize_T = sizeof(real )*MAX( Flu_NP, Pot_NP ); // dt_Array_T is used for both DT_FLU_SOLVER and DT_GRA_SOLVER
# else
const long dt_MemSize_T = sizeof(real )*Flu_NP;
# endif
const long Flu_MemSize_T = sizeof(real )*Flu_NP*NCOMP_FLUID*CUBE(PS1);
// the size of the global memory arrays in different models
# if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU )
const long PriVar_MemSize = sizeof(real )*Flu_NPG *NCOMP_TOTAL_PLUS_MAG*CUBE(FLU_NXT);
const long FC_Var_MemSize = sizeof(real )*Flu_NPG*6*NCOMP_TOTAL_PLUS_MAG*CUBE(N_FC_VAR);
const long FC_Flux_MemSize = sizeof(real )*Flu_NPG*3*NCOMP_TOTAL_PLUS_MAG*CUBE(N_FC_FLUX);
# if ( LR_SCHEME == PPM )
const long Slope_PPM_MemSize = sizeof(real )*Flu_NPG*3*NCOMP_TOTAL_PLUS_MAG*CUBE(N_SLOPE_PPM);
# endif
# ifdef MHD
const long FC_Mag_Half_MemSize = sizeof(real )*Flu_NPG *NCOMP_MAG*FLU_NXT_P1*SQR(FLU_NXT);
const long EC_Ele_MemSize = sizeof(real )*Flu_NPG *NCOMP_MAG*CUBE(N_EC_ELE);
# endif
# endif // FLU_SCHEME
# if ( MODEL != HYDRO && MODEL != ELBDM )
# warning : DO YOU WANT TO ADD SOMETHING HERE FOR THE NEW MODEL ??
# endif
// output the total memory requirement
long TotalSize = Flu_MemSize_F_In + Flu_MemSize_F_Out + dt_MemSize_T + Flu_MemSize_T;
if ( amr->WithFlux )
TotalSize += Flux_MemSize;
# ifdef UNSPLIT_GRAVITY
TotalSize += Pot_MemSize_USG_F;
if ( OPT__GRAVITY_TYPE == GRAVITY_EXTERNAL || OPT__GRAVITY_TYPE == GRAVITY_BOTH || OPT__EXTERNAL_POT )
TotalSize += Corner_MemSize;
# endif
# ifdef DUAL_ENERGY
TotalSize += DE_MemSize_F_Out;
# endif
# ifdef MHD
TotalSize += Mag_MemSize_F_In + Mag_MemSize_F_Out + Mag_MemSize_T;
if ( amr->WithElectric )
TotalSize += Ele_MemSize;
# endif
# if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU )
TotalSize += PriVar_MemSize + FC_Var_MemSize + FC_Flux_MemSize;
# if ( LR_SCHEME == PPM )
TotalSize += Slope_PPM_MemSize;
# endif
# ifdef MHD
TotalSize += FC_Mag_Half_MemSize + EC_Ele_MemSize;
# endif
# endif // MHM/MHM_RP/CTU
# if ( MODEL != HYDRO && MODEL != ELBDM )
# warning : DO YOU WANT TO ADD SOMETHING HERE FOR THE NEW MODEL ??
# endif
if ( MPI_Rank == 0 )
Aux_Message( stdout, "NOTE : total memory requirement in GPU fluid solver = %ld MB\n", TotalSize/(1<<20) );
// allocate the device memory
CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_Flu_Array_F_In, Flu_MemSize_F_In ) );
CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_Flu_Array_F_Out, Flu_MemSize_F_Out ) );
if ( amr->WithFlux )
CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_Flux_Array, Flux_MemSize ) );
# ifdef UNSPLIT_GRAVITY
CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_Pot_Array_USG_F, Pot_MemSize_USG_F ) );
if ( OPT__GRAVITY_TYPE == GRAVITY_EXTERNAL || OPT__GRAVITY_TYPE == GRAVITY_BOTH || OPT__EXTERNAL_POT )
CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_Corner_Array_F, Corner_MemSize ) );
# endif
# ifdef DUAL_ENERGY
CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_DE_Array_F_Out, DE_MemSize_F_Out ) );
# endif
# ifdef MHD
CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_Mag_Array_F_In, Mag_MemSize_F_In ) );
CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_Mag_Array_F_Out, Mag_MemSize_F_Out ) );
if ( amr->WithElectric )
CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_Ele_Array, Ele_MemSize ) );
CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_Mag_Array_T, Mag_MemSize_T ) );
# endif
CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_dt_Array_T, dt_MemSize_T ) );
CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_Flu_Array_T, Flu_MemSize_T ) );
# if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU )
CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_FC_Var, FC_Var_MemSize ) );
CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_FC_Flux, FC_Flux_MemSize ) );
CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_PriVar, PriVar_MemSize ) );
# if ( LR_SCHEME == PPM )
CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_Slope_PPM, Slope_PPM_MemSize ) );
# endif
# ifdef MHD
CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_FC_Mag_Half, FC_Mag_Half_MemSize ) );
CUDA_CHECK_ERROR( cudaMalloc( (void**) &d_EC_Ele, EC_Ele_MemSize ) );
# endif
# endif // #if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU )
# if ( MODEL != HYDRO && MODEL != ELBDM )
# warning : DO YOU WANT TO ADD SOMETHING HERE FOR THE NEW MODEL ??
# endif
// allocate the host memory by CUDA
for (int t=0; t<2; t++)
{
CUDA_CHECK_ERROR( cudaMallocHost( (void**) &h_Flu_Array_F_In [t], Flu_MemSize_F_In ) );
CUDA_CHECK_ERROR( cudaMallocHost( (void**) &h_Flu_Array_F_Out[t], Flu_MemSize_F_Out ) );
if ( amr->WithFlux )
CUDA_CHECK_ERROR( cudaMallocHost( (void**) &h_Flux_Array [t], Flux_MemSize ) );
# ifdef UNSPLIT_GRAVITY
CUDA_CHECK_ERROR( cudaMallocHost( (void**) &h_Pot_Array_USG_F[t], Pot_MemSize_USG_F ) );
if ( OPT__GRAVITY_TYPE == GRAVITY_EXTERNAL || OPT__GRAVITY_TYPE == GRAVITY_BOTH || OPT__EXTERNAL_POT )
CUDA_CHECK_ERROR( cudaMallocHost( (void**) &h_Corner_Array_F [t], Corner_MemSize ) );
# endif
# ifdef DUAL_ENERGY
CUDA_CHECK_ERROR( cudaMallocHost( (void**) &h_DE_Array_F_Out [t], DE_MemSize_F_Out ) );
# endif
# ifdef MHD
CUDA_CHECK_ERROR( cudaMallocHost( (void**) &h_Mag_Array_F_In [t], Mag_MemSize_F_In ) );
CUDA_CHECK_ERROR( cudaMallocHost( (void**) &h_Mag_Array_F_Out[t], Mag_MemSize_F_Out ) );
if ( amr->WithElectric )
CUDA_CHECK_ERROR( cudaMallocHost( (void**) &h_Ele_Array [t], Ele_MemSize ) );
CUDA_CHECK_ERROR( cudaMallocHost( (void**) &h_Mag_Array_T [t], Mag_MemSize_T ) );
# endif
CUDA_CHECK_ERROR( cudaMallocHost( (void**) &h_dt_Array_T [t], dt_MemSize_T ) );
CUDA_CHECK_ERROR( cudaMallocHost( (void**) &h_Flu_Array_T [t], Flu_MemSize_T ) );
} // for (int t=0; t<2; t++)
// create streams
Stream = new cudaStream_t [GPU_NStream];
for (int s=0; s<GPU_NStream; s++) CUDA_CHECK_ERROR( cudaStreamCreate( &Stream[s] ) );
} // FUNCTION : CUAPI_MemAllocate_Fluid
#endif // #ifdef GPU
|
87c7fb548fc7d6bb8bdf4aa63dfdb061e7fabdf8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stereo.h"
__global__
void SolveDataL1Kernel(const float *duhat0, const float *dvhat0,
const float *pu1, const float *pu2,
const float *pv1, const float *pv2,
const float *Ix, const float *Iy, const float *It,
int width, int height, int stride,
float lambda, float theta,
float *duhat1, float *dvhat1)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row
float dix, diy, dit, duhat, dvhat, du, dv;
if ((iy < height) && (ix < width))
{
int pos = ix + iy * stride; // current pixel index
dix = Ix[pos];
diy = Iy[pos];
dit = It[pos];
float duhat = duhat0[pos];
float dvhat = dvhat0[pos];
//problem 1a
float rho = (dix*duhat + diy*dvhat + dit);
float upper = lambda*theta*(dix*dix + diy*diy);
float lower = -lambda*theta*(dix*dix + diy*diy);
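// The three branches below apply a shrinkage (soft-thresholding) step to the data residual rho,
// bounded by lambda*theta*(Ix^2 + Iy^2).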
if ((rho <= upper) && (rho >= lower)) {
float magi = dix*dix + diy*diy;
if (magi != 0) {
du = duhat - rho*dix / magi;
dv = dvhat - rho*diy / magi;
}
else {
du = duhat;
dv = dvhat;
}
}
else if (rho < lower) {
du = duhat + lambda*theta*dix;
dv = dvhat + lambda*theta*diy;
}
else if (rho > upper) {
du = duhat - lambda*theta*dix;
dv = dvhat - lambda*theta*diy;
}
//problem 1b
float divpu, divpv;
int left = (ix - 1) + iy * stride;
int right = (ix + 1) + iy * stride;
int down = ix + (iy - 1) * stride;
int up = ix + (iy + 1) * stride;
if ((ix - 1) < 0) {
if ((iy - 1) < 0) {
//divpu = pu1[right] - pu1[pos] + pu2[up] - pu2[pos];
//divpv = pv1[right] - pv1[pos] + pv2[up] - pv2[pos];
divpu = pu1[pos] + pu2[pos];
divpv = pv1[pos] + pv2[pos];
}
else {
//divpu = pu1[right] - pu1[pos] + pu2[pos] - pu2[down];
//divpv = pv1[right] - pv1[pos] + pv2[pos] - pv2[down];
divpu = pu1[pos] + pu2[pos] - pu2[down];
divpv = pv1[pos] + pv2[pos] - pv2[down];
}
}
else {
if ((iy - 1) < 0) {
//divpu = pu1[pos] - pu1[left] + pu2[up] - pu2[pos];
//divpv = pv1[pos] - pv1[left] + pv2[up] - pv2[pos];
divpu = pu1[pos] - pu1[left] + pu2[pos];
divpv = pv1[pos] - pv1[left] + pv2[pos];
}
else {
divpu = pu1[pos] - pu1[left] + pu2[pos] - pu2[down];
divpv = pv1[pos] - pv1[left] + pv2[pos] - pv2[down];
}
}
duhat1[pos] = du + theta*divpu;
dvhat1[pos] = dv + theta*divpv;
}
}
void Stereo::SolveDataL1(const float *duhat0, const float *dvhat0,
const float *pu1, const float *pu2,
const float *pv1, const float *pv2,
const float *Ix, const float *Iy, const float *Iz,
int w, int h, int s,
float lambda, float theta,
float *duhat1, float *dvhat1)
{
// CTA size
dim3 threads(BlockWidth, BlockHeight);
// grid size
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
hipLaunchKernelGGL(( SolveDataL1Kernel) , dim3(blocks), dim3(threads) , 0, 0, duhat0, dvhat0,
pu1, pu2,
pv1, pv2,
Ix, Iy, Iz,
w, h, s,
lambda, theta,
duhat1, dvhat1);
}
// *******************************
// Solve Data-L1 Stereo
// *******************************
__global__
void SolveDataL1StereoKernel(const float *dwhat0,
const float *pw1, const float *pw2,
const float *Iw, const float *It,
int width, int height, int stride,
float lambda, float theta,
float *dwhat1)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row
float diw, dit, dwhat, dw;
float desiredRadius = (float)width / 2.20f;
float halfWidth = (float)width / 2.0f;
float halfHeight = (float)height / 2.0f;
float radius = sqrtf((iy - halfHeight) * (iy - halfHeight) + (ix - halfWidth) * (ix - halfWidth));
if ((iy < height) && (ix < width))
{
int pos = ix + iy * stride;// current pixel index
if (radius >= halfWidth)
{
dwhat1[pos] = 0.0f;
}
else
{
diw = Iw[pos];
dit = It[pos];
float dwhat = dwhat0[pos];
//problem 1a
float magi = (diw * diw);
float rho = (diw*dwhat + dit);
//float upper = lambda * theta*(diw * diw);
//float lower = -lambda * theta*(diw * diw);
float upper = lambda * theta*(magi);
float lower = -lambda * theta*(magi);
if ((rho <= upper) && (rho >= lower)) {
if (diw != 0) {
dw = dwhat - rho / diw;
}
else {
dw = dwhat;
}
}
else if (rho < lower) {
dw = dwhat + lambda * theta*diw;
}
else if (rho > upper) {
dw = dwhat - lambda * theta*diw;
}
/*if (dw > 0.5f) {
dw = 0.5f;
}*/
/*else if (dw < 0.0f) {
dw = 0.0f;
}*/
//problem 1b
float divpw;
int left = (ix - 1) + iy * stride;
// int right = (ix + 1) + iy * stride;
int down = ix + (iy - 1) * stride;
// int up = ix + (iy + 1) * stride;
if ((ix - 1) < 0) {
if ((iy - 1) < 0) {
//divpu = pu1[right] - pu1[pos] + pu2[up] - pu2[pos];
//divpv = pv1[right] - pv1[pos] + pv2[up] - pv2[pos];
divpw = pw1[pos] + pw2[pos];
}
else {
//divpu = pu1[right] - pu1[pos] + pu2[pos] - pu2[down];
//divpv = pv1[right] - pv1[pos] + pv2[pos] - pv2[down];
divpw = pw1[pos] + pw2[pos] - pw2[down];
}
}
else {
if ((iy - 1) < 0) {
//divpu = pu1[pos] - pu1[left] + pu2[up] - pu2[pos];
//divpv = pv1[pos] - pv1[left] + pv2[up] - pv2[pos];
divpw = pw1[pos] - pw1[left] + pw2[pos];
}
else {
divpw = pw1[pos] - pw1[left] + pw2[pos] - pw2[down];
}
}
float dwval = dw + theta * divpw;
dwhat1[pos] = dwval;
/*if (dwval < 0) {
dwhat1[pos] = 0.0f;
}
else if (dwval > 0.5f) {
dwhat1[pos] = 0.5f;
}
else {
dwhat1[pos] = dwval;
}*/
}
//dwhat1[pos] = dw + theta * divpu;
}
}
void Stereo::SolveDataL1Stereo(const float *dwhat0,
const float *pw1, const float *pw2,
const float *Iw, const float *Iz,
int w, int h, int s,
float lambda, float theta,
float *dwhat1)
{
// CTA size
dim3 threads(BlockWidth, BlockHeight);
// grid size
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
SolveDataL1StereoKernel << < blocks, threads >> > (dwhat0,
pw1, pw2,
Iw, Iz,
w, h, s,
lambda, theta,
dwhat1);
} | 87c7fb548fc7d6bb8bdf4aa63dfdb061e7fabdf8.cu | #include "stereo.h"
__global__
void SolveDataL1Kernel(const float *duhat0, const float *dvhat0,
const float *pu1, const float *pu2,
const float *pv1, const float *pv2,
const float *Ix, const float *Iy, const float *It,
int width, int height, int stride,
float lambda, float theta,
float *duhat1, float *dvhat1)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row
float dix, diy, dit, duhat, dvhat, du, dv;
if ((iy < height) && (ix < width))
{
int pos = ix + iy * stride; // current pixel index
dix = Ix[pos];
diy = Iy[pos];
dit = It[pos];
float duhat = duhat0[pos];
float dvhat = dvhat0[pos];
//problem 1a
float rho = (dix*duhat + diy*dvhat + dit);
float upper = lambda*theta*(dix*dix + diy*diy);
float lower = -lambda*theta*(dix*dix + diy*diy);
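// The three branches below apply a shrinkage (soft-thresholding) step to the data residual rho,
// bounded by lambda*theta*(Ix^2 + Iy^2).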
if ((rho <= upper) && (rho >= lower)) {
float magi = dix*dix + diy*diy;
if (magi != 0) {
du = duhat - rho*dix / magi;
dv = dvhat - rho*diy / magi;
}
else {
du = duhat;
dv = dvhat;
}
}
else if (rho < lower) {
du = duhat + lambda*theta*dix;
dv = dvhat + lambda*theta*diy;
}
else if (rho > upper) {
du = duhat - lambda*theta*dix;
dv = dvhat - lambda*theta*diy;
}
//problem 1b
float divpu, divpv;
int left = (ix - 1) + iy * stride;
int right = (ix + 1) + iy * stride;
int down = ix + (iy - 1) * stride;
int up = ix + (iy + 1) * stride;
if ((ix - 1) < 0) {
if ((iy - 1) < 0) {
//divpu = pu1[right] - pu1[pos] + pu2[up] - pu2[pos];
//divpv = pv1[right] - pv1[pos] + pv2[up] - pv2[pos];
divpu = pu1[pos] + pu2[pos];
divpv = pv1[pos] + pv2[pos];
}
else {
//divpu = pu1[right] - pu1[pos] + pu2[pos] - pu2[down];
//divpv = pv1[right] - pv1[pos] + pv2[pos] - pv2[down];
divpu = pu1[pos] + pu2[pos] - pu2[down];
divpv = pv1[pos] + pv2[pos] - pv2[down];
}
}
else {
if ((iy - 1) < 0) {
//divpu = pu1[pos] - pu1[left] + pu2[up] - pu2[pos];
//divpv = pv1[pos] - pv1[left] + pv2[up] - pv2[pos];
divpu = pu1[pos] - pu1[left] + pu2[pos];
divpv = pv1[pos] - pv1[left] + pv2[pos];
}
else {
divpu = pu1[pos] - pu1[left] + pu2[pos] - pu2[down];
divpv = pv1[pos] - pv1[left] + pv2[pos] - pv2[down];
}
}
duhat1[pos] = du + theta*divpu;
dvhat1[pos] = dv + theta*divpv;
}
}
void Stereo::SolveDataL1(const float *duhat0, const float *dvhat0,
const float *pu1, const float *pu2,
const float *pv1, const float *pv2,
const float *Ix, const float *Iy, const float *Iz,
int w, int h, int s,
float lambda, float theta,
float *duhat1, float *dvhat1)
{
// CTA size
dim3 threads(BlockWidth, BlockHeight);
// grid size
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
SolveDataL1Kernel <<< blocks, threads >>> (duhat0, dvhat0,
pu1, pu2,
pv1, pv2,
Ix, Iy, Iz,
w, h, s,
lambda, theta,
duhat1, dvhat1);
}
// *******************************
// Solve Data-L1 Stereo
// *******************************
__global__
void SolveDataL1StereoKernel(const float *dwhat0,
const float *pw1, const float *pw2,
const float *Iw, const float *It,
int width, int height, int stride,
float lambda, float theta,
float *dwhat1)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row
float diw, dit, dwhat, dw;
float desiredRadius = (float)width / 2.20f;
float halfWidth = (float)width / 2.0f;
float halfHeight = (float)height / 2.0f;
float radius = sqrtf((iy - halfHeight) * (iy - halfHeight) + (ix - halfWidth) * (ix - halfWidth));
if ((iy < height) && (ix < width))
{
int pos = ix + iy * stride;// current pixel index
if (radius >= halfWidth)
{
dwhat1[pos] = 0.0f;
}
else
{
diw = Iw[pos];
dit = It[pos];
float dwhat = dwhat0[pos];
//problem 1a
float magi = (diw * diw);
float rho = (diw*dwhat + dit);
//float upper = lambda * theta*(diw * diw);
//float lower = -lambda * theta*(diw * diw);
float upper = lambda * theta*(magi);
float lower = -lambda * theta*(magi);
if ((rho <= upper) && (rho >= lower)) {
if (diw != 0) {
dw = dwhat - rho / diw;
}
else {
dw = dwhat;
}
}
else if (rho < lower) {
dw = dwhat + lambda * theta*diw;
}
else if (rho > upper) {
dw = dwhat - lambda * theta*diw;
}
/*if (dw > 0.5f) {
dw = 0.5f;
}*/
/*else if (dw < 0.0f) {
dw = 0.0f;
}*/
//problem 1b
float divpw;
int left = (ix - 1) + iy * stride;
// int right = (ix + 1) + iy * stride;
int down = ix + (iy - 1) * stride;
// int up = ix + (iy + 1) * stride;
if ((ix - 1) < 0) {
if ((iy - 1) < 0) {
//divpu = pu1[right] - pu1[pos] + pu2[up] - pu2[pos];
//divpv = pv1[right] - pv1[pos] + pv2[up] - pv2[pos];
divpw = pw1[pos] + pw2[pos];
}
else {
//divpu = pu1[right] - pu1[pos] + pu2[pos] - pu2[down];
//divpv = pv1[right] - pv1[pos] + pv2[pos] - pv2[down];
divpw = pw1[pos] + pw2[pos] - pw2[down];
}
}
else {
if ((iy - 1) < 0) {
//divpu = pu1[pos] - pu1[left] + pu2[up] - pu2[pos];
//divpv = pv1[pos] - pv1[left] + pv2[up] - pv2[pos];
divpw = pw1[pos] - pw1[left] + pw2[pos];
}
else {
divpw = pw1[pos] - pw1[left] + pw2[pos] - pw2[down];
}
}
float dwval = dw + theta * divpw;
dwhat1[pos] = dwval;
/*if (dwval < 0) {
dwhat1[pos] = 0.0f;
}
else if (dwval > 0.5f) {
dwhat1[pos] = 0.5f;
}
else {
dwhat1[pos] = dwval;
}*/
}
//dwhat1[pos] = dw + theta * divpu;
}
}
void Stereo::SolveDataL1Stereo(const float *dwhat0,
const float *pw1, const float *pw2,
const float *Iw, const float *Iz,
int w, int h, int s,
float lambda, float theta,
float *dwhat1)
{
// CTA size
dim3 threads(BlockWidth, BlockHeight);
// grid size
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
SolveDataL1StereoKernel << < blocks, threads >> > (dwhat0,
pw1, pw2,
Iw, Iz,
w, h, s,
lambda, theta,
dwhat1);
} |
d812499fdfd56cfd7bd9abd227df12753da088a0.hip | // !!! This is a file automatically generated by hipify!!!
#include "gpu_list_ranking.h"
#include "gpu_util.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <iostream>
#define BLOCK_SIZE 128
using namespace std;
GPUList::GPUList(Game& g, GPUGame& g_dev)
: GPUParity(g, g_dev), g(g), g_dev(g_dev)
{
int n = g.get_n_vertices();
l = 2*n + 2; // The length of the list
s = (l - 1) / 64 + 1; // The number of splitters
init_zero(&succ, l);
init_zero(&scratch, s);
init_zero(&split_succ, s);
h_split_succ = (int*)malloc(s * sizeof(int));
init_zero(&split_val, s * (g.get_max_pri() + 1));
h_split_val = (int*)malloc(s * (g.get_max_pri() + 1) * sizeof(int));
h_cum_val = (int*)malloc(s * (g.get_max_pri() + 1) * sizeof(int));
init_zero(&split_inf, s);
h_split_inf = (int*)malloc(s * sizeof(int));
init_zero(&vert_list, n+1);
}
/************************ BUILD LIST ***************************************/
/*
 * Initializes the succ array for d_build_list.
*
* Node i has two entries:
* - succ[2i] holds the "down" direction
* - succ[2i + 1] holds the "up" direction
*
* The sink is represented by succ[2n] (down) and succ[2n+1] (up)
*
* Each "up" edge points to the corresponding "down" edge. The down edges will
* be filled in by d_build_list.
*/
__global__ void d_init_list(int* succ, int* vert_list, int n)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx >= n+1)
return;
succ[2*idx + 1] = 2*idx;
if(idx == n)
succ[2*n] = -1; // The head of the list
else
vert_list[idx] = -1; // Initialize vert_list for later
}
/*
* Fill the succ array with a linked list that is an euler tour of the (pseudo)
* forest defined by strategy.
*
*/
__global__ void d_build_list(int* strategy, int* succ, int n)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx >= n)
return;
int next = strategy[idx];
// Compute the address of the "up" node for next
int next_up;
if(next == -1)
next_up = 2 * n + 1;
else
next_up = 2 * next + 1;
int my_up = 2 * idx + 1;
int my_down = 2*idx;
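// Atomically splice this vertex's up/down pair into the list immediately after next's "up" node:
// succ[next_up] now points to my_up, and my_down inherits next_up's previous successor.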
int new_down = atomicExch(&succ[next_up], my_up);
succ[my_down] = new_down;
}
/********************************* PICK SPLITTERS **************************/
__global__ void d_pick_splitters(int* succ, int* scratch, int l, int s)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx >= s)
return;
int splitter;
if(idx == s-1)
splitter = l - 1; // The head of the list is always a splitter
else
splitter = (int)(((float)(l-1) / (float)s) * (float)idx);
scratch[idx] = succ[splitter];
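// Replace the successor at the splitter with a negative sentinel (-100 - idx) so sublist
// traversal stops here; process_reduced() decodes it back to the sublist index.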
succ[splitter] = -100 -idx;
}
/****************************** TRAVERSE SUBLISTS **************************/
__device__ void inline d_init_val(int* val, int n_pris, int split, int thread_pri)
{
int base_addr = split * n_pris;
//if(thread_pri == 0)
//for(int i = 0; i < n_pris; i++)
//val[base_addr + i] = 0;
val[base_addr + thread_pri] = 0;
}
__device__ void inline d_update_val(int* val, int* priority, int* val_scratch,
int* vert_list, int current, int n_pris, int n_vertices, int split, int
thread_pri)
{
int vertex = current / 2;
if(vertex == n_vertices or current == -1) // Sink vertex has no priority
return;
// Direction is -1 for down and +1 for up
int direction = ((current % 2) * 2 - 1);
// Add in this vertex's priority
int p = priority[vertex];
if(thread_pri == p)
{
*val += direction;
}
// If current is an up edge, update val_scratch
if(current % 2 == 1)
{
val_scratch[vertex*n_pris + thread_pri] = *val;
// If this is the up edge, store the location of the list
if(thread_pri == 0)
vert_list[vertex] = split;
}
}
__global__ void d_traverse_sublists(int* succ, int* scratch,
int* split_succ, int* val, int* priority, int* vert_list, int*
val_scratch, int s, int n_pris, int n_pris_rounded, int n_vertices, int threads)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx >= threads)
return;
int split_index = idx/n_pris_rounded;
int thread_pri = idx % n_pris_rounded;
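// Threads are grouped in blocks of n_pris_rounded consecutive indices: each group walks one
// sublist, and thread_pri selects which priority counter this thread accumulates.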
if(thread_pri >= n_pris)
return;
while(split_index < s)
{
int cum_val = 0;
// Get the first step of the list
int current = scratch[split_index];
while(current >= 0)
{
d_update_val(&cum_val, priority, val_scratch, vert_list, current, n_pris,
n_vertices, split_index, thread_pri);
current = succ[current];
}
int base_addr = split_index * n_pris;
val[base_addr + thread_pri] = cum_val;
if(thread_pri == 0)
split_succ[split_index] = current;
split_index += threads/n_pris;
}
}
/********************* PROCESS REDUCED ***********************************/
void process_reduced(int* split_succ, int* split_val,
int* cum_val, int* split_inf, int n_pris, int s)
{
// Initialize the infinite array
for(int i = 0; i < s; i++)
split_inf[i] = 1;
// Initialize cumval
for(int i = 0; i < s * n_pris; i++)
cum_val[i] = 0;
// Walk the reduced list from the start
int current = s-1; // Up edge of the sink
int val[n_pris];
for(int i = 0; i < n_pris; i++)
val[i] = 0;
while(current != -1)
{
// Copy the current val into the out array
for(int i = 0; i < n_pris; i++)
{
cum_val[current*n_pris + i] = val[i];
}
// Add in the next segment
for(int i = 0; i < n_pris; i++)
{
val[i] += split_val[current*n_pris + i];
}
// This vertex is not infinite
split_inf[current] = 0;
// Move to the next vertex
int next = split_succ[current];
if(next == -1) // The sink
break;
current = -(next+100); // -100 offset was used in pick_splitters
}
}
/************************** BROADCAST VAL *********************************/
__global__ void d_broadcast_val(int* val, int* split_val, int* val_scratch, int*
vert_list, int* split_infinite, int* infinite, int n_pris, int pri_mem)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx >= pri_mem)
return;
int vertex = idx / n_pris;
int sublist = vert_list[vertex];
if(infinite[vertex] == -1) // Odd wins this vertex, ignore it
return;
if(sublist == -1) // Vertex was not touched by any sublist
{
infinite[vertex] = 1;
return;
}
int pri = idx % n_pris;
if(split_infinite[sublist] == 0)
{
val[idx] = val_scratch[idx] + split_val[sublist*n_pris + pri];
infinite[vertex] = 0;
//printf("vertex %d, sublist %d\n", vertex, sublist);
}
else
{
infinite[vertex] = 1;
}
}
void GPUList::compute_valuation()
{
current_val = val1;
int n = g.get_n_vertices();
int n_pris = g.get_max_pri() + 1;
int round_boundary = 4;
int n_pris_rounded;
if(n_pris % round_boundary == 0)
n_pris_rounded = n_pris;
else
n_pris_rounded = (n_pris/round_boundary + 1) * round_boundary;
// Build the successor array
hipLaunchKernelGGL(( d_init_list) , dim3(nblocks(n+1)), dim3(BLOCK_SIZE) , 0, 0, succ, vert_list, n);
hipLaunchKernelGGL(( d_build_list) , dim3(nblocks(n)), dim3(BLOCK_SIZE) , 0, 0, strategy, succ, n);
// Pick the splitters
hipLaunchKernelGGL(( d_pick_splitters) , dim3(nblocks(s)), dim3(BLOCK_SIZE) , 0, 0, succ, scratch, l, s);
// Reduce the list
int traverse_threads = 4* 4096 * 16 * n_pris_rounded;
hipLaunchKernelGGL(( d_traverse_sublists) , dim3(nblocks(traverse_threads)), dim3(BLOCK_SIZE) , 0, 0, succ,
scratch, split_succ, split_val, g_dev.priority,
vert_list, val2, s, n_pris, n_pris_rounded, n, traverse_threads);
// Copy reduced list to host and process
int split_mem = s * sizeof(int);
int split_pri_mem = s * n_pris * sizeof(int);
gpu_assert(hipMemcpy(h_split_succ, split_succ, split_mem, hipMemcpyDeviceToHost));
gpu_assert(hipMemcpy(h_split_val, split_val, split_pri_mem, hipMemcpyDeviceToHost));
process_reduced(h_split_succ, h_split_val, h_cum_val, h_split_inf, n_pris, s);
gpu_assert(hipMemcpy(split_val, h_cum_val, split_pri_mem, hipMemcpyHostToDevice));
gpu_assert(hipMemcpy(split_inf, h_split_inf, split_mem, hipMemcpyHostToDevice));
// Broadcast values out to the vertices
int pri_mem = n * n_pris;
hipLaunchKernelGGL(( d_broadcast_val) , dim3(nblocks(pri_mem)), dim3(BLOCK_SIZE) , 0, 0, val1, split_val,
val2, vert_list, split_inf, infinite, n_pris, pri_mem);
}
void GPUList::compute_first_val()
{
GPUParity::compute_valuation();
}
| d812499fdfd56cfd7bd9abd227df12753da088a0.cu | #include "gpu_list_ranking.h"
#include "gpu_util.h"
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <cuda_runtime.h>
#include <iostream>
#define BLOCK_SIZE 128
using namespace std;
GPUList::GPUList(Game& g, GPUGame& g_dev)
: GPUParity(g, g_dev), g(g), g_dev(g_dev)
{
int n = g.get_n_vertices();
l = 2*n + 2; // The length of the list
s = (l - 1) / 64 + 1; // The number of splitters
init_zero(&succ, l);
init_zero(&scratch, s);
init_zero(&split_succ, s);
h_split_succ = (int*)malloc(s * sizeof(int));
init_zero(&split_val, s * (g.get_max_pri() + 1));
h_split_val = (int*)malloc(s * (g.get_max_pri() + 1) * sizeof(int));
h_cum_val = (int*)malloc(s * (g.get_max_pri() + 1) * sizeof(int));
init_zero(&split_inf, s);
h_split_inf = (int*)malloc(s * sizeof(int));
init_zero(&vert_list, n+1);
}
/************************ BUILD LIST ***************************************/
/*
 * Initializes the succ array for d_build_list.
*
* Node i has two entries:
* - succ[2i] holds the "down" direction
* - succ[2i + 1] holds the "up" direction
*
* The sink is represented by succ[2n] (down) and succ[2n+1] (up)
*
* Each "up" edge points to the corresponding "down" edge. The down edges will
* be filled in by d_build_list.
*/
__global__ void d_init_list(int* succ, int* vert_list, int n)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx >= n+1)
return;
succ[2*idx + 1] = 2*idx;
if(idx == n)
succ[2*n] = -1; // The head of the list
else
vert_list[idx] = -1; // Initialize vert_list for later
}
/*
* Fill the succ array with a linked list that is an euler tour of the (pseudo)
* forest defined by strategy.
*
*/
__global__ void d_build_list(int* strategy, int* succ, int n)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx >= n)
return;
int next = strategy[idx];
// Compute the address of the "up" node for next
int next_up;
if(next == -1)
next_up = 2 * n + 1;
else
next_up = 2 * next + 1;
int my_up = 2 * idx + 1;
int my_down = 2*idx;
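// Atomically splice this vertex's up/down pair into the list immediately after next's "up" node:
// succ[next_up] now points to my_up, and my_down inherits next_up's previous successor.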
int new_down = atomicExch(&succ[next_up], my_up);
succ[my_down] = new_down;
}
/********************************* PICK SPLITTERS **************************/
__global__ void d_pick_splitters(int* succ, int* scratch, int l, int s)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx >= s)
return;
int splitter;
if(idx == s-1)
splitter = l - 1; // The head of the list is always a splitter
else
splitter = (int)(((float)(l-1) / (float)s) * (float)idx);
scratch[idx] = succ[splitter];
succ[splitter] = -100 -idx;
}
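/*
 * Each chosen splitter position has its old successor saved in scratch[idx]
 * and is then overwritten with the sentinel -100 - idx, so a sublist
 * traversal can tell where the next sublist starts and recover that sublist's
 * index as -(value + 100) (see process_reduced below); splitter 3, for
 * example, leaves -103 in the succ array.
 */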
/****************************** TRAVERSE SUBLISTS **************************/
__device__ void inline d_init_val(int* val, int n_pris, int split, int thread_pri)
{
int base_addr = split * n_pris;
//if(thread_pri == 0)
//for(int i = 0; i < n_pris; i++)
//val[base_addr + i] = 0;
val[base_addr + thread_pri] = 0;
}
__device__ void inline d_update_val(int* val, int* priority, int* val_scratch,
int* vert_list, int current, int n_pris, int n_vertices, int split, int
thread_pri)
{
int vertex = current / 2;
if(vertex == n_vertices or current == -1) // Sink vertex has no priority
return;
// Direction is -1 for down and +1 for up
int direction = ((current % 2) * 2 - 1);
// Add in this vertex's priority
int p = priority[vertex];
if(thread_pri == p)
{
*val += direction;
}
// If current is an up edge, update val_scratch
if(current % 2 == 1)
{
val_scratch[vertex*n_pris + thread_pri] = *val;
// If this is the up edge, store the location of the list
if(thread_pri == 0)
vert_list[vertex] = split;
}
}
__global__ void d_traverse_sublists(int* succ, int* scratch,
int* split_succ, int* val, int* priority, int* vert_list, int*
val_scratch, int s, int n_pris, int n_pris_rounded, int n_vertices, int threads)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx >= threads)
return;
int split_index = idx/n_pris_rounded;
int thread_pri = idx % n_pris_rounded;
if(thread_pri >= n_pris)
return;
while(split_index < s)
{
int cum_val = 0;
// Get the first step of the list
int current = scratch[split_index];
while(current >= 0)
{
d_update_val(&cum_val, priority, val_scratch, vert_list, current, n_pris,
n_vertices, split_index, thread_pri);
current = succ[current];
}
int base_addr = split_index * n_pris;
val[base_addr + thread_pri] = cum_val;
if(thread_pri == 0)
split_succ[split_index] = current;
split_index += threads/n_pris;
}
}
/********************* PROCESS REDUCED ***********************************/
void process_reduced(int* split_succ, int* split_val,
int* cum_val, int* split_inf, int n_pris, int s)
{
// Initialize the infinite array
for(int i = 0; i < s; i++)
split_inf[i] = 1;
// Initialize cumval
for(int i = 0; i < s * n_pris; i++)
cum_val[i] = 0;
// Walk the reduced list from the start
int current = s-1; // Up edge of the sink
int val[n_pris];
for(int i = 0; i < n_pris; i++)
val[i] = 0;
while(current != -1)
{
// Copy the current val into the out array
for(int i = 0; i < n_pris; i++)
{
cum_val[current*n_pris + i] = val[i];
}
// Add in the next segment
for(int i = 0; i < n_pris; i++)
{
val[i] += split_val[current*n_pris + i];
}
// This vertex is not infinite
split_inf[current] = 0;
// Move to the next vertex
int next = split_succ[current];
if(next == -1) // The sink
break;
current = -(next+100); // -100 offset was used in pick_splitters
}
}
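/*
 * After this walk cum_val[j*n_pris + p] holds, for every sublist j reached
 * from the sink's up edge (splitter s-1), the per-priority value accumulated
 * over the sublists walked before j, while split_inf[j] stays 1 for sublists
 * the walk never reaches; d_broadcast_val below marks their vertices as
 * infinite.
 */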
/************************** BROADCAST VAL *********************************/
__global__ void d_broadcast_val(int* val, int* split_val, int* val_scratch, int*
vert_list, int* split_infinite, int* infinite, int n_pris, int pri_mem)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx >= pri_mem)
return;
int vertex = idx / n_pris;
int sublist = vert_list[vertex];
if(infinite[vertex] == -1) // Odd wins this vertex, ignore it
return;
if(sublist == -1) // Vertex was not touched by any sublist
{
infinite[vertex] = 1;
return;
}
int pri = idx % n_pris;
if(split_infinite[sublist] == 0)
{
val[idx] = val_scratch[idx] + split_val[sublist*n_pris + pri];
infinite[vertex] = 0;
//printf("vertex %d, sublist %d\n", vertex, sublist);
}
else
{
infinite[vertex] = 1;
}
}
void GPUList::compute_valuation()
{
current_val = val1;
int n = g.get_n_vertices();
int n_pris = g.get_max_pri() + 1;
int round_boundary = 4;
int n_pris_rounded;
if(n_pris % round_boundary == 0)
n_pris_rounded = n_pris;
else
n_pris_rounded = (n_pris/round_boundary + 1) * round_boundary;
// Build the successor array
d_init_list <<< nblocks(n+1), BLOCK_SIZE >>> (succ, vert_list, n);
d_build_list <<< nblocks(n), BLOCK_SIZE >>> (strategy, succ, n);
// Pick the splitters
d_pick_splitters <<< nblocks(s), BLOCK_SIZE >>> (succ, scratch, l, s);
// Reduce the list
int traverse_threads = 4* 4096 * 16 * n_pris_rounded;
d_traverse_sublists <<< nblocks(traverse_threads), BLOCK_SIZE >>> (succ,
scratch, split_succ, split_val, g_dev.priority,
vert_list, val2, s, n_pris, n_pris_rounded, n, traverse_threads);
// Copy reduced list to host and process
int split_mem = s * sizeof(int);
int split_pri_mem = s * n_pris * sizeof(int);
gpu_assert(cudaMemcpy(h_split_succ, split_succ, split_mem, cudaMemcpyDeviceToHost));
gpu_assert(cudaMemcpy(h_split_val, split_val, split_pri_mem, cudaMemcpyDeviceToHost));
process_reduced(h_split_succ, h_split_val, h_cum_val, h_split_inf, n_pris, s);
gpu_assert(cudaMemcpy(split_val, h_cum_val, split_pri_mem, cudaMemcpyHostToDevice));
gpu_assert(cudaMemcpy(split_inf, h_split_inf, split_mem, cudaMemcpyHostToDevice));
// Broadcast values out to the vertices
int pri_mem = n * n_pris;
d_broadcast_val <<< nblocks(pri_mem), BLOCK_SIZE >>> (val1, split_val,
val2, vert_list, split_inf, infinite, n_pris, pri_mem);
}
void GPUList::compute_first_val()
{
GPUParity::compute_valuation();
}
|
b5697f3029e53c7d7973f9918a42cfa953d8c590.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: rows of output matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: subset of number of non-zeroes of input matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is dense and the output matrix is dense.
*
* @params in dense input pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param inClen number of columns of input matrix
* @param retRlen number of rows of output matrix
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Does a copy of upper to lower triangle of the given matrix
* @param ret the input and output array allocated on the GPU
* @param dim the number of rows of the square matrix ret
* @param N total number of elements of the matrix
*/
extern "C"
extern "C"
__global__ void matrix_sqrt(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = sqrt(A[index]);
}
} | b5697f3029e53c7d7973f9918a42cfa953d8c590.cu | #include "includes.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: rows of output matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: subset of number of non-zeroes of input matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is dense and the output matrix is dense.
*
* @params in dense input pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param inClen number of columns of input matrix
* @param retRlen number of rows of output matrix
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Does a copy of upper to lower triangle of the given matrix
* @param ret the input and output array allocated on the GPU
* @param dim the number of rows of the square matrix ret
* @param N total number of elements of the matrix
*/
extern "C"
extern "C"
__global__ void matrix_sqrt(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = sqrt(A[index]);
}
} |
d2cc1ad4c8c53ab66c44ffc93411307c7e80f5c5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef __CUDALOCKSBARRIERFAST_CU__
#define __CUDALOCKSBARRIERFAST_CU__
#include "cudaLocks.h"
/*
Helper function to set the passed in inVars flag to 1 (signifies that this TB
has joined the barrier).
*/
inline __device__ void setMyInFlag(unsigned int * inVars,
const unsigned int threadID,
const unsigned int blockID) {
if (threadID == 0)
{
// atomicExch acts as a store release, need TF to enforce ordering
__threadfence();
atomicExch((unsigned int *)(inVars + blockID), 1);
}
__syncthreads();
}
/*
Helper function for the main TB of this group to spin, checking to see if
all other TBs joining this barrier have joined or not.
*/
inline __device__ void spinOnInFlags(unsigned int * inVars,
const int threadID,
const int numThreads,
const int numBlocks) {
// local variables
int done3 = 1;
// "main" TB loops, checking if everyone else has joined the barrier.
do
{
done3 = 1;
/*
Each thread in the main TB accesses a subset of the blocks, checking
if they have joined the barrier yet or not.
*/
for (int i = threadID; i < numBlocks; i += numThreads)
{
if (reinterpret_cast<volatile int * >(inVars)[i] != 1) {
// acts as a load acquire, need TF to enforce ordering
__threadfence();
done3 = 0;
// if one of them isn't ready, don't bother checking the others (just
// increases traffic)
break;
}
}
} while (!done3);
/*
When all the necessary TBs have joined the barrier, the threads will
reconverge here -- this avoids unnecessary atomic accesses for threads
whose assigned TBs have already joined the barrier.
*/
__syncthreads();
}
/*
Helper function for the main TB of this group to spin, checking to see if
all other TBs joining this barrier have joined or not.
*/
inline __device__ void spinOnInFlags_local(unsigned int * inVars,
const int threadID,
const int numThreads,
const int numBlocks) {
// local variables
int done3 = 1;
// "main" TB loops, checking if everyone else has joined the barrier.
do
{
done3 = 1;
/*
Each thread in the main TB accesses a subset of the blocks, checking
if they have joined the barrier yet or not.
*/
for (int i = threadID; i < numBlocks; i += numThreads)
{
if (reinterpret_cast<volatile int * >(inVars)[i] != 1) {
// acts as a load acquire, need TF to enforce ordering locally
__threadfence_block();
done3 = 0;
// if one of them isn't ready, don't bother checking the others (just
// increases traffic)
break;
}
}
} while (!done3);
/*
When all the necessary TBs have joined the barrier, the threads will
reconverge here -- this avoids unnecessary atomic accesses for threads
whose assigned TBs have already joined the barrier.
*/
__syncthreads();
}
/*
Helper function for main TB to set the outVars flags for all TBs at this
barrier to notify them that everyone has joined the barrier and they can
proceed.
*/
inline __device__ void setOutFlags(unsigned int * inVars,
unsigned int * outVars,
const int threadID,
const int numThreads,
const int numBlocks) {
for (int i = threadID; i < numBlocks; i += numThreads)
{
reinterpret_cast<volatile int * >(inVars)[i] = 0;
reinterpret_cast<volatile int * >(outVars)[i] = 1;
}
__syncthreads();
// outVars acts as a store release, need TF to enforce ordering
__threadfence();
}
/*
Helper function for main TB to set the outVars flags for all TBs at this
barrier to notify them that everyone has joined the barrier and they can
proceed.
*/
inline __device__ void setOutFlags_local(unsigned int * inVars,
unsigned int * outVars,
const int threadID,
const int numThreads,
const int numBlocks) {
for (int i = threadID; i < numBlocks; i += numThreads)
{
reinterpret_cast<volatile int * >(inVars)[i] = 0;
reinterpret_cast<volatile int * >(outVars)[i] = 1;
}
__syncthreads();
// outVars acts as a store release, need TF to enforce ordering locally
__threadfence_block();
}
/*
Helper function for each TB to spin waiting for its outVars flag to be set
by the main TB. When it is set, then this TB can safely exit the barrier.
*/
inline __device__ void spinOnMyOutFlag(unsigned int * inVars,
unsigned int * outVars,
const int blockID,
const int threadID) {
if (threadID == 0)
{
while (reinterpret_cast<volatile int * >(outVars)[blockID] != 1) { ; }
inVars[blockID] = outVars[blockID] = 0;
// these stores act as a store release, need TF to enforce ordering
__threadfence();
}
__syncthreads();
}
/*
Helper function for each TB to spin waiting for its outVars flag to be set
by the main TB. When it is set, then this TB can safely exit the barrier.
*/
inline __device__ void spinOnMyOutFlag_local(unsigned int * inVars,
unsigned int * outVars,
const int blockID,
const int threadID) {
if (threadID == 0)
{
while (reinterpret_cast<volatile int * >(outVars)[blockID] != 1) { ; }
inVars[blockID] = outVars[blockID] = 0;
// these stores act as a store release, need TF to enforce ordering locally
__threadfence_block();
}
__syncthreads();
}
__device__ void cudaBarrier(unsigned int * barrierBuffers,
const int arrayStride,
const unsigned int numBlocksAtBarr)
{
// local variables
const int threadID = threadIdx.x;
const int blockID = blockIdx.x;
const int numThreads = blockDim.x;
// ** NOTE: setting numBlocks like this only works if the first TB on
// each SM joins the global barrier
const int numBlocks = numBlocksAtBarr;
unsigned int * const inVars = barrierBuffers;
unsigned int * const outVars = barrierBuffers + arrayStride;
/*
Thread 0 from each TB sets its 'private' flag in the in array to 1 to
signify that it has joined the barrier.
*/
setMyInFlag(inVars, threadID, blockID);
// TB 0 is the "main" TB for the global barrier
if (blockID == 0)
{
// "main" TB loops, checking if everyone else has joined the barrier.
spinOnInFlags(inVars, threadID, numThreads, numBlocks);
/*
Once all the TBs arrive at the barrier, the main TB resets them to
notify everyone else that they can move forward beyond the barrier --
again each thread in the main TB takes a subset of the necessary TBs
and sets their in flag to 0 and out flag to 1.
*/
setOutFlags(inVars, outVars, threadID, numThreads, numBlocks);
}
/*
All TBs (including the main one) spin, checking to see if the main one
set their out location yet -- if it did, then they can move ahead
because the barrier is done.
*/
spinOnMyOutFlag(inVars, outVars, blockID, threadID);
}
// same algorithm but per-SM synchronization
__device__ void cudaBarrierLocal(// for global barrier
unsigned int * barrierBuffers,
const unsigned int numBlocksAtBarr,
const int arrayStride,
// for local barrier
unsigned int * perSMBarrierBuffers,
const unsigned int smID,
const unsigned int numTBs_perSM,
const unsigned int perSM_blockID,
const bool isLocalGlobalBarr,
const int MAX_BLOCKS)
{
// local variables
const int threadID = threadIdx.x;
const int numThreads = blockDim.x;
const int numBlocks = numTBs_perSM;
/*
Each SM has MAX_BLOCKS*2 locations in perSMBarrierBuffers, so my SM's
inVars locations start at perSMBarrierBuffers[smID*2*MAX_BLOCKS] and my
SM's outVars locations start at
perSMBarrierBuffers[smID*2*MAX_BLOCKS + MAX_BLOCKS].
*/
unsigned int * const inVars = perSMBarrierBuffers + (MAX_BLOCKS * smID * 2);
unsigned int * const outVars = perSMBarrierBuffers + ((MAX_BLOCKS * smID * 2) + MAX_BLOCKS);
/*
Thread 0 from each TB sets its 'private' flag in the in array to 1 to
signify that it has joined the barrier.
*/
setMyInFlag(inVars, threadID, perSM_blockID);
// first TB on this SM is the "main" TB for the local barrier
if (perSM_blockID == 0)
{
// "main" TB loops, checking if everyone else has joined the barrier.
spinOnInFlags_local(inVars, threadID, numThreads, numBlocks);
/*
If we are calling the global tree barrier from within the local tree
barrier, call it here. Now that all of the TBs on this SM have joined
the local barrier, TB 0 on this SM joins the global barrier.
*/
if (isLocalGlobalBarr) {
cudaBarrier(barrierBuffers, arrayStride, numBlocksAtBarr);
}
/*
Once all the TBs arrive at the barrier, the main TB resets their inVar
and sets their outVar to notify everyone else that they can move
forward beyond the barrier -- each thread in the main TB takes a subset
of the necessary TBs and sets their in flag to 0 and out flag to 1.
*/
setOutFlags_local(inVars, outVars, threadID, numThreads, numBlocks);
}
/*
All TBs (including the main one) spin, checking to see if the main TB
set their out location yet -- if it did, then they can move ahead
because the barrier is done.
*/
spinOnMyOutFlag_local(inVars, outVars, perSM_blockID, threadID);
}
/*
Decentralized tree barrier that has 1 TB per SM join the global decentralized
barrier in the middle, then sets the out flags of the others on this SM to 1
after returning. This avoids the need for a second local barrier after the
global barrier.
*/
__device__ void cudaBarrierLocalGlobal(// for global barrier
unsigned int * barrierBuffers,
const unsigned int numBlocksAtBarr,
const int arrayStride,
// for local barrier
unsigned int * perSMBarrierBuffers,
const unsigned int smID,
const unsigned int numTBs_perSM,
const unsigned int perSM_blockID,
const int MAX_BLOCKS)
{
// will call global barrier within it
cudaBarrierLocal(barrierBuffers, numBlocksAtBarr, arrayStride,
perSMBarrierBuffers, smID, numTBs_perSM, perSM_blockID,
true, MAX_BLOCKS);
}
/*
Helper function for joining the barrier with the 'lock-free' tree barrier.
*/
__device__ void joinLFBarrier_helper(unsigned int * barrierBuffers,
unsigned int * perSMBarrierBuffers,
const unsigned int numBlocksAtBarr,
const int smID,
const int perSM_blockID,
const int numTBs_perSM,
const int arrayStride,
const int MAX_BLOCKS) {
if (numTBs_perSM > 1) {
cudaBarrierLocalGlobal(barrierBuffers, numBlocksAtBarr, arrayStride,
perSMBarrierBuffers, smID, numTBs_perSM,
perSM_blockID, MAX_BLOCKS);
} else { // if only 1 TB on the SM, no need for the local barriers
cudaBarrier(barrierBuffers, arrayStride, numBlocksAtBarr);
}
}
#endif
| d2cc1ad4c8c53ab66c44ffc93411307c7e80f5c5.cu | #ifndef __CUDALOCKSBARRIERFAST_CU__
#define __CUDALOCKSBARRIERFAST_CU__
#include "cudaLocks.h"
/*
Helper function to set the passed in inVars flag to 1 (signifies that this TB
has joined the barrier).
*/
inline __device__ void setMyInFlag(unsigned int * inVars,
const unsigned int threadID,
const unsigned int blockID) {
if (threadID == 0)
{
// atomicExch acts as a store release, need TF to enforce ordering
__threadfence();
atomicExch((unsigned int *)(inVars + blockID), 1);
}
__syncthreads();
}
/*
Helper function for the main TB of this group to spin, checking to see if
all other TBs joining this barrier have joined or not.
*/
inline __device__ void spinOnInFlags(unsigned int * inVars,
const int threadID,
const int numThreads,
const int numBlocks) {
// local variables
int done3 = 1;
// "main" TB loops, checking if everyone else has joined the barrier.
do
{
done3 = 1;
/*
Each thread in the main TB accesses a subset of the blocks, checking
if they have joined the barrier yet or not.
*/
for (int i = threadID; i < numBlocks; i += numThreads)
{
if (reinterpret_cast<volatile int * >(inVars)[i] != 1) {
// acts as a load acquire, need TF to enforce ordering
__threadfence();
done3 = 0;
// if one of them isn't ready, don't bother checking the others (just
// increases traffic)
break;
}
}
} while (!done3);
/*
When all the necessary TBs have joined the barrier, the threads will
reconverge here -- this avoids unnecessary atomic accesses for threads
whose assigned TBs have already joined the barrier.
*/
__syncthreads();
}
/*
Helper function for the main TB of this group to spin, checking to see if
all other TBs joining this barrier have joined or not.
*/
inline __device__ void spinOnInFlags_local(unsigned int * inVars,
const int threadID,
const int numThreads,
const int numBlocks) {
// local variables
int done3 = 1;
// "main" TB loops, checking if everyone else has joined the barrier.
do
{
done3 = 1;
/*
Each thread in the main TB accesses a subset of the blocks, checking
if they have joined the barrier yet or not.
*/
for (int i = threadID; i < numBlocks; i += numThreads)
{
if (reinterpret_cast<volatile int * >(inVars)[i] != 1) {
// acts as a load acquire, need TF to enforce ordering locally
__threadfence_block();
done3 = 0;
// if one of them isn't ready, don't bother checking the others (just
// increases traffic)
break;
}
}
} while (!done3);
/*
When all the necessary TBs have joined the barrier, the threads will
reconverge here -- this avoids unnecessary atomic accesses for threads
whose assigned TBs have already joined the barrier.
*/
__syncthreads();
}
/*
Helper function for main TB to set the outVars flags for all TBs at this
barrier to notify them that everyone has joined the barrier and they can
proceed.
*/
inline __device__ void setOutFlags(unsigned int * inVars,
unsigned int * outVars,
const int threadID,
const int numThreads,
const int numBlocks) {
for (int i = threadID; i < numBlocks; i += numThreads)
{
reinterpret_cast<volatile int * >(inVars)[i] = 0;
reinterpret_cast<volatile int * >(outVars)[i] = 1;
}
__syncthreads();
// outVars acts as a store release, need TF to enforce ordering
__threadfence();
}
/*
Helper function for main TB to set the outVars flags for all TBs at this
barrier to notify them that everyone has joined the barrier and they can
proceed.
*/
inline __device__ void setOutFlags_local(unsigned int * inVars,
unsigned int * outVars,
const int threadID,
const int numThreads,
const int numBlocks) {
for (int i = threadID; i < numBlocks; i += numThreads)
{
reinterpret_cast<volatile int * >(inVars)[i] = 0;
reinterpret_cast<volatile int * >(outVars)[i] = 1;
}
__syncthreads();
// outVars acts as a store release, need TF to enforce ordering locally
__threadfence_block();
}
/*
Helper function for each TB to spin waiting for its outVars flag to be set
by the main TB. When it is set, then this TB can safely exit the barrier.
*/
inline __device__ void spinOnMyOutFlag(unsigned int * inVars,
unsigned int * outVars,
const int blockID,
const int threadID) {
if (threadID == 0)
{
while (reinterpret_cast<volatile int * >(outVars)[blockID] != 1) { ; }
inVars[blockID] = outVars[blockID] = 0;
// these stores act as a store release, need TF to enforce ordering
__threadfence();
}
__syncthreads();
}
/*
Helper function for each TB to spin waiting for its outVars flag to be set
by the main TB. When it is set, then this TB can safely exit the barrier.
*/
inline __device__ void spinOnMyOutFlag_local(unsigned int * inVars,
unsigned int * outVars,
const int blockID,
const int threadID) {
if (threadID == 0)
{
while (reinterpret_cast<volatile int * >(outVars)[blockID] != 1) { ; }
inVars[blockID] = outVars[blockID] = 0;
// these stores act as a store release, need TF to enforce ordering locally
__threadfence_block();
}
__syncthreads();
}
__device__ void cudaBarrier(unsigned int * barrierBuffers,
const int arrayStride,
const unsigned int numBlocksAtBarr)
{
// local variables
const int threadID = threadIdx.x;
const int blockID = blockIdx.x;
const int numThreads = blockDim.x;
// ** NOTE: setting numBlocks like this only works if the first TB on
// each SM joins the global barrier
const int numBlocks = numBlocksAtBarr;
unsigned int * const inVars = barrierBuffers;
unsigned int * const outVars = barrierBuffers + arrayStride;
/*
Thread 0 from each TB sets its 'private' flag in the in array to 1 to
signify that it has joined the barrier.
*/
setMyInFlag(inVars, threadID, blockID);
// TB 0 is the "main" TB for the global barrier
if (blockID == 0)
{
// "main" TB loops, checking if everyone else has joined the barrier.
spinOnInFlags(inVars, threadID, numThreads, numBlocks);
/*
Once all the TBs arrive at the barrier, the main TB resets them to
notify everyone else that they can move forward beyond the barrier --
again each thread in the main TB takes a subset of the necessary TBs
and sets their in flag to 0 and out flag to 1.
*/
setOutFlags(inVars, outVars, threadID, numThreads, numBlocks);
}
/*
All TBs (including the main one) spin, checking to see if the main one
set their out location yet -- if it did, then they can move ahead
because the barrier is done.
*/
spinOnMyOutFlag(inVars, outVars, blockID, threadID);
}
// same algorithm but per-SM synchronization
__device__ void cudaBarrierLocal(// for global barrier
unsigned int * barrierBuffers,
const unsigned int numBlocksAtBarr,
const int arrayStride,
// for local barrier
unsigned int * perSMBarrierBuffers,
const unsigned int smID,
const unsigned int numTBs_perSM,
const unsigned int perSM_blockID,
const bool isLocalGlobalBarr,
const int MAX_BLOCKS)
{
// local variables
const int threadID = threadIdx.x;
const int numThreads = blockDim.x;
const int numBlocks = numTBs_perSM;
/*
Each SM has MAX_BLOCKS*2 locations in perSMBarrierBuffers, so my SM's
inVars locations start at perSMBarrierBuffers[smID*2*MAX_BLOCKS] and my
SM's outVars locations start at
perSMBarrierBuffers[smID*2*MAX_BLOCKS + MAX_BLOCKS].
*/
unsigned int * const inVars = perSMBarrierBuffers + (MAX_BLOCKS * smID * 2);
unsigned int * const outVars = perSMBarrierBuffers + ((MAX_BLOCKS * smID * 2) + MAX_BLOCKS);
/*
Thread 0 from each TB sets its 'private' flag in the in array to 1 to
signify that it has joined the barrier.
*/
setMyInFlag(inVars, threadID, perSM_blockID);
// first TB on this SM is the "main" TB for the local barrier
if (perSM_blockID == 0)
{
// "main" TB loops, checking if everyone else has joined the barrier.
spinOnInFlags_local(inVars, threadID, numThreads, numBlocks);
/*
If we are calling the global tree barrier from within the local tree
barrier, call it here. Now that all of the TBs on this SM have joined
the local barrier, TB 0 on this SM joins the global barrier.
*/
if (isLocalGlobalBarr) {
cudaBarrier(barrierBuffers, arrayStride, numBlocksAtBarr);
}
/*
Once all the TBs arrive at the barrier, the main TB resets their inVar
and sets their outVar to notify everyone else that they can move
forward beyond the barrier -- each thread in the main TB takes a subset
of the necessary TBs and sets their in flag to 0 and out flag to 1.
*/
setOutFlags_local(inVars, outVars, threadID, numThreads, numBlocks);
}
/*
All TBs (including the main one) spin, checking to see if the main TB
set their out location yet -- if it did, then they can move ahead
because the barrier is done.
*/
spinOnMyOutFlag_local(inVars, outVars, perSM_blockID, threadID);
}
/*
Decentralized tree barrier that has 1 TB per SM join the global decentralized
barrier in the middle, then sets the out flags of the others on this SM to 1
after returning. This avoids the need for a second local barrier after the
global barrier.
*/
__device__ void cudaBarrierLocalGlobal(// for global barrier
unsigned int * barrierBuffers,
const unsigned int numBlocksAtBarr,
const int arrayStride,
// for local barrier
unsigned int * perSMBarrierBuffers,
const unsigned int smID,
const unsigned int numTBs_perSM,
const unsigned int perSM_blockID,
const int MAX_BLOCKS)
{
// will call global barrier within it
cudaBarrierLocal(barrierBuffers, numBlocksAtBarr, arrayStride,
perSMBarrierBuffers, smID, numTBs_perSM, perSM_blockID,
true, MAX_BLOCKS);
}
/*
Helper function for joining the barrier with the 'lock-free' tree barrier.
*/
__device__ void joinLFBarrier_helper(unsigned int * barrierBuffers,
unsigned int * perSMBarrierBuffers,
const unsigned int numBlocksAtBarr,
const int smID,
const int perSM_blockID,
const int numTBs_perSM,
const int arrayStride,
const int MAX_BLOCKS) {
if (numTBs_perSM > 1) {
cudaBarrierLocalGlobal(barrierBuffers, numBlocksAtBarr, arrayStride,
perSMBarrierBuffers, smID, numTBs_perSM,
perSM_blockID, MAX_BLOCKS);
} else { // if only 1 TB on the SM, no need for the local barriers
cudaBarrier(barrierBuffers, arrayStride, numBlocksAtBarr);
}
}
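/*
  Typical usage (sketch): the caller decides how many TBs join the global
  barrier (one per SM), derives smID, perSM_blockID and numTBs_perSM for the
  current TB, zero-initializes barrierBuffers and perSMBarrierBuffers before
  launch, and then calls

    joinLFBarrier_helper(barrierBuffers, perSMBarrierBuffers, numBlocksAtBarr,
                         smID, perSM_blockID, numTBs_perSM, arrayStride,
                         MAX_BLOCKS);

  at every point where all participating TBs must synchronize.
*/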
#endif
|
05f19ee1f0cda0d381837bd71eb9d58a2af3b0cd.hip | // !!! This is a file automatically generated by hipify!!!
#include<cuda.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <cstdlib>
#include <iostream>
#include <sstream>
__global__ void fillArray(double* array, int size, double value)
{
unsigned int i=threadIdx.x+blockIdx.x*blockDim.x;
if(i<size){
array[i]=value;
}
}
__global__ void applyDirichlet(double* load, double* csr_matrix, int* csr_col_device,int* csrRowPtr,int *isboundaryNode, unsigned int entries, int elementsX, int elementsY, int degree, double boundaryValue)
{
unsigned int i=threadIdx.x+blockIdx.x*blockDim.x;
int row=0;
int col;
int ucindex;
unsigned long int pointCount=(degree+1+(elementsX-1)*degree)*(degree+1+(elementsY-1)*degree)-1;
double sum=0;
double bound[4];
bound[0]=0;
bound[1]=0;
bound[2]=0;
bound[3]=0;
if(i<pointCount+1){
int start=csrRowPtr[i];
int end =csrRowPtr[i+1];
for(int j=start;j<end;j++){
if(isboundaryNode[csr_col_device[j]]!=0){
sum+=bound[isboundaryNode[csr_col_device[j]]-1]*csr_matrix[j];
}
}
load[i]-=sum;
}
__syncthreads();
if(i<entries){
col=csr_col_device[i];
while((row<=pointCount) && (csrRowPtr[row]<=i)){
row++;
}
row--;
if((isboundaryNode[col]!=0)||(isboundaryNode[row]!=0)){
if(col!=row){
csr_matrix[i]=0;
}
}
__syncthreads();
if((isboundaryNode[col]!=0)||(isboundaryNode[row]!=0)){
if(col==row){
csr_matrix[i]=1;
}
}
}
}
__global__ void vectorDirichlet(double* load, double* csr_matrix, int* csr_col_device,int* csrRowPtr,int *isboundaryNode,unsigned int entries, int elementsX, int elementsY, int degree, double boundaryValue)
{
unsigned int i=threadIdx.x+blockIdx.x*blockDim.x;
int row=0;
int col;
int ucindex;
unsigned long int pointCount=(degree+1+(elementsX-1)*degree)*(degree+1+(elementsY-1)*degree)-1;
double sum=0;
double bound[4];
bound[0]=0;
bound[1]=0;
bound[2]=0;
bound[3]=0;
if(i<entries){
col=csr_col_device[i];
while((row<=pointCount) && (csrRowPtr[row]<=i)){
row++;
}
row--;
if((isboundaryNode[col]!=0)||(isboundaryNode[row]!=0)){
if(col==row){
load[col]=bound[isboundaryNode[col]-1];
}
}
}
}
__global__ void BernBinomCoeff(double *M, int n)
{
unsigned int i= threadIdx.x;
unsigned int j= threadIdx.y;
unsigned int top_0=1;
unsigned int top_1=1;
unsigned int bottom=1;
unsigned int n_save=n;
//guarantees that every step in the solution is smaller than the final solution thus avoiding overflow
for (int d=1; d <= i; d++){
top_0*= n_save--;
top_0 /= d;
}
n_save=n;
for (int d=1; d <= j; d++) {
top_1*= n_save--;
top_1 /= d;
}
n_save=2*n;
for (int d=1; d <= i+j; d++) {
bottom*= n_save--;
bottom /= d;
}
M[i+j*(n+1)]=(double)(top_0*top_1)/bottom;
}
__global__ void ass_A_exact(double a, double b,int *coo_row_device,int *coo_col_device, double*coo_value, int degree,long long int* index, int *elements, double *M, double *M_m, int elementsX, int elementsY)
{
unsigned long int pointCount=(degree+1+(elementsX-1)*degree)*(degree+1+(elementsY-1)*degree);
double *B;
B=(double*)malloc((degree+1)*(degree+1)*(degree+1)*(degree+1)*sizeof(double));
unsigned int i_glob;
unsigned int j_glob;
unsigned int shift;
double sum=0;
unsigned int element=threadIdx.x+blockIdx.x*blockDim.x;
int n=degree;
if(element<elementsX*elementsY){
for (int i=0; i<=n;i++)
for(int j=0; j<=n;j++)
for (int k=0; k<=n;k++)
for(int l=0; l<=n;l++){
sum=0;
shift=i+j*(degree+1)+(degree+1)*(degree+1)*(k+l*(degree+1));
if((i<n) && (k<n))
sum+=M_m[i+n*k];
if((i>0) && (i-1<n) && (k<n))
sum-=M_m[i-1+n*k];
if((k>0)&& (i<n) && (k-1<n))
sum-=M_m[i+n*(k-1)];
if((k>0) && (i>0) && (i-1<n)&& (k-1<n))
sum+=M_m[i-1+n*(k-1)];
B[shift]=M[j+l*(n+1)]*b/a*sum;
sum=0;
if((j<n) && (l<n))
sum=M_m[j+n*l];
if((j>0) && (j-1<n) && (l<n))
sum-=M_m[j-1+n*l];
if((l>0)&& (j<n) && (l-1<n))
sum-=M_m[j+n*(l-1)];
if((l>0) && (j>0) && (j-1<n)&& (l-1<n))
sum+=M_m[j-1+n*(l-1)];
B[shift]+=M[i+k*(n+1)]*a/b*(sum);
B[shift]*=(double)(n*n)/(4*n*n-1);
B[shift]*=(double)(n*n)/(4*n*n-1);
}
for(int i=0; i<(n+1)*(n+1);i++){
for(int j=0; j<(n+1)*(n+1);j++){
i_glob=elements[element*(n+1)*(n+1)+i];
j_glob=elements[element*(n+1)*(n+1)+j];
coo_row_device[element*(n+1)*(n+1)*(n+1)*(n+1)+i+j*(n+1)*(n+1)]=i_glob;
coo_col_device[element*(n+1)*(n+1)*(n+1)*(n+1)+i+j*(n+1)*(n+1)]=j_glob;
index[element*(n+1)*(n+1)*(n+1)*(n+1)+i+j*(n+1)*(n+1)]=i_glob*pointCount+j_glob;
coo_value[element*(n+1)*(n+1)*(n+1)*(n+1)+i+j*(n+1)*(n+1)]=B[i+j*(n+1)*(n+1)];
}
}
}
free(B);
}
__global__ void split(long long int* index, int*cols, int*rows,unsigned long int pointCount,unsigned long int length)
{
unsigned int i=threadIdx.x+blockIdx.x*blockDim.x;
if(i<length){
rows[i]=index[i]/pointCount;
cols[i]=index[i]%pointCount;
}
}
__global__ void loadVector(double* loadList, int* index,int *elements, double a, double b, int degree, double func, int ElementCount)
{
int i_glob;
int element=threadIdx.x+blockIdx.x*blockDim.x;
if(element<ElementCount){
for(int i=0;i<(degree+1)*(degree+1);i++){
loadList[element*(degree+1)*(degree+1)+i]=func*a*b/((degree+1)*(degree+1));
index[element*(degree+1)*(degree+1)+i]=elements[element*(degree+1)*(degree+1)+i];
}
}
} | 05f19ee1f0cda0d381837bd71eb9d58a2af3b0cd.cu | #include<cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <cstdlib>
#include <iostream>
#include <sstream>
__global__ void fillArray(double* array, int size, double value)
{
unsigned int i=threadIdx.x+blockIdx.x*blockDim.x;
if(i<size){
array[i]=value;
}
}
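// The kernel below eliminates Dirichlet boundary nodes from the assembled CSR
// system in two passes: every row first subtracts the prescribed boundary
// values (the bound[] table, currently all zero) weighted by the matching
// matrix entries from its load entry, then all rows and columns that belong
// to boundary nodes are zeroed and their diagonal entries are set to 1.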
__global__ void applyDirichlet(double* load, double* csr_matrix, int* csr_col_device,int* csrRowPtr,int *isboundaryNode, unsigned int entries, int elementsX, int elementsY, int degree, double boundaryValue)
{
unsigned int i=threadIdx.x+blockIdx.x*blockDim.x;
int row=0;
int col;
int ucindex;
unsigned long int pointCount=(degree+1+(elementsX-1)*degree)*(degree+1+(elementsY-1)*degree)-1;
double sum=0;
double bound[4];
bound[0]=0;
bound[1]=0;
bound[2]=0;
bound[3]=0;
if(i<pointCount+1){
int start=csrRowPtr[i];
int end =csrRowPtr[i+1];
for(int j=start;j<end;j++){
if(isboundaryNode[csr_col_device[j]]!=0){
sum+=bound[isboundaryNode[csr_col_device[j]]-1]*csr_matrix[j];
}
}
load[i]-=sum;
}
__syncthreads();
if(i<entries){
col=csr_col_device[i];
while((row<=pointCount) && (csrRowPtr[row]<=i)){
row++;
}
row--;
if((isboundaryNode[col]!=0)||(isboundaryNode[row]!=0)){
if(col!=row){
csr_matrix[i]=0;
}
}
__syncthreads();
if((isboundaryNode[col]!=0)||(isboundaryNode[row]!=0)){
if(col==row){
csr_matrix[i]=1;
}
}
}
}
__global__ void vectorDirichlet(double* load, double* csr_matrix, int* csr_col_device,int* csrRowPtr,int *isboundaryNode,unsigned int entries, int elementsX, int elementsY, int degree, double boundaryValue)
{
unsigned int i=threadIdx.x+blockIdx.x*blockDim.x;
int row=0;
int col;
int ucindex;
unsigned long int pointCount=(degree+1+(elementsX-1)*degree)*(degree+1+(elementsY-1)*degree)-1;
double sum=0;
double bound[4];
bound[0]=0;
bound[1]=0;
bound[2]=0;
bound[3]=0;
if(i<entries){
col=csr_col_device[i];
while((row<=pointCount) && (csrRowPtr[row]<=i)){
row++;
}
row--;
if((isboundaryNode[col]!=0)||(isboundaryNode[row]!=0)){
if(col==row){
load[col]=bound[isboundaryNode[col]-1];
}
}
}
}
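// The kernel below fills M with M[i + j*(n+1)] = C(n,i)*C(n,j)/C(2n,i+j),
// one thread per matrix entry, so it is presumably launched with a single
// (n+1) x (n+1) thread block covering the whole matrix, e.g. (sketch only,
// d_M assumed to be a device array of (n+1)*(n+1) doubles):
// BernBinomCoeff<<<1, dim3(n+1, n+1)>>>(d_M, n);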
__global__ void BernBinomCoeff(double *M, int n)
{
unsigned int i= threadIdx.x;
unsigned int j= threadIdx.y;
unsigned int top_0=1;
unsigned int top_1=1;
unsigned int bottom=1;
unsigned int n_save=n;
//guarantees that every step in the solution is smaller than the final solution thus avoiding overflow
for (int d=1; d <= i; d++){
top_0*= n_save--;
top_0 /= d;
}
n_save=n;
for (int d=1; d <= j; d++) {
top_1*= n_save--;
top_1 /= d;
}
n_save=2*n;
for (int d=1; d <= i+j; d++) {
bottom*= n_save--;
bottom /= d;
}
M[i+j*(n+1)]=(double)(top_0*top_1)/bottom;
}
__global__ void ass_A_exact(double a, double b,int *coo_row_device,int *coo_col_device, double*coo_value, int degree,long long int* index, int *elements, double *M, double *M_m, int elementsX, int elementsY)
{
unsigned long int pointCount=(degree+1+(elementsX-1)*degree)*(degree+1+(elementsY-1)*degree);
double *B;
B=(double*)malloc((degree+1)*(degree+1)*(degree+1)*(degree+1)*sizeof(double));
unsigned int i_glob;
unsigned int j_glob;
unsigned int shift;
double sum=0;
unsigned int element=threadIdx.x+blockIdx.x*blockDim.x;
int n=degree;
if(element<elementsX*elementsY){
for (int i=0; i<=n;i++)
for(int j=0; j<=n;j++)
for (int k=0; k<=n;k++)
for(int l=0; l<=n;l++){
sum=0;
shift=i+j*(degree+1)+(degree+1)*(degree+1)*(k+l*(degree+1));
if((i<n) && (k<n))
sum+=M_m[i+n*k];
if((i>0) && (i-1<n) && (k<n))
sum-=M_m[i-1+n*k];
if((k>0)&& (i<n) && (k-1<n))
sum-=M_m[i+n*(k-1)];
if((k>0) && (i>0) && (i-1<n)&& (k-1<n))
sum+=M_m[i-1+n*(k-1)];
B[shift]=M[j+l*(n+1)]*b/a*sum;
sum=0;
if((j<n) && (l<n))
sum=M_m[j+n*l];
if((j>0) && (j-1<n) && (l<n))
sum-=M_m[j-1+n*l];
if((l>0)&& (j<n) && (l-1<n))
sum-=M_m[j+n*(l-1)];
if((l>0) && (j>0) && (j-1<n)&& (l-1<n))
sum+=M_m[j-1+n*(l-1)];
B[shift]+=M[i+k*(n+1)]*a/b*(sum);
B[shift]*=(double)(n*n)/(4*n*n-1);
B[shift]*=(double)(n*n)/(4*n*n-1);
}
for(int i=0; i<(n+1)*(n+1);i++){
for(int j=0; j<(n+1)*(n+1);j++){
i_glob=elements[element*(n+1)*(n+1)+i];
j_glob=elements[element*(n+1)*(n+1)+j];
coo_row_device[element*(n+1)*(n+1)*(n+1)*(n+1)+i+j*(n+1)*(n+1)]=i_glob;
coo_col_device[element*(n+1)*(n+1)*(n+1)*(n+1)+i+j*(n+1)*(n+1)]=j_glob;
index[element*(n+1)*(n+1)*(n+1)*(n+1)+i+j*(n+1)*(n+1)]=i_glob*pointCount+j_glob;
coo_value[element*(n+1)*(n+1)*(n+1)*(n+1)+i+j*(n+1)*(n+1)]=B[i+j*(n+1)*(n+1)];
}
}
}
free(B);
}
__global__ void split(long long int* index, int*cols, int*rows,unsigned long int pointCount,unsigned long int length)
{
unsigned int i=threadIdx.x+blockIdx.x*blockDim.x;
if(i<length){
rows[i]=index[i]/pointCount;
cols[i]=index[i]%pointCount;
}
}
__global__ void loadVector(double* loadList, int* index,int *elements, double a, double b, int degree, double func, int ElementCount)
{
int i_glob;
int element=threadIdx.x+blockIdx.x*blockDim.x;
if(element<ElementCount){
for(int i=0;i<(degree+1)*(degree+1);i++){
loadList[element*(degree+1)*(degree+1)+i]=func*a*b/((degree+1)*(degree+1));
index[element*(degree+1)*(degree+1)+i]=elements[element*(degree+1)*(degree+1)+i];
}
}
} |
56ca6ea49f9343c1858c7bc0989971e66906d9b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--gridDim=16 --blockDim=64
#include "common.h"
__global__ void
d_boxfilter_rgba_y(unsigned int *id, unsigned int *od, int w, int h, int r)
{
__requires(w == 1024);
__requires(h == 1024);
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
id = &id[x];
od = &od[x];
float scale = 1.0f / (float)((r << 1) + 1);
float4 t;
// do top edge
t = rgbaIntToFloat(id[0]) * r;
for (int y = 0; y < (r + 1); y++)
{
t += rgbaIntToFloat(id[y*w]);
}
od[0] = rgbaFloatToInt(t * scale);
for (int y = 1; y < (r + 1); y++)
{
t += rgbaIntToFloat(id[(y + r) * w]);
t -= rgbaIntToFloat(id[0]);
od[y * w] = rgbaFloatToInt(t * scale);
}
// main loop
for (int y = (r + 1); y < (h - r); y++)
{
t += rgbaIntToFloat(id[(y + r) * w]);
t -= rgbaIntToFloat(id[((y - r) * w) - w]);
od[y * w] = rgbaFloatToInt(t * scale);
}
// do bottom edge
for (int y = h - r; y < h; y++)
{
t += rgbaIntToFloat(id[(h - 1) * w]);
t -= rgbaIntToFloat(id[((y - r) * w) - w]);
od[y * w] = rgbaFloatToInt(t * scale);
}
}
| 56ca6ea49f9343c1858c7bc0989971e66906d9b2.cu | //pass
//--gridDim=16 --blockDim=64
#include "common.h"
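// The kernel below runs one thread per image column and box-filters that
// column with a sliding window of 2r+1 rows: it seeds a running sum at the
// top edge (clamping reads above the image to row 0), then slides the sum
// down one row at a time, adding the pixel r rows below and subtracting the
// pixel r rows above, clamping to the last row once the bottom edge is reached.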
__global__ void
d_boxfilter_rgba_y(unsigned int *id, unsigned int *od, int w, int h, int r)
{
__requires(w == 1024);
__requires(h == 1024);
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
id = &id[x];
od = &od[x];
float scale = 1.0f / (float)((r << 1) + 1);
float4 t;
// do top edge
t = rgbaIntToFloat(id[0]) * r;
for (int y = 0; y < (r + 1); y++)
{
t += rgbaIntToFloat(id[y*w]);
}
od[0] = rgbaFloatToInt(t * scale);
for (int y = 1; y < (r + 1); y++)
{
t += rgbaIntToFloat(id[(y + r) * w]);
t -= rgbaIntToFloat(id[0]);
od[y * w] = rgbaFloatToInt(t * scale);
}
// main loop
for (int y = (r + 1); y < (h - r); y++)
{
t += rgbaIntToFloat(id[(y + r) * w]);
t -= rgbaIntToFloat(id[((y - r) * w) - w]);
od[y * w] = rgbaFloatToInt(t * scale);
}
// do bottom edge
for (int y = h - r; y < h; y++)
{
t += rgbaIntToFloat(id[(h - 1) * w]);
t -= rgbaIntToFloat(id[((y - r) * w) - w]);
od[y * w] = rgbaFloatToInt(t * scale);
}
}
|
51f93bb025b8e9ed0d9f880c89b77ee1ff06c536.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "NearestNeighbors.h"
#include "GPUErrors.h"
#define DIMX 256 //Should be equal to number of threads/block.
__global__ void gpuSharedNN(float3* g_Points, int* g_ClosestPointIndex, int count)
{
	__shared__ float3 tile[DIMX];
	float NeighborClosest = 3.40282E38f;
	float distance;
	unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
	float3 myPoint;
	int NearestIndex = -1;
	if (ix < count)
	{
		myPoint = g_Points[ix];
	}
	//Walk the point set one tile of DIMX points at a time; the whole block
	//cooperatively loads each tile into shared memory (assumes blockDim.x == DIMX)
	for (int starti = 0; starti < count; starti += DIMX)
	{
		int loadIndex = starti + threadIdx.x;
		if (loadIndex < count)
		{
			tile[threadIdx.x] = g_Points[loadIndex];
		}
		__syncthreads();
		int tileSize = count - starti;
		if (tileSize > DIMX)
		{
			tileSize = DIMX;
		}
		if (ix < count)
		{
			for (int i = 0; i < tileSize; i++)
			{
				//Do not check distance between the same point
				if (starti + i != ix)
				{
					//Compute the distance between Points[ix] and the tiled point
					distance = sqrtf((myPoint.x - tile[i].x) * (myPoint.x - tile[i].x) + (myPoint.y - tile[i].y) * (myPoint.y - tile[i].y) + (myPoint.z - tile[i].z) * (myPoint.z - tile[i].z));
					//Is the computed distance nearest
					if (distance < NeighborClosest)
					{
						//Update the nearest neighbor distance and its index
						NeighborClosest = distance;
						NearestIndex = starti + i;
					}
				}
			}
		}
		//Wait until every thread is done with this tile before it is overwritten
		__syncthreads();
	}
	if (ix < count)
	{
		g_ClosestPointIndex[ix] = NearestIndex;
	}
}
//Host Function
__host__ void GPUSharedNearestNeighbors(float3* Points, int* ClosestPointIndex, int PointCount)
{
hipEvent_t kernel_start;
hipEvent_t kernel_stop;
float fElapsedTime = 0.0f;
float fMemoryTransferTime = 0.0f;
const int SizeInBytes = PointCount * sizeof(float3);
const int SizeInBytes1 = PointCount * sizeof(int);
HandleCUDAError(hipEventCreate(&kernel_start));
HandleCUDAError(hipEventCreate(&kernel_stop));
float3* d_Points;
int* d_ClosestPointIndex;
//Allocate device memory on the global memory
HandleCUDAError(hipMalloc((void**)&d_Points, SizeInBytes));
HandleCUDAError(hipMalloc((void**)&d_ClosestPointIndex, SizeInBytes1));
/*transfer data from CPU Memory to GPU Memory,measure the memory copy time, and
store in the fMemoryTransferTime*/
chrono::time_point<std::chrono::system_clock> start, end;
start = std::chrono::system_clock::now();
HandleCUDAError(hipMemcpy(d_Points, Points, SizeInBytes, hipMemcpyHostToDevice));
end = std::chrono::system_clock::now();
//Store the memory copy time in the variable
std::chrono::duration<float> elapsed_seconds = end - start;
fMemoryTransferTime = elapsed_seconds.count() * 1000.0f;
//cout << "\nGPU Computation using only Global Memory" << endl;
//Develop Block and Grid Parameters and display
int dimx = 256;
dim3 block(dimx);
	dim3 grid((PointCount + block.x - 1) / block.x);
//Computations using only the global memory
//Launch the start Event Timer
HandleCUDAError(hipEventRecord(kernel_start));
//Launch the kernel
gpuSharedNN << <grid, block >> > (d_Points, d_ClosestPointIndex, PointCount);
//Launch the stop Event Timer
HandleCUDAError(hipEventRecord(kernel_stop));
HandleCUDAError(hipEventSynchronize(kernel_stop));
//Block the CPU for the stop event to occur
GetCUDARunTimeError();
//Compute the Elapsed Time
HandleCUDAError(hipEventElapsedTime(&fElapsedTime, kernel_start, kernel_stop));
/*transfer data from GPU Memory to CPU Memory, measure the memory copy time, and
update the fMemoryTransferTime*/
start = std::chrono::system_clock::now();
HandleCUDAError(hipMemcpy(ClosestPointIndex, d_ClosestPointIndex, SizeInBytes1, hipMemcpyDeviceToHost));
end = std::chrono::system_clock::now();
std::chrono::duration<float> elapsed_seconds1 = end - start;
fMemoryTransferTime += elapsed_seconds1.count() * 1000.0f;
cout << "\tTotal Memory Transfer Time (H->D and H<-D): " << fMemoryTransferTime << " msecs" << endl;
cout << "\tShared memory GPU Nearest Neighborhood Computation Time: " << fElapsedTime << " msecs" << endl;
//Release the memory on the GPU
HandleCUDAError(hipFree(d_Points));
HandleCUDAError(hipFree(d_ClosestPointIndex));
HandleCUDAError(hipEventDestroy(kernel_start));
HandleCUDAError(hipEventDestroy(kernel_stop));
//Reset the GPU device
HandleCUDAError(hipDeviceReset());
} | 51f93bb025b8e9ed0d9f880c89b77ee1ff06c536.cu | #include "NearestNeighbors.h"
#include "GPUErrors.h"
#define DIMX 256 //Should be equal to number of threads/block.
__global__ void gpuSharedNN(float3* g_Points, int* g_ClosestPointIndex, int count)
{
	__shared__ float3 tile[DIMX];
	float NeighborClosest = 3.40282E38f;
	float distance;
	unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
	float3 myPoint;
	int NearestIndex = -1;
	if (ix < count)
	{
		myPoint = g_Points[ix];
	}
	//Walk the point set one tile of DIMX points at a time; the whole block
	//cooperatively loads each tile into shared memory (assumes blockDim.x == DIMX)
	for (int starti = 0; starti < count; starti += DIMX)
	{
		int loadIndex = starti + threadIdx.x;
		if (loadIndex < count)
		{
			tile[threadIdx.x] = g_Points[loadIndex];
		}
		__syncthreads();
		int tileSize = count - starti;
		if (tileSize > DIMX)
		{
			tileSize = DIMX;
		}
		if (ix < count)
		{
			for (int i = 0; i < tileSize; i++)
			{
				//Do not check distance between the same point
				if (starti + i != ix)
				{
					//Compute the distance between Points[ix] and the tiled point
					distance = sqrtf((myPoint.x - tile[i].x) * (myPoint.x - tile[i].x) + (myPoint.y - tile[i].y) * (myPoint.y - tile[i].y) + (myPoint.z - tile[i].z) * (myPoint.z - tile[i].z));
					//Is the computed distance nearest
					if (distance < NeighborClosest)
					{
						//Update the nearest neighbor distance and its index
						NeighborClosest = distance;
						NearestIndex = starti + i;
					}
				}
			}
		}
		//Wait until every thread is done with this tile before it is overwritten
		__syncthreads();
	}
	if (ix < count)
	{
		g_ClosestPointIndex[ix] = NearestIndex;
	}
}
//Host Function
__host__ void GPUSharedNearestNeighbors(float3* Points, int* ClosestPointIndex, int PointCount)
{
cudaEvent_t kernel_start;
cudaEvent_t kernel_stop;
float fElapsedTime = 0.0f;
float fMemoryTransferTime = 0.0f;
const int SizeInBytes = PointCount * sizeof(float3);
const int SizeInBytes1 = PointCount * sizeof(int);
HandleCUDAError(cudaEventCreate(&kernel_start));
HandleCUDAError(cudaEventCreate(&kernel_stop));
float3* d_Points;
int* d_ClosestPointIndex;
//Allocate device memory on the global memory
HandleCUDAError(cudaMalloc((void**)&d_Points, SizeInBytes));
HandleCUDAError(cudaMalloc((void**)&d_ClosestPointIndex, SizeInBytes1));
/*transfer data from CPU Memory to GPU Memory,measure the memory copy time, and
store in the fMemoryTransferTime*/
chrono::time_point<std::chrono::system_clock> start, end;
start = std::chrono::system_clock::now();
HandleCUDAError(cudaMemcpy(d_Points, Points, SizeInBytes, cudaMemcpyHostToDevice));
end = std::chrono::system_clock::now();
//Store the memory copy time in the variable
std::chrono::duration<float> elapsed_seconds = end - start;
fMemoryTransferTime = elapsed_seconds.count() * 1000.0f;
//cout << "\nGPU Computation using only Global Memory" << endl;
//Develop Block and Grid Parameters and display
int dimx = 256;
dim3 block(dimx);
	dim3 grid((PointCount + block.x - 1) / block.x);
//Computations using only the global memory
//Launch the start Event Timer
HandleCUDAError(cudaEventRecord(kernel_start));
//Launch the kernel
gpuSharedNN << <grid, block >> > (d_Points, d_ClosestPointIndex, PointCount);
//Launch the stop Event Timer
HandleCUDAError(cudaEventRecord(kernel_stop));
HandleCUDAError(cudaEventSynchronize(kernel_stop));
//Block the CPU for the stop event to occur
GetCUDARunTimeError();
//Compute the Elapsed Time
HandleCUDAError(cudaEventElapsedTime(&fElapsedTime, kernel_start, kernel_stop));
/*transfer data from GPU Memory to CPU Memory, measure the memory copy time, and
update the fMemoryTransferTime*/
start = std::chrono::system_clock::now();
HandleCUDAError(cudaMemcpy(ClosestPointIndex, d_ClosestPointIndex, SizeInBytes1, cudaMemcpyDeviceToHost));
end = std::chrono::system_clock::now();
std::chrono::duration<float> elapsed_seconds1 = end - start;
fMemoryTransferTime += elapsed_seconds1.count() * 1000.0f;
cout << "\tTotal Memory Transfer Time (H->D and H<-D): " << fMemoryTransferTime << " msecs" << endl;
cout << "\tShared memory GPU Nearest Neighborhood Computation Time: " << fElapsedTime << " msecs" << endl;
//Release the memory on the GPU
HandleCUDAError(cudaFree(d_Points));
HandleCUDAError(cudaFree(d_ClosestPointIndex));
HandleCUDAError(cudaEventDestroy(kernel_start));
HandleCUDAError(cudaEventDestroy(kernel_stop));
//Reset the GPU device
HandleCUDAError(cudaDeviceReset());
} |
46e77fedb46dece6e7cff1112026a855232e4d67.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "cudaRGB.h"
//-------------------------------------------------------------------------------------------------------------------------
__global__ void RGBToRGBAf(uchar3* srcImage,
float4* dstImage,
int width, int height)
{
const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
const int pixel = y * width + x;
if( x >= width )
return;
if( y >= height )
return;
// printf("cuda thread %i %i %i %i pixel %i \n", x, y, width, height, pixel);
const float s = 1.0f;
const uchar3 px = srcImage[pixel];
dstImage[pixel] = make_float4(px.x * s, px.y * s, px.z * s, 255.0f * s);
}
hipError_t cudaRGBToRGBAf( uchar3* srcDev, float4* destDev, size_t width, size_t height )
{
if( !srcDev || !destDev )
return hipErrorInvalidDevicePointer;
const dim3 blockDim(8,8,1);
const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height,blockDim.y), 1);
hipLaunchKernelGGL(( RGBToRGBAf), dim3(gridDim), dim3(blockDim), 0, 0, srcDev, destDev, width, height );
return CUDA(hipGetLastError());
}
//-------------------------------------------------------------------------------------------------------------------------
__global__ void RGBToRGBA8(float4* srcImage,
uchar4* dstImage,
int width, int height,
float scaling_factor)
{
const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
const int pixel = y * width + x;
if( x >= width )
return;
if( y >= height )
return;
const float4 px = srcImage[pixel];
dstImage[pixel] = make_uchar4(px.x * scaling_factor,
px.y * scaling_factor,
px.z * scaling_factor,
px.w * scaling_factor);
}
hipError_t cudaRGBAToRGBA8( float4* srcDev, uchar4* destDev, size_t width, size_t height, const float2& inputRange )
{
if( !srcDev || !destDev )
return hipErrorInvalidDevicePointer;
if( width == 0 || height == 0 )
return hipErrorInvalidValue;
const float multiplier = 255.0f / inputRange.y;
const dim3 blockDim(8,8,1);
const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height,blockDim.y), 1);
hipLaunchKernelGGL(( RGBToRGBA8), dim3(gridDim), dim3(blockDim), 0, 0, srcDev, destDev, width, height, multiplier );
return CUDA(hipGetLastError());
}
hipError_t cudaRGBAToRGBA8( float4* srcDev, uchar4* destDev, size_t width, size_t height )
{
return cudaRGBAToRGBA8(srcDev, destDev, width, height, make_float2(0.0f, 255.0f));
}
| 46e77fedb46dece6e7cff1112026a855232e4d67.cu | /*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "cudaRGB.h"
//-------------------------------------------------------------------------------------------------------------------------
__global__ void RGBToRGBAf(uchar3* srcImage,
float4* dstImage,
int width, int height)
{
const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
const int pixel = y * width + x;
if( x >= width )
return;
if( y >= height )
return;
// printf("cuda thread %i %i %i %i pixel %i \n", x, y, width, height, pixel);
const float s = 1.0f;
const uchar3 px = srcImage[pixel];
dstImage[pixel] = make_float4(px.x * s, px.y * s, px.z * s, 255.0f * s);
}
cudaError_t cudaRGBToRGBAf( uchar3* srcDev, float4* destDev, size_t width, size_t height )
{
if( !srcDev || !destDev )
return cudaErrorInvalidDevicePointer;
const dim3 blockDim(8,8,1);
const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height,blockDim.y), 1);
RGBToRGBAf<<<gridDim, blockDim>>>( srcDev, destDev, width, height );
return CUDA(cudaGetLastError());
}
//-------------------------------------------------------------------------------------------------------------------------
__global__ void RGBToRGBA8(float4* srcImage,
uchar4* dstImage,
int width, int height,
float scaling_factor)
{
const int x = (blockIdx.x * blockDim.x) + threadIdx.x;
const int y = (blockIdx.y * blockDim.y) + threadIdx.y;
const int pixel = y * width + x;
if( x >= width )
return;
if( y >= height )
return;
const float4 px = srcImage[pixel];
dstImage[pixel] = make_uchar4(px.x * scaling_factor,
px.y * scaling_factor,
px.z * scaling_factor,
px.w * scaling_factor);
}
cudaError_t cudaRGBAToRGBA8( float4* srcDev, uchar4* destDev, size_t width, size_t height, const float2& inputRange )
{
if( !srcDev || !destDev )
return cudaErrorInvalidDevicePointer;
if( width == 0 || height == 0 )
return cudaErrorInvalidValue;
const float multiplier = 255.0f / inputRange.y;
const dim3 blockDim(8,8,1);
const dim3 gridDim(iDivUp(width,blockDim.x), iDivUp(height,blockDim.y), 1);
RGBToRGBA8<<<gridDim, blockDim>>>( srcDev, destDev, width, height, multiplier );
return CUDA(cudaGetLastError());
}
cudaError_t cudaRGBAToRGBA8( float4* srcDev, uchar4* destDev, size_t width, size_t height )
{
return cudaRGBAToRGBA8(srcDev, destDev, width, height, make_float2(0.0f, 255.0f));
}
|
7fc8dcd425abbfad4d1be9bff726e1b55a1796bf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
*
* eventRecord.cu
*
* Microbenchmark for throughput of event recording
*
* Build with: nvcc -I ../chLib <options> eventRecord.cu
* Requires: No minimum SM requirement.
*
* Copyright (c) 2011-2014, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdio.h>
#include "chError.h"
#include "chTimer.h"
__global__
void
NullKernel()
{
}
#define EVENTRECORD_LAUNCH 0x01
#define EVENTRECORD_BLOCKING 0x02
template<int Flags>
double
usPerLaunch( int cIterations, int cEvents )
{
hipError_t status;
double microseconds, ret;
hipEvent_t *events = new hipEvent_t[cEvents];
chTimerTimestamp start, stop;
if ( ! events ) goto Error;
memset( events, 0, cEvents*sizeof(hipEvent_t) );
for ( int i = 0; i < cEvents; i++ ) {
CUDART_CHECK( hipEventCreateWithFlags( &events[i], (Flags & EVENTRECORD_BLOCKING) ? hipEventBlockingSync : 0 ) );
}
chTimerGetTime( &start );
for ( int i = 0; i < cIterations; i++ ) {
if ( Flags & EVENTRECORD_LAUNCH ) hipLaunchKernelGGL(( NullKernel), dim3(1), dim3(1), 0, 0, );
for ( int j = 0; j < cEvents; j++ ) {
CUDART_CHECK( hipEventRecord( events[j], NULL ) );
}
}
CUDART_CHECK( hipDeviceSynchronize() );
chTimerGetTime( &stop );
microseconds = 1e6*chTimerElapsedTime( &start, &stop );
if ( cEvents ) cIterations *= cEvents;
ret = microseconds / (float) cIterations;
Error:
if ( events ) {
for ( int i = 0; i < cEvents; i++ ) {
hipEventDestroy( events[i] );
}
}
delete[] events;
return (status) ? 0.0 : ret;
}
int
main( int argc, char *argv[] )
{
hipFree( 0 );
const int cIterations = 10000;
printf( "Measuring blocking event record overhead...\n" ); fflush( stdout );
printf( "#events\tus per event signaling\n" );
for ( int cEvents = 0; cEvents < 5; cEvents += 1 ) {
printf( "%d\t%.2f\n", cEvents*10, usPerLaunch<EVENTRECORD_BLOCKING>(cIterations, cEvents) );
}
printf( "Measuring asynchronous launch+event signaling...\n" ); fflush( stdout );
for ( int cEvents = 0; cEvents < 5; cEvents += 1 ) {
printf( "%d\t%.2f\n", cEvents*10, usPerLaunch<EVENTRECORD_LAUNCH | EVENTRECORD_BLOCKING>(cIterations, cEvents) );
}
return 0;
}
| 7fc8dcd425abbfad4d1be9bff726e1b55a1796bf.cu | /*
*
* eventRecord.cu
*
* Microbenchmark for throughput of event recording
*
* Build with: nvcc -I ../chLib <options> eventRecord.cu
* Requires: No minimum SM requirement.
*
* Copyright (c) 2011-2014, Archaea Software, LLC.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include <stdio.h>
#include "chError.h"
#include "chTimer.h"
__global__
void
NullKernel()
{
}
#define EVENTRECORD_LAUNCH 0x01
#define EVENTRECORD_BLOCKING 0x02
template<int Flags>
double
usPerLaunch( int cIterations, int cEvents )
{
cudaError_t status;
double microseconds, ret;
cudaEvent_t *events = new cudaEvent_t[cEvents];
chTimerTimestamp start, stop;
if ( ! events ) goto Error;
memset( events, 0, cEvents*sizeof(cudaEvent_t) );
for ( int i = 0; i < cEvents; i++ ) {
CUDART_CHECK( cudaEventCreateWithFlags( &events[i], (Flags & EVENTRECORD_BLOCKING) ? cudaEventBlockingSync : 0 ) );
}
chTimerGetTime( &start );
for ( int i = 0; i < cIterations; i++ ) {
if ( Flags & EVENTRECORD_LAUNCH) NullKernel<<<1,1>>>();
for ( int j = 0; j < cEvents; j++ ) {
CUDART_CHECK( cudaEventRecord( events[j], NULL ) );
}
}
CUDART_CHECK( cudaThreadSynchronize() );
chTimerGetTime( &stop );
microseconds = 1e6*chTimerElapsedTime( &start, &stop );
if ( cEvents ) cIterations *= cEvents;
ret = microseconds / (float) cIterations;
Error:
if ( events ) {
for ( int i = 0; i < cEvents; i++ ) {
cudaEventDestroy( events[i] );
}
}
delete[] events;
return (status) ? 0.0 : ret;
}
int
main( int argc, char *argv[] )
{
cudaFree( 0 );
const int cIterations = 10000;
printf( "Measuring blocking event record overhead...\n" ); fflush( stdout );
printf( "#events\tus per event signaling\n" );
for ( int cEvents = 0; cEvents < 5; cEvents += 1 ) {
printf( "%d\t%.2f\n", cEvents*10, usPerLaunch<EVENTRECORD_BLOCKING>(cIterations, cEvents) );
}
printf( "Measuring asynchronous launch+event signaling...\n" ); fflush( stdout );
for ( int cEvents = 0; cEvents < 5; cEvents += 1 ) {
printf( "%d\t%.2f\n", cEvents*10, usPerLaunch<EVENTRECORD_LAUNCH | EVENTRECORD_BLOCKING>(cIterations, cEvents) );
}
return 0;
}
|
99f9c5b4c12fffcd4b23beefc361ef2f7903699d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPEvent.h>
#include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h>
#include <ATen/native/Copy.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
#include <THH/THH.h>
#include <hip/hip_fp16.h>
#include <thrust/device_ptr.h>
#include <thrust/copy.h>
#include <ATen/native/hip/arc_flag.h>
#include <ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h>
#define nTPB 512
#define per_threads 256
#define nthreads 256
#define nblocks 256
#define find(n) (32 * (unsigned int)(n / 1024) + (n % 32))
#define mask(n) (0x80000000 >> (unsigned int)((n % 1024) / 32))
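// Layout used by find()/mask() above (example values for exposition): each group of 1024
// elements gets 32 mask words in a transposed arrangement -- element n lands in word
// find(n) = 32*(n/1024) + (n%32) at bit mask(n) = 0x80000000 >> ((n%1024)/32) -- which
// appears intended to spread the atomicAdd() traffic of neighbouring threads across
// different words. E.g. n = 40 maps to word 8 with bit 0x40000000, while its neighbour
// n = 41 maps to word 9 at the same bit position.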
namespace at {
namespace native {
using namespace at::cuda;
__global__ void half_scale(float *din, __half *dout, int dsize) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < dsize) dout[idx] = __float2half(din[idx]);
}
__global__ void float_scale(__half *din, float *dout, int dsize) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < dsize) dout[idx] = __half2float(din[idx]);
}
__global__ void double_scale(__half *din, double *dout, int dsize) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < dsize) dout[idx] = (double)__half2float(din[idx]);
}
__global__ void zero_mask(float *din, unsigned int *bit, unsigned int *pos, int dsize) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < dsize) {
if (din[idx] != 0.0f) {
atomicAdd(&bit[find(idx)], mask(idx));
atomicAdd(&pos[(unsigned int)(idx / 32)], 1);
}
}
}
__global__ void pos_first(unsigned int* pos, int asize) {
int total_idx = nblocks * nthreads;
for (int j = 0; j < (asize / per_threads / total_idx + 1); j++) {
int global_idx = threadIdx.x + blockIdx.x * blockDim.x;
if ((global_idx + 1) * per_threads - 1 <= asize) {
for (int i = 0; i < per_threads; i++) {
int idx = global_idx * per_threads + i;
if (idx % per_threads != 0) {
pos[idx] += pos[idx - 1];
}
}
}
}
}
__global__ void pos_second(unsigned int* pos, unsigned int* opos, int asize) {
int total_idx = nblocks * nthreads;
for (int j = 0; j < (asize / per_threads / total_idx + 1); j++) {
int global_idx = threadIdx.x + blockIdx.x * blockDim.x;
if ((global_idx + 1) * per_threads - 1 <= asize) {
unsigned int temp = 0;
for (int i = 0; i < global_idx; i++) {
int idx = (i + 1) * per_threads - 1;
temp += pos[idx];
}
for (int i = 0; i < per_threads; i++) {
int idx = (global_idx) * per_threads + i;
opos[idx] = pos[idx] + temp;
}
}
}
}
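// Worked example for pos_first/pos_second above (per_threads shrunk to 4 for exposition):
// together they form a blocked inclusive prefix sum over the per-word nonzero counts.
// Counts [1,0,2,1 | 3,0,0,2] become [1,1,3,4 | 3,3,3,5] after the per-chunk scan in
// pos_first, and opos = [1,1,3,4,7,7,7,9] after pos_second adds the totals of the
// preceding chunks, so opos[i] is the number of nonzero elements covered by words 0..i.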
__global__ void zero_insert_double(unsigned int *bit, unsigned int *nz_pos, float* din, double *dout, int dsize) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < dsize) {
int count = -1;
if ((unsigned int)(bit[find(idx)] & mask(idx)) > 0) {
for (int i = (int)(idx / 32) * 32; i < idx + 1; i++) {
unsigned int mask = bit[find(i)] & mask(i);
if (mask > 0) count += 1;
}
}
if (count == -1) dout[idx] = 0.0;
else {
if ((unsigned int)(idx / 32) == 0) {
dout[idx] = (double)din[count + 0];
} else {
dout[idx] = (double)din[count + nz_pos[(unsigned int)(idx / 32) - 1]];
}
}
}
}
__global__ void zero_insert_float(unsigned int *bit, unsigned int *nz_pos, float* din, float *dout, int dsize) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < dsize) {
int count = -1;
if ((unsigned int)(bit[find(idx)] & mask(idx)) > 0) {
for (int i = (int)(idx / 32) * 32; i < idx + 1; i++) {
unsigned int mask = bit[find(i)] & mask(i);
if (mask > 0) count += 1;
}
}
if (count == -1) dout[idx] = 0.0f;
else {
if ((unsigned int)(idx / 32) == 0) {
dout[idx] = din[count + 0];
} else {
dout[idx] = din[count + nz_pos[(unsigned int)(idx / 32) - 1]];
}
}
}
}
struct is_not_zero {
__host__ __device__
bool operator()(const float x) {
return (x != 0);
}
};
struct is_not_zero_double {
__host__ __device__
bool operator()(const double x) {
return (x != 0);
}
};
template <typename dst_t, typename src_t>
void copy_kernel_impl(TensorIterator& iter) {
gpu_kernel(iter, []GPU_LAMBDA(src_t x) -> dst_t {
return static_cast<dst_t>(static_cast<native::inter_copy_type_t<dst_t>>(x));
});
}
// device-to-device copy, does type conversion
static void copy_device_to_device(TensorIterator& iter, bool non_blocking) {
int64_t numel = iter.numel();
// We can memcpy the memory if both tensors have the same type AND both
// tensors are contiguous after dimension coalescing and reordering.
bool same_type = iter.dtype(0) == iter.dtype(1);
bool memcpy_eligible = same_type && iter.is_contiguous();
Device dst_device = iter.device(0);
Device src_device = iter.device(1);
HIPGuardMasqueradingAsCUDA device_guard(src_device);
// We always perform the copy on the source device, using the current stream
// on the source device, and we fully synchronize on both src and dst's
// current streams for completion of the copy. We have to explicitly do this
// for non-contig copies. This mimics the behavior of cross-device
// hipMemcpyAsync on the default stream.
HIPStreamMasqueradingAsCUDA copy_stream = getCurrentHIPStreamMasqueradingAsCUDA(src_device.index());
if (src_device != dst_device) {
// This is a cross-device copy on the src current stream and dst current
// stream. We perform a two-way barrier between both devices' streams
// before the copy. This ensures that any write-after-write and
// write-after-read dependencies on the destination side are handled, so
// that no one is operating on the dst memory when we perform the copy.
// src waits on dst barrier (src already waits on src)
CUDAEvent dst_ready;
device_guard.set_device(dst_device);
dst_ready.record(getCurrentHIPStreamMasqueradingAsCUDA(dst_device.index()));
device_guard.set_device(src_device);
dst_ready.block(copy_stream);
}
if (memcpy_eligible) {
// Perform the copy
AT_CUDA_CHECK(hipMemcpyAsync(
iter.data_ptr(0),
iter.data_ptr(1),
numel * iter.element_size(0),
hipMemcpyDeviceToDevice,
copy_stream));
} else {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, iter.dtype(0), "copy_", [&] {
using dst_t = scalar_t;
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, iter.dtype(1), "copy_", [&] {
copy_kernel_impl<dst_t, scalar_t>(iter);
});
});
}
if (src_device != dst_device) {
// dst waits on src barrier (dst already waits on dst). We cannot
// operate on dst's copy until the copy is complete.
// Still on src_device, record stream event
CUDAEvent src_ready;
src_ready.record(copy_stream);
device_guard.set_device(dst_device);
src_ready.block(getCurrentHIPStreamMasqueradingAsCUDA(dst_device.index()));
}
// AT_CUDA_CHECK(hipGetLastError());
}
static bool copy_requires_temporaries(TensorIterator& iter, bool p2p_enabled) {
Device dst_device = iter.device(0);
Device src_device = iter.device(1);
if (dst_device == src_device) {
// We never require temporaries for copies on the same GPU.
TORCH_INTERNAL_ASSERT(dst_device.is_cuda() && src_device.is_cuda());
return false;
}
bool same_dtype = iter.dtype(0) == iter.dtype(1);
if (same_dtype && iter.is_contiguous()) {
// Contiguous same-dtype copies can always use hipMemcpyAsync
return false;
} else if (dst_device.is_cuda() && src_device.is_cuda()) {
// Copies between GPUs can use the copy kernel if P2P is supported
return !p2p_enabled;
} else {
// The remaining cases require temporaries. For example, this includes
// non-contiguous copies between CPU and GPU.
return true;
}
}
static bool maybe_enable_p2p_access(Device dst_device, Device src_device) {
if (dst_device.is_cpu() || src_device.is_cpu()) {
return false;
}
return THCState_getPeerToPeerAccess(
globalContext().getTHCState(), src_device.index(), dst_device.index());
}
static void copy_kernel_cuda(TensorIterator& iter, bool non_blocking) {
AT_ASSERT(iter.ntensors() == 2);
Device dst_device = iter.device(0);
Device src_device = iter.device(1);
// Enable p2p access between devices. (No-op if it involves the CPU)
bool p2p_enabled = maybe_enable_p2p_access(dst_device, src_device);
if (copy_requires_temporaries(iter, p2p_enabled)) {
// NB: this involves recursive calls to copy. Be careful that those copies
// don't require temporaries or you will cause an infinite recursion!
auto& dst = iter.tensor(0);
Tensor dst_contig;
Tensor src_contig;
// Type conversions are performed on the CPU for CPU-GPU copies and on
// the src device for GPU-GPU copies.
if (iter.device_type(0) == kCUDA) {
dst_contig = dst.is_contiguous() ? dst : at::empty_like(dst);
src_contig = iter.tensor(1).to(iter.dtype(0)).expand_as(dst).contiguous();
} else {
bool same_type = iter.dtype(0) == iter.dtype(1);
dst_contig = (dst.is_contiguous() && same_type) ? dst : at::empty_like(dst, iter.dtype(1));
src_contig = iter.tensor(1).expand_as(dst).contiguous();
}
// perform a same-dtype copy on contiguous tensors
TORCH_INTERNAL_ASSERT(dst_contig.sizes().equals(src_contig.sizes()));
TORCH_INTERNAL_ASSERT(dst_contig.scalar_type() == src_contig.scalar_type());
dst_contig.copy_(src_contig, non_blocking);
// if necessary, copy back into dst
if (!dst_contig.is_same(dst)) {
TORCH_INTERNAL_ASSERT(dst_contig.device() == dst.device());
dst.copy_(dst_contig, non_blocking);
}
return;
}
// Copy on GPU (or between GPUs)
if (dst_device.is_cuda() && src_device.is_cuda()) {
copy_device_to_device(iter, non_blocking);
return;
}
// Copy between CPU and GPU
hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
hipMemcpyKind kind;
if (dst_device.is_cuda() && src_device.is_cpu()) {
device_guard.set_device(dst_device);
kind = hipMemcpyHostToDevice;
} else if (dst_device.is_cpu() && src_device.is_cuda()) {
device_guard.set_device(src_device);
kind = hipMemcpyDeviceToHost;
} else {
TORCH_INTERNAL_ASSERT(false, "unsupported devices in GPU copy_()");
}
void* dst = iter.data_ptr(0);
void* src = iter.data_ptr(1);
int64_t nbytes = iter.numel() * iter.element_size(0);
HIPStreamMasqueradingAsCUDA stream = getCurrentHIPStreamMasqueradingAsCUDA();
AT_CUDA_CHECK(hipMemcpyAsync(dst, src, nbytes, kind, stream));
if (non_blocking) {
void* ptr = (dst_device == kCPU ? dst : src);
AT_CUDA_CHECK(THCCachingHostAllocator_recordEvent(ptr, stream));
} else {
AT_CUDA_CHECK(hipStreamSynchronize(stream));
}
}
static void ARC_copy_kernel_cuda(TensorIterator& iter, bool non_blocking, int tid, bool is_csr) {
// [JS] The SSD flag is only relevant if the 'dir' information was properly saved at arc_vm;
// that is only the case for offload and prefetch.
arcp2p_dir dir = arc_vm.get_dir(tid);
bool ssd_flag = arc_vm.is_using_ssd() && (dir != arcp2p_unused);
// [JS] now the fp16 & csr options are delivered by flag settings
// Note: FP16 should be set whenever CSR is set,
// so we don't care about the FP16=false & CSR=true case
bool fp16_flag = arc_vm.is_fp16();
bool csr_flag = arc_vm.is_csr();
// [JS] clear dir value, to avoid confusion
arc_vm.set_dir(tid, arcp2p_unused);
AT_ASSERT(iter.ntensors() == 2);
Device dst_device = iter.device(0);
Device src_device = iter.device(1);
// Enable p2p access between devices. (No-op if it involves the CPU)
bool p2p_enabled = maybe_enable_p2p_access(dst_device, src_device);
/*
if (copy_requires_temporaries(iter, p2p_enabled)) {
// NB: this involves recursive calls to copy. Be careful that those copies
// don't require temporaries or you will cause an infinite recursion!
auto& dst = iter.tensor(0);
Tensor dst_contig;
Tensor src_contig;
// Type conversions are performed on the CPU for CPU-GPU copies and on
// the src device for GPU-GPU copies.
if (iter.device_type(0) == kCUDA) {
dst_contig = dst.is_contiguous() ? dst : at::empty_like(dst);
src_contig = iter.tensor(1).to(iter.dtype(0)).expand_as(dst).contiguous();
} else {
bool same_type = iter.dtype(0) == iter.dtype(1);
dst_contig = (dst.is_contiguous() && same_type) ? dst : at::empty_like(dst, iter.dtype(1));
src_contig = iter.tensor(1).expand_as(dst).contiguous();
}
// perform a same-dtype copy on contiguous tensors
TORCH_INTERNAL_ASSERT(dst_contig.sizes().equals(src_contig.sizes()));
TORCH_INTERNAL_ASSERT(dst_contig.scalar_type() == src_contig.scalar_type());
dst_contig.copy_(src_contig, non_blocking);
// if necessary, copy back into dst
if (!dst_contig.is_same(dst)) {
TORCH_INTERNAL_ASSERT(dst_contig.device() == dst.device());
dst.copy_(dst_contig, non_blocking);
}
return;
}
*/
// Copy on GPU (or between GPUs)
/*
if (dst_device.is_cuda() && src_device.is_cuda()) {
copy_device_to_device(iter, non_blocking);
return;
}
*/
// Copy between CPU and GPU
hip::OptionalHIPGuardMasqueradingAsCUDA device_guard;
hipMemcpyKind kind;
if (dst_device.is_cuda() && src_device.is_cpu()) {
device_guard.set_device(dst_device);
kind = hipMemcpyHostToDevice;
arc_vm.event_arr_h2d[tid] = true;
} else if (dst_device.is_cpu() && src_device.is_cuda()) {
device_guard.set_device(src_device);
kind = hipMemcpyDeviceToHost;
arc_vm.event_arr_d2h[tid] = true;
} else {
TORCH_INTERNAL_ASSERT(false, "unsupported devices in GPU copy_()");
}
uint64_t p2p_addr = 0, p2p_size = 0;
void* dst = iter.data_ptr(0);
void* src = iter.data_ptr(1);
int64_t nbytes = iter.numel() * iter.element_size(0);
// HIPStreamMasqueradingAsCUDA stream = getCurrentHIPStreamMasqueradingAsCUDA();
hipStream_t stream = at::native::arc_vm.arc_stream;
arc_vm.set_elem(tid, iter.element_size(0));
if (true == ssd_flag) {
if (!arc_vm.mapping) {
// [TODO] this should be called only when the Tesla option is enabled
void* deviceAddr = arc_vm.get_device_addr();
uint64_t deviceSz = arc_vm.get_device_sz();
arc_vm.Arcp2pBarMapping((uint64_t)deviceAddr, deviceSz);
arc_vm.mapping = true;
}
}
size_t bit_elements, pos_elements, pos_elements_before;
if (csr_flag) {
bit_elements = (size_t)((iter.numel() + 1024 - 1) / 1024) * 32;
pos_elements_before = (size_t)((iter.numel() + 32 - 1) / 32);
int count = 0;
while (pos_elements_before != 0) {
pos_elements_before = pos_elements_before >> 1; count++;
}
pos_elements = 1 << count;
}
if (kind == hipMemcpyDeviceToHost) {
if (iter.element_size(0) >= 4) {
if (csr_flag && is_csr) {
void *fp16, *bit, *pos;
arc_vm.p2p_malloc(&bit, sizeof(unsigned int) * bit_elements);
arc_vm.p2p_malloc(&pos, sizeof(unsigned int) * pos_elements);
arc_vm.set_bit_addr(tid, (uint64_t)bit);
arc_vm.set_pos_addr(tid, (uint64_t)pos);
unsigned int *nz_pos;
arc_vm.p2p_malloc((void **)&nz_pos, pos_elements * sizeof(unsigned int));
hipMemsetAsync((void *)bit, 0, sizeof(unsigned int) * bit_elements, stream);
hipMemsetAsync((void *)pos, 0, sizeof(unsigned int) * pos_elements, stream);
hipMemsetAsync((void *)nz_pos, 0, sizeof(unsigned int) * pos_elements, stream);
void *nz_src;
if (iter.element_size(0) == 8) {
arc_vm.p2p_malloc((void **)&nz_src, iter.numel() * sizeof(double));
hipMemsetAsync((void *)nz_src, 0, sizeof(double) * iter.numel(), stream);
thrust::device_ptr<double> dA_V((double *)src);
thrust::device_ptr<double> dA_R((double *)nz_src);
thrust::copy_if(dA_V, dA_V + iter.numel(), dA_R, is_not_zero_double());
} else {
arc_vm.p2p_malloc((void **)&nz_src, iter.numel() * sizeof(float));
hipMemsetAsync((void *)nz_src, 0, sizeof(float) * iter.numel(), stream);
thrust::device_ptr<float> dA_V((float *)src);
thrust::device_ptr<float> dA_R((float *)nz_src);
thrust::copy_if(dA_V, dA_V + iter.numel(), dA_R, is_not_zero());
}
hipLaunchKernelGGL(( zero_mask), dim3((iter.numel() + nTPB - 1) / nTPB), dim3(nTPB), 0, stream, (float *)src, (unsigned int *)bit, nz_pos, iter.numel());
hipLaunchKernelGGL(( pos_first), dim3(nblocks), dim3(nthreads), 0, stream, nz_pos, pos_elements);
hipLaunchKernelGGL(( pos_second), dim3(nblocks), dim3(nthreads), 0, stream, nz_pos, (unsigned int*)pos, pos_elements);
int resize = 0;
hipMemcpyAsync((void *)&resize, (void *)((size_t)pos + sizeof(unsigned int) * (pos_elements - 1)),
sizeof(int), hipMemcpyDeviceToHost, stream);
arc_vm.p2p_malloc(&fp16, sizeof(__half) * resize);
arc_vm.set_fp16_addr(tid, (uint64_t)fp16);
hipLaunchKernelGGL(( half_scale), dim3((iter.numel() + nTPB - 1) / nTPB), dim3(nTPB), 0, stream, (float *)nz_src, (__half *)fp16, resize);
arc_vm.set_resize(tid, resize);
arc_vm.set_numel(tid, iter.numel());
if (globalContext().ARCGlobal.isDebugMode()) {
std::cout << "CSR in d2h, resize: " << resize << ", original: " << iter.numel() << ", elem_size: " << iter.element_size(0) << ", tid: " << tid << ", fp16: " << fp16 << std::endl;
}
if (true == ssd_flag) {
p2p_addr = (uint64_t)fp16;
p2p_size = (uint64_t)(resize * sizeof(__half));
} else {
AT_CUDA_CHECK(hipMemcpyAsync(dst, fp16, resize * sizeof(__half), kind, stream));
if (globalContext().ARCGlobal.isDebugMode()) {
std::cout << "CSR FP16 mem free tid: " << tid << ", size: " << sizeof(__half) * resize << std::endl;
}
arc_vm.p2p_free(fp16, resize * sizeof(__half));
arc_vm.event_arr_d2h[tid] = false;
}
arc_vm.p2p_free((void *)nz_pos, pos_elements * sizeof(unsigned int));
arc_vm.p2p_free((void *)nz_src, iter.numel() * sizeof(float));
} else if (fp16_flag) {
// this branch covers both cases:
// 1. csr_flag==true && is_csr==false (csr_flag==true always guarantees fp16_flag==true)
// 2. csr_flag==false && fp16_flag==true
// keep print message for debug purpose
void *fp16;
arc_vm.p2p_malloc(&fp16, sizeof(__half) * iter.numel());
arc_vm.set_fp16_addr(tid, (uint64_t)fp16);
if (globalContext().ARCGlobal.isDebugMode()) {
if (csr_flag) {
std::cout << "No CSR in d2h, original: " << iter.numel() << ", elem_size: " << iter.element_size(0) << ", tid: " << tid << ", fp16 addr: " << fp16 << std::endl;
} else {
std::cout << "FP16 in d2h, original: " << iter.numel() << ", elem_size: " << iter.element_size(0) << ", tid: " << tid << ", fp16 addr: " << fp16 << std::endl;
}
}
hipLaunchKernelGGL(( half_scale), dim3((iter.numel() + nTPB - 1) / nTPB), dim3(nTPB), 0, stream, (float *)src, (__half *)fp16, iter.numel());
arc_vm.set_resize(tid, 0); // [TODO] slight hack code, we will distinguish CSR / FP16 by resize value
arc_vm.set_numel(tid, iter.numel());
if (true == ssd_flag) {
p2p_addr = (uint64_t)fp16;
p2p_size = (uint64_t)(iter.numel() * sizeof(__half));
} else {
AT_CUDA_CHECK(hipMemcpyAsync(dst, fp16, sizeof(__half) * iter.numel(), kind, stream));
if (globalContext().ARCGlobal.isDebugMode()) {
std::cout << "No CSR FP16 mem free tid: " << tid << ", size: " << sizeof(__half) * iter.numel() << std::endl;
}
arc_vm.p2p_free(fp16, iter.numel() * sizeof(__half));
arc_vm.event_arr_d2h[tid] = false;
}
} else { // false == csr_flag && false == fp16_flag
if (true == ssd_flag) {
// TODO Need to malloc src ptr to BAR attached region
void *fp16;
arc_vm.p2p_malloc(&fp16, nbytes);
arc_vm.set_fp16_addr(tid, (uint64_t)fp16);
arc_vm.set_numel(tid, (size_t)nbytes);
arc_vm.set_resize(tid, -1); // [TODO] slight hack code, we will distinguish CSR / FP16 by resize value
AT_CUDA_CHECK(hipMemcpyAsync(fp16, src, nbytes, hipMemcpyDeviceToDevice, stream));
p2p_addr = (uint64_t)fp16;
p2p_size = (uint64_t)nbytes;
if (globalContext().ARCGlobal.isDebugMode()) {
std::cout << "Nothing in d2h, original: " << iter.numel() << ", elem_size: " << iter.element_size(0) << ", tid: " << tid << ", fp16: " << fp16 << std::endl;
}
} else {
if (globalContext().ARCGlobal.isDebugMode()) {
std::cout << "Nothing in d2h, original: " << iter.numel() << ", elem_size: " << iter.element_size(0) << ", tid: " << tid << std::endl;
}
arc_vm.set_resize(tid, -1); // [TODO] slight hack code, we will distinguish CSR / FP16 by resize value
arc_vm.set_numel(tid, iter.numel());
arc_vm.set_fp16_addr(tid, (uint64_t)NULL);
if (globalContext().ARCGlobal.isDebugMode()) {
std::cout << "TODO: Duplicated FP16 mem free tid: " << tid << std::endl;
}
AT_CUDA_CHECK(hipMemcpyAsync(dst, src, nbytes, kind, stream));
arc_vm.event_arr_d2h[tid] = false;
}
}
} else { // Non double or float
if (true == ssd_flag) {
// TODO Need to malloc src ptr to BAR attached region
void *fp16;
arc_vm.p2p_malloc(&fp16, nbytes);
arc_vm.set_fp16_addr(tid, (uint64_t)fp16);
arc_vm.set_resize(tid, -1); // [TODO] slight hack code, we will distinguish CSR / FP16 by resize value
arc_vm.set_numel(tid, (size_t)nbytes);
AT_CUDA_CHECK(hipMemcpyAsync(fp16, src, nbytes, hipMemcpyDeviceToDevice, stream));
p2p_addr = (uint64_t)fp16;
p2p_size = (uint64_t)nbytes;
if (globalContext().ARCGlobal.isDebugMode()) {
std::cout << "No float/double in d2h, original: " << iter.numel() << ", elem_size: " << iter.element_size(0) << ", tid: " << tid << ", fp16: " << fp16 << std::endl;
}
} else {
if (globalContext().ARCGlobal.isDebugMode()) {
std::cout << "No float/double in d2h, original: " << iter.numel() << ", elem_size: " << iter.element_size(0) << ", tid: " << tid << std::endl;
}
arc_vm.set_resize(tid, -1); // [TODO] slight hack code, we will distinguish CSR / FP16 by resize value
arc_vm.set_numel(tid, iter.numel());
arc_vm.set_fp16_addr(tid, (uint64_t)NULL);
AT_CUDA_CHECK(hipMemcpyAsync(dst, src, nbytes, kind, stream));
arc_vm.event_arr_d2h[tid] = false;
}
}
}
if (kind == hipMemcpyHostToDevice) {
if (iter.element_size(0) >= 4) {
if (csr_flag && is_csr) {
void* bit = arc_vm.get_bit_addr(tid);
void* pos = arc_vm.get_pos_addr(tid);
int resize = arc_vm.get_resize(tid);
void* fp16;
arc_vm.p2p_malloc(&fp16, sizeof(__half) * resize);
arc_vm.set_fp16_addr(tid, (uint64_t)fp16);
if (globalContext().ARCGlobal.isDebugMode()) {
std::cout << "CSR in h2d, resize: " << resize << ", original: " << iter.numel() << ", elem_size: " << iter.element_size(0) << ", tid: " << tid << ", fp16: " << fp16 << std::endl;
}
if (ssd_flag) {
p2p_addr = (uint64_t)fp16;
p2p_size = (uint64_t)(resize * sizeof(__half));
// [JS] all backend jobs will be handled at Arcp2pCompletion
} else {
float *nz_dst;
arc_vm.p2p_malloc((void **)&nz_dst, resize * sizeof(float));
hipMemsetAsync((void *)nz_dst, 0, resize * sizeof(float), stream);
AT_CUDA_CHECK(hipMemcpyAsync(fp16, src, resize * sizeof(__half), kind, stream));
hipLaunchKernelGGL(( float_scale), dim3((iter.numel() + nTPB - 1) / nTPB), dim3(nTPB), 0, stream, (__half *)fp16, nz_dst, resize);
// float_scale<<<(resize + nTPB - 1) / nTPB, nTPB, 0, stream>>>((__half *)fp16, nz_dst, resize);
if (iter.element_size(0) == 8) {
hipLaunchKernelGGL(( zero_insert_double), dim3((iter.numel() + nTPB - 1) / nTPB), dim3(nTPB), 0, stream, (unsigned int*)bit, (unsigned int*)pos, nz_dst, (double *)dst, iter.numel());
} else {
hipLaunchKernelGGL(( zero_insert_float), dim3((iter.numel() + nTPB - 1) / nTPB), dim3(nTPB), 0, stream, (unsigned int*)bit, (unsigned int*)pos, nz_dst, (float *)dst, iter.numel());
}
arc_vm.p2p_free((void *)nz_dst, resize * sizeof(float));
}
} else if (fp16_flag) {
// keep print message for debug purpose
void* fp16;
arc_vm.p2p_malloc(&fp16, sizeof(__half) * iter.numel());
arc_vm.set_fp16_addr(tid, (uint64_t)fp16);
arc_vm.set_numel(tid, iter.numel());
arc_vm.set_resize(tid, 0);
if (globalContext().ARCGlobal.isDebugMode()) {
if (csr_flag) {
std::cout << "No CSR in h2d, original: " << iter.numel() << ", elem_size: " << iter.element_size(0) << ", tid: " << tid << ", fp16: " << fp16 << ", requested size: " << sizeof(__half) * iter.numel() << std::endl;
} else {
std::cout << "FP16 in h2d, original: " << iter.numel() << ", elem_size: " << iter.element_size(0) << ", tid: " << tid << ", fp16: " << fp16 << std::endl;
}
}
if (ssd_flag) {
p2p_addr = (uint64_t)fp16;
p2p_size = (uint64_t)(iter.numel() * sizeof(__half));
} else {
AT_CUDA_CHECK(hipMemcpyAsync(fp16, src, iter.numel() * sizeof(__half), kind, stream));
if (iter.element_size(0) == 8) {
hipLaunchKernelGGL(( double_scale), dim3((iter.numel() + nTPB - 1) / nTPB), dim3(nTPB), 0, stream, (__half* )fp16, (double*)dst, iter.numel());
} else {
hipLaunchKernelGGL(( float_scale), dim3((iter.numel() + nTPB - 1) / nTPB), dim3(nTPB), 0, stream, (__half* )fp16, (float*)dst, iter.numel());
}
if (at::globalContext().ARCGlobal.isOnDemand()) {
hipStreamSynchronize(stream);
arc_vm.event_arr_h2d[tid] = false;
}
}
} else {
if (true == ssd_flag) {
void* fp16;
arc_vm.p2p_malloc(&fp16, nbytes);
arc_vm.set_fp16_addr(tid, (uint64_t)fp16);
p2p_addr = (uint64_t)fp16;
p2p_size = (uint64_t)nbytes;
if (globalContext().ARCGlobal.isDebugMode()) {
std::cout << "Nothing in h2d, original: " << iter.numel() << ", elem_size: " << iter.element_size(0) << ", tid: " << tid << ", fp16: " << fp16 << std::endl;
}
} else {
if (globalContext().ARCGlobal.isDebugMode()) {
std::cout << "Nothing in h2d, original: " << iter.numel() << ", elem_size: " << iter.element_size(0) << ", tid: " << tid << std::endl;
}
arc_vm.set_fp16_addr(tid, (uint64_t)NULL);
AT_CUDA_CHECK(hipMemcpyAsync(dst, src, nbytes, kind, stream));
if (at::globalContext().ARCGlobal.isOnDemand()) {
hipStreamSynchronize(stream);
arc_vm.event_arr_h2d[tid] = false;
}
}
}
} else {
if (true == ssd_flag) {
void* fp16;
arc_vm.p2p_malloc(&fp16, nbytes);
arc_vm.set_fp16_addr(tid, (uint64_t)fp16);
p2p_addr = (uint64_t)fp16;
p2p_size = (uint64_t)nbytes;
if (globalContext().ARCGlobal.isDebugMode()) {
std::cout << "No float/double in h2d, original: " << iter.numel() << ", elem_size: " << iter.element_size(0) << ", tid: " << tid << ", fp16: " << fp16 << std::endl;
}
} else {
if (globalContext().ARCGlobal.isDebugMode()) {
std::cout << "No float/double in h2d, original: " << iter.numel() << ", elem_size: " << iter.element_size(0) << ", tid: " << tid << std::endl;
}
arc_vm.set_fp16_addr(tid, (uint64_t)NULL);
AT_CUDA_CHECK(hipMemcpyAsync(dst, src, nbytes, kind, stream));
if (at::globalContext().ARCGlobal.isOnDemand()) {
hipStreamSynchronize(stream);
arc_vm.event_arr_h2d[tid] = false;
}
}
}
}
if (ssd_flag) {
uint64_t *p_offs = arc_vm.get_offset_ptr(tid);
arcp2p_cpl *p_cpl = (arcp2p_cpl *)arc_vm.get_cpl_addr(tid, dir);
if (arcp2p_gputossd == dir) {
c10::Storage *stor = nullptr;
if (false == fp16_flag) {
stor = new c10::Storage;
*stor = iter.tensor(1).storage();
}
arcp2p_info *info = nullptr;
// if (true == fp16_flag) {
info = new arcp2p_info;
info->tid = (uint64_t)tid;
info->ptr = arc_vm.get_fp16_addr(tid);
// }
// arc_vm.Arcp2pSubmission(p2p_addr, p2p_size, p_offs, p_cpl, dir, stor, info);
// arc_vm.Arcp2pSubmission(p2p_addr, p2p_size, p_offs, p_cpl, dir, stor, info, stream.stream());
arc_vm.Arcp2pSubmission(p2p_addr, p2p_size, p_offs, p_cpl, dir, stor, info, stream);
arc_vm.Arcp2pCompletion(false);
} else if (arcp2p_ssdtogpu == dir) {
arcp2p_info *info = nullptr;
// if (true == fp16_flag) {
info = new arcp2p_info;
info->tid = (uint64_t)tid;
info->numel = (uint64_t)iter.numel();
info->ntpb = nTPB;
info->dst = iter.data_ptr(0);
info->src = iter.data_ptr(1);
info->ptr = arc_vm.get_fp16_addr(tid);
// }
// arc_vm.Arcp2pSubmission(p2p_addr, p2p_size, p_offs, p_cpl, dir, nullptr, info);
// arc_vm.Arcp2pSubmission(p2p_addr, p2p_size, p_offs, p_cpl, dir, nullptr, info, stream.stream());
arc_vm.Arcp2pSubmission(p2p_addr, p2p_size, p_offs, p_cpl, dir, nullptr, info, stream);
arc_vm.Arcp2pCompletion(false);
}
}
/*
} else {
if (non_blocking) {
void* ptr = (dst_device == kCPU ? dst : src);
AT_CUDA_CHECK(THCCachingHostAllocator_recordEvent(ptr, stream));
} else {
AT_CUDA_CHECK(hipStreamSynchronize(stream));
}
}
*/
}
REGISTER_DISPATCH(copy_stub, ©_kernel_cuda);
REGISTER_DISPATCH(ARC_copy_stub, &ARC_copy_kernel_cuda);
} // namespace native
} // namespace at
| 99f9c5b4c12fffcd4b23beefc361ef2f7903699d.cu | #include <ATen/ATen.h>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAEvent.h>
#include <c10/cuda/CUDAStream.h>
#include <ATen/native/Copy.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
#include <THC/THC.h>
#include <cuda_fp16.h>
#include <thrust/device_ptr.h>
#include <thrust/copy.h>
#include <ATen/native/cuda/arc_flag.h>
#include <c10/cuda/CUDACachingAllocator.h>
#define nTPB 512
#define per_threads 256
#define nthreads 256
#define nblocks 256
#define find(n) (32 * (unsigned int)(n / 1024) + (n % 32))
#define mask(n) (0x80000000 >> (unsigned int)((n % 1024) / 32))
namespace at {
namespace native {
using namespace at::cuda;
__global__ void half_scale(float *din, __half *dout, int dsize) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < dsize) dout[idx] = __float2half(din[idx]);
}
__global__ void float_scale(__half *din, float *dout, int dsize) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < dsize) dout[idx] = __half2float(din[idx]);
}
__global__ void double_scale(__half *din, double *dout, int dsize) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < dsize) dout[idx] = (double)__half2float(din[idx]);
}
__global__ void zero_mask(float *din, unsigned int *bit, unsigned int *pos, int dsize) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < dsize) {
if (din[idx] != 0.0f) {
atomicAdd(&bit[find(idx)], mask(idx));
atomicAdd(&pos[(unsigned int)(idx / 32)], 1);
}
}
}
__global__ void pos_first(unsigned int* pos, int asize) {
int total_idx = nblocks * nthreads;
for (int j = 0; j < (asize / per_threads / total_idx + 1); j++) {
int global_idx = threadIdx.x + blockIdx.x * blockDim.x;
if ((global_idx + 1) * per_threads - 1 <= asize) {
for (int i = 0; i < per_threads; i++) {
int idx = global_idx * per_threads + i;
if (idx % per_threads != 0) {
pos[idx] += pos[idx - 1];
}
}
}
}
}
__global__ void pos_second(unsigned int* pos, unsigned int* opos, int asize) {
int total_idx = nblocks * nthreads;
for (int j = 0; j < (asize / per_threads / total_idx + 1); j++) {
int global_idx = threadIdx.x + blockIdx.x * blockDim.x;
if ((global_idx + 1) * per_threads - 1 <= asize) {
unsigned int temp = 0;
for (int i = 0; i < global_idx; i++) {
int idx = (i + 1) * per_threads - 1;
temp += pos[idx];
}
for (int i = 0; i < per_threads; i++) {
int idx = (global_idx) * per_threads + i;
opos[idx] = pos[idx] + temp;
}
}
}
}
__global__ void zero_insert_double(unsigned int *bit, unsigned int *nz_pos, float* din, double *dout, int dsize) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < dsize) {
int count = -1;
if ((unsigned int)(bit[find(idx)] & mask(idx)) > 0) {
for (int i = (int)(idx / 32) * 32; i < idx + 1; i++) {
unsigned int mask = bit[find(i)] & mask(i);
if (mask > 0) count += 1;
}
}
if (count == -1) dout[idx] = 0.0;
else {
if ((unsigned int)(idx / 32) == 0) {
dout[idx] = (double)din[count + 0];
} else {
dout[idx] = (double)din[count + nz_pos[(unsigned int)(idx / 32) - 1]];
}
}
}
}
__global__ void zero_insert_float(unsigned int *bit, unsigned int *nz_pos, float* din, float *dout, int dsize) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx < dsize) {
int count = -1;
if ((unsigned int)(bit[find(idx)] & mask(idx)) > 0) {
for (int i = (int)(idx / 32) * 32; i < idx + 1; i++) {
unsigned int mask = bit[find(i)] & mask(i);
if (mask > 0) count += 1;
}
}
if (count == -1) dout[idx] = 0.0f;
else {
if ((unsigned int)(idx / 32) == 0) {
dout[idx] = din[count + 0];
} else {
dout[idx] = din[count + nz_pos[(unsigned int)(idx / 32) - 1]];
}
}
}
}
struct is_not_zero {
__host__ __device__
bool operator()(const float x) {
return (x != 0);
}
};
struct is_not_zero_double {
__host__ __device__
bool operator()(const double x) {
return (x != 0);
}
};
template <typename dst_t, typename src_t>
void copy_kernel_impl(TensorIterator& iter) {
gpu_kernel(iter, []GPU_LAMBDA(src_t x) -> dst_t {
return static_cast<dst_t>(static_cast<native::inter_copy_type_t<dst_t>>(x));
});
}
// device-to-device copy, does type conversion
static void copy_device_to_device(TensorIterator& iter, bool non_blocking) {
int64_t numel = iter.numel();
// We can memcpy the memory if both tensors have the same type AND both
// tensors are contiguous after dimension coalescing and reordering.
bool same_type = iter.dtype(0) == iter.dtype(1);
bool memcpy_eligible = same_type && iter.is_contiguous();
Device dst_device = iter.device(0);
Device src_device = iter.device(1);
CUDAGuard device_guard(src_device);
// We always perform the copy on the source device, using the current stream
// on the source device, and we fully synchronize on both src and dst's
// current streams for completion of the copy. We have to explicitly do this
// for non-contig copies. This mimics the behavior of cross-device
// cudaMemcpyAsync on the default stream.
CUDAStream copy_stream = getCurrentCUDAStream(src_device.index());
if (src_device != dst_device) {
// This is a cross-device copy on the src current stream and dst current
// stream. We perform a two-way barrier between both devices' streams
// before the copy. This ensures that any write-after-write and
// write-after-read dependencies on the destination side are handled, so
// that no one is operating on the dst memory when we perform the copy.
// src waits on dst barrier (src already waits on src)
CUDAEvent dst_ready;
device_guard.set_device(dst_device);
dst_ready.record(getCurrentCUDAStream(dst_device.index()));
device_guard.set_device(src_device);
dst_ready.block(copy_stream);
}
if (memcpy_eligible) {
// Perform the copy
AT_CUDA_CHECK(cudaMemcpyAsync(
iter.data_ptr(0),
iter.data_ptr(1),
numel * iter.element_size(0),
cudaMemcpyDeviceToDevice,
copy_stream));
} else {
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, iter.dtype(0), "copy_", [&] {
using dst_t = scalar_t;
AT_DISPATCH_ALL_TYPES_AND2(kHalf, kBool, iter.dtype(1), "copy_", [&] {
copy_kernel_impl<dst_t, scalar_t>(iter);
});
});
}
if (src_device != dst_device) {
// dst waits on src barrier (dst already waits on dst). We cannot
// operate on dst's copy until the copy is complete.
// Still on src_device, record stream event
CUDAEvent src_ready;
src_ready.record(copy_stream);
device_guard.set_device(dst_device);
src_ready.block(getCurrentCUDAStream(dst_device.index()));
}
// AT_CUDA_CHECK(cudaGetLastError());
}
static bool copy_requires_temporaries(TensorIterator& iter, bool p2p_enabled) {
Device dst_device = iter.device(0);
Device src_device = iter.device(1);
if (dst_device == src_device) {
// We never require temporaries for copies on the same GPU.
TORCH_INTERNAL_ASSERT(dst_device.is_cuda() && src_device.is_cuda());
return false;
}
bool same_dtype = iter.dtype(0) == iter.dtype(1);
if (same_dtype && iter.is_contiguous()) {
// Contiguous same-dtype copies can always use cudaMemcpyAsync
return false;
} else if (dst_device.is_cuda() && src_device.is_cuda()) {
// Copies between GPUs can use the copy kernel if P2P is supported
return !p2p_enabled;
} else {
// The remaining cases require temporaries. For example, this includes
// non-contiguous copies between CPU and GPU.
return true;
}
}
static bool maybe_enable_p2p_access(Device dst_device, Device src_device) {
if (dst_device.is_cpu() || src_device.is_cpu()) {
return false;
}
return THCState_getPeerToPeerAccess(
globalContext().getTHCState(), src_device.index(), dst_device.index());
}
static void copy_kernel_cuda(TensorIterator& iter, bool non_blocking) {
AT_ASSERT(iter.ntensors() == 2);
Device dst_device = iter.device(0);
Device src_device = iter.device(1);
// Enable p2p access between devices. (No-op if it involves the CPU)
bool p2p_enabled = maybe_enable_p2p_access(dst_device, src_device);
if (copy_requires_temporaries(iter, p2p_enabled)) {
// NB: this involves recursive calls to copy. Be careful that those copies
// don't require temporaries or you will cause an infinite recursion!
auto& dst = iter.tensor(0);
Tensor dst_contig;
Tensor src_contig;
// Type conversions are performed on the CPU for CPU-GPU copies and on
// the src device for GPU-GPU copies.
if (iter.device_type(0) == kCUDA) {
dst_contig = dst.is_contiguous() ? dst : at::empty_like(dst);
src_contig = iter.tensor(1).to(iter.dtype(0)).expand_as(dst).contiguous();
} else {
bool same_type = iter.dtype(0) == iter.dtype(1);
dst_contig = (dst.is_contiguous() && same_type) ? dst : at::empty_like(dst, iter.dtype(1));
src_contig = iter.tensor(1).expand_as(dst).contiguous();
}
// perform a same-dtype copy on contiguous tensors
TORCH_INTERNAL_ASSERT(dst_contig.sizes().equals(src_contig.sizes()));
TORCH_INTERNAL_ASSERT(dst_contig.scalar_type() == src_contig.scalar_type());
dst_contig.copy_(src_contig, non_blocking);
// if necessary, copy back into dst
if (!dst_contig.is_same(dst)) {
TORCH_INTERNAL_ASSERT(dst_contig.device() == dst.device());
dst.copy_(dst_contig, non_blocking);
}
return;
}
// Copy on GPU (or between GPUs)
if (dst_device.is_cuda() && src_device.is_cuda()) {
copy_device_to_device(iter, non_blocking);
return;
}
// Copy between CPU and GPU
cuda::OptionalCUDAGuard device_guard;
cudaMemcpyKind kind;
if (dst_device.is_cuda() && src_device.is_cpu()) {
device_guard.set_device(dst_device);
kind = cudaMemcpyHostToDevice;
} else if (dst_device.is_cpu() && src_device.is_cuda()) {
device_guard.set_device(src_device);
kind = cudaMemcpyDeviceToHost;
} else {
TORCH_INTERNAL_ASSERT(false, "unsupported devices in GPU copy_()");
}
void* dst = iter.data_ptr(0);
void* src = iter.data_ptr(1);
int64_t nbytes = iter.numel() * iter.element_size(0);
CUDAStream stream = getCurrentCUDAStream();
AT_CUDA_CHECK(cudaMemcpyAsync(dst, src, nbytes, kind, stream));
if (non_blocking) {
void* ptr = (dst_device == kCPU ? dst : src);
AT_CUDA_CHECK(THCCachingHostAllocator_recordEvent(ptr, stream));
} else {
AT_CUDA_CHECK(cudaStreamSynchronize(stream));
}
}
static void ARC_copy_kernel_cuda(TensorIterator& iter, bool non_blocking, int tid, bool is_csr) {
// [JS] SSD flag only concerned if 'dir' information is properly saved at arc_vm
// only this case is from offload and prefetch.
arcp2p_dir dir = arc_vm.get_dir(tid);
bool ssd_flag = arc_vm.is_using_ssd() && (dir != arcp2p_unused);
// [JS] now the fp16 & csr options are delivered by flag settings
// Note: FP16 should be set whenever CSR is set,
// so we don't care about the FP16=false & CSR=true case
bool fp16_flag = arc_vm.is_fp16();
bool csr_flag = arc_vm.is_csr();
// [JS] clear dir value, to avoid confusion
arc_vm.set_dir(tid, arcp2p_unused);
AT_ASSERT(iter.ntensors() == 2);
Device dst_device = iter.device(0);
Device src_device = iter.device(1);
// Enable p2p access between devices. (No-op if it invovles the CPU)
bool p2p_enabled = maybe_enable_p2p_access(dst_device, src_device);
/*
if (copy_requires_temporaries(iter, p2p_enabled)) {
// NB: this involves recursive calls to copy. Be careful that those copies
// don't require temporaries or you will cause an infinite recursion!
auto& dst = iter.tensor(0);
Tensor dst_contig;
Tensor src_contig;
// Type conversions are performed on the CPU for CPU-GPU copies and on
// the src device for GPU-GPU copies.
if (iter.device_type(0) == kCUDA) {
dst_contig = dst.is_contiguous() ? dst : at::empty_like(dst);
src_contig = iter.tensor(1).to(iter.dtype(0)).expand_as(dst).contiguous();
} else {
bool same_type = iter.dtype(0) == iter.dtype(1);
dst_contig = (dst.is_contiguous() && same_type) ? dst : at::empty_like(dst, iter.dtype(1));
src_contig = iter.tensor(1).expand_as(dst).contiguous();
}
// perform a same-dtype copy on contiguous tensors
TORCH_INTERNAL_ASSERT(dst_contig.sizes().equals(src_contig.sizes()));
TORCH_INTERNAL_ASSERT(dst_contig.scalar_type() == src_contig.scalar_type());
dst_contig.copy_(src_contig, non_blocking);
// if necessary, copy back into dst
if (!dst_contig.is_same(dst)) {
TORCH_INTERNAL_ASSERT(dst_contig.device() == dst.device());
dst.copy_(dst_contig, non_blocking);
}
return;
}
*/
// Copy on GPU (or between GPUs)
/*
if (dst_device.is_cuda() && src_device.is_cuda()) {
copy_device_to_device(iter, non_blocking);
return;
}
*/
// Copy between CPU and GPU
cuda::OptionalCUDAGuard device_guard;
cudaMemcpyKind kind;
if (dst_device.is_cuda() && src_device.is_cpu()) {
device_guard.set_device(dst_device);
kind = cudaMemcpyHostToDevice;
arc_vm.event_arr_h2d[tid] = true;
} else if (dst_device.is_cpu() && src_device.is_cuda()) {
device_guard.set_device(src_device);
kind = cudaMemcpyDeviceToHost;
arc_vm.event_arr_d2h[tid] = true;
} else {
TORCH_INTERNAL_ASSERT(false, "unsupported devices in GPU copy_()");
}
uint64_t p2p_addr = 0, p2p_size = 0;
void* dst = iter.data_ptr(0);
void* src = iter.data_ptr(1);
int64_t nbytes = iter.numel() * iter.element_size(0);
// CUDAStream stream = getCurrentCUDAStream();
cudaStream_t stream = at::native::arc_vm.arc_stream;
arc_vm.set_elem(tid, iter.element_size(0));
if (true == ssd_flag) {
if (!arc_vm.mapping) {
// [TODO] this should be called only when the Tesla option is enabled
void* deviceAddr = arc_vm.get_device_addr();
uint64_t deviceSz = arc_vm.get_device_sz();
arc_vm.Arcp2pBarMapping((uint64_t)deviceAddr, deviceSz);
arc_vm.mapping = true;
}
}
size_t bit_elements, pos_elements, pos_elements_before;
if (csr_flag) {
bit_elements = (size_t)((iter.numel() + 1024 - 1) / 1024) * 32;
pos_elements_before = (size_t)((iter.numel() + 32 - 1) / 32);
int count = 0;
while (pos_elements_before != 0) {
pos_elements_before = pos_elements_before >> 1; count++;
}
pos_elements = 1 << count;
}
if (kind == cudaMemcpyDeviceToHost) {
if (iter.element_size(0) >= 4) {
if (csr_flag && is_csr) {
void *fp16, *bit, *pos;
arc_vm.p2p_malloc(&bit, sizeof(unsigned int) * bit_elements);
arc_vm.p2p_malloc(&pos, sizeof(unsigned int) * pos_elements);
arc_vm.set_bit_addr(tid, (uint64_t)bit);
arc_vm.set_pos_addr(tid, (uint64_t)pos);
unsigned int *nz_pos;
arc_vm.p2p_malloc((void **)&nz_pos, pos_elements * sizeof(unsigned int));
cudaMemsetAsync((void *)bit, 0, sizeof(unsigned int) * bit_elements, stream);
cudaMemsetAsync((void *)pos, 0, sizeof(unsigned int) * pos_elements, stream);
cudaMemsetAsync((void *)nz_pos, 0, sizeof(unsigned int) * pos_elements, stream);
void *nz_src;
if (iter.element_size(0) == 8) {
arc_vm.p2p_malloc((void **)&nz_src, iter.numel() * sizeof(double));
cudaMemsetAsync((void *)nz_src, 0, sizeof(double) * iter.numel(), stream);
thrust::device_ptr<double> dA_V((double *)src);
thrust::device_ptr<double> dA_R((double *)nz_src);
thrust::copy_if(dA_V, dA_V + iter.numel(), dA_R, is_not_zero_double());
} else {
arc_vm.p2p_malloc((void **)&nz_src, iter.numel() * sizeof(float));
cudaMemsetAsync((void *)nz_src, 0, sizeof(float) * iter.numel(), stream);
thrust::device_ptr<float> dA_V((float *)src);
thrust::device_ptr<float> dA_R((float *)nz_src);
thrust::copy_if(dA_V, dA_V + iter.numel(), dA_R, is_not_zero());
}
zero_mask<<<(iter.numel() + nTPB - 1) / nTPB, nTPB, 0, stream>>>((float *)src, (unsigned int *)bit, nz_pos, iter.numel());
pos_first<<<nblocks, nthreads, 0, stream>>>(nz_pos, pos_elements);
pos_second<<<nblocks, nthreads, 0, stream>>>(nz_pos, (unsigned int*)pos, pos_elements);
int resize = 0;
cudaMemcpyAsync((void *)&resize, (void *)((size_t)pos + sizeof(unsigned int) * (pos_elements - 1)),
sizeof(int), cudaMemcpyDeviceToHost, stream);
arc_vm.p2p_malloc(&fp16, sizeof(__half) * resize);
arc_vm.set_fp16_addr(tid, (uint64_t)fp16);
half_scale<<<(iter.numel() + nTPB - 1) / nTPB, nTPB, 0, stream>>>((float *)nz_src, (__half *)fp16, resize);
arc_vm.set_resize(tid, resize);
arc_vm.set_numel(tid, iter.numel());
if (globalContext().ARCGlobal.isDebugMode()) {
std::cout << "CSR in d2h, resize: " << resize << ", original: " << iter.numel() << ", elem_size: " << iter.element_size(0) << ", tid: " << tid << ", fp16: " << fp16 << std::endl;
}
if (true == ssd_flag) {
p2p_addr = (uint64_t)fp16;
p2p_size = (uint64_t)(resize * sizeof(__half));
} else {
AT_CUDA_CHECK(cudaMemcpyAsync(dst, fp16, resize * sizeof(__half), kind, stream));
if (globalContext().ARCGlobal.isDebugMode()) {
std::cout << "CSR FP16 mem free tid: " << tid << ", size: " << sizeof(__half) * resize << std::endl;
}
arc_vm.p2p_free(fp16, resize * sizeof(__half));
arc_vm.event_arr_d2h[tid] = false;
}
arc_vm.p2p_free((void *)nz_pos, pos_elements * sizeof(unsigned int));
arc_vm.p2p_free((void *)nz_src, iter.numel() * sizeof(float));
} else if (fp16_flag) {
// this branch covers both cases:
// 1. csr_flag==true && is_csr==false (csr_flag==true always guarantees fp16_flag==true)
// 2. csr_flag==false && fp16_flag==true
// keep print message for debug purpose
void *fp16;
arc_vm.p2p_malloc(&fp16, sizeof(__half) * iter.numel());
arc_vm.set_fp16_addr(tid, (uint64_t)fp16);
if (globalContext().ARCGlobal.isDebugMode()) {
if (csr_flag) {
std::cout << "No CSR in d2h, original: " << iter.numel() << ", elem_size: " << iter.element_size(0) << ", tid: " << tid << ", fp16 addr: " << fp16 << std::endl;
} else {
std::cout << "FP16 in d2h, original: " << iter.numel() << ", elem_size: " << iter.element_size(0) << ", tid: " << tid << ", fp16 addr: " << fp16 << std::endl;
}
}
half_scale<<<(iter.numel() + nTPB - 1) / nTPB, nTPB, 0, stream>>>((float *)src, (__half *)fp16, iter.numel());
arc_vm.set_resize(tid, 0); // [TODO] slight hack code, we will distinguish CSR / FP16 by resize value
arc_vm.set_numel(tid, iter.numel());
if (true == ssd_flag) {
p2p_addr = (uint64_t)fp16;
p2p_size = (uint64_t)(iter.numel() * sizeof(__half));
} else {
AT_CUDA_CHECK(cudaMemcpyAsync(dst, fp16, sizeof(__half) * iter.numel(), kind, stream));
if (globalContext().ARCGlobal.isDebugMode()) {
std::cout << "No CSR FP16 mem free tid: " << tid << ", size: " << sizeof(__half) * iter.numel() << std::endl;
}
arc_vm.p2p_free(fp16, iter.numel() * sizeof(__half));
arc_vm.event_arr_d2h[tid] = false;
}
} else { // false == csr_flag && false == fp16_flag
if (true == ssd_flag) {
// TODO Need to malloc src ptr to BAR attached region
void *fp16;
arc_vm.p2p_malloc(&fp16, nbytes);
arc_vm.set_fp16_addr(tid, (uint64_t)fp16);
arc_vm.set_numel(tid, (size_t)nbytes);
arc_vm.set_resize(tid, -1); // [TODO] slight hack code, we will distinguish CSR / FP16 by resize value
AT_CUDA_CHECK(cudaMemcpyAsync(fp16, src, nbytes, cudaMemcpyDeviceToDevice, stream));
p2p_addr = (uint64_t)fp16;
p2p_size = (uint64_t)nbytes;
if (globalContext().ARCGlobal.isDebugMode()) {
std::cout << "Nothing in d2h, original: " << iter.numel() << ", elem_size: " << iter.element_size(0) << ", tid: " << tid << ", fp16: " << fp16 << std::endl;
}
} else {
if (globalContext().ARCGlobal.isDebugMode()) {
std::cout << "Nothing in d2h, original: " << iter.numel() << ", elem_size: " << iter.element_size(0) << ", tid: " << tid << std::endl;
}
arc_vm.set_resize(tid, -1); // [TODO] slight hack code, we will distinguish CSR / FP16 by resize value
arc_vm.set_numel(tid, iter.numel());
arc_vm.set_fp16_addr(tid, (uint64_t)NULL);
if (globalContext().ARCGlobal.isDebugMode()) {
std::cout << "TODO: Duplicated FP16 mem free tid: " << tid << std::endl;
}
AT_CUDA_CHECK(cudaMemcpyAsync(dst, src, nbytes, kind, stream));
arc_vm.event_arr_d2h[tid] = false;
}
}
} else { // Non double or float
if (true == ssd_flag) {
// TODO Need to malloc src ptr to BAR attached region
void *fp16;
arc_vm.p2p_malloc(&fp16, nbytes);
arc_vm.set_fp16_addr(tid, (uint64_t)fp16);
arc_vm.set_resize(tid, -1); // [TODO] slight hack code, we will distinguish CSR / FP16 by resize value
arc_vm.set_numel(tid, (size_t)nbytes);
AT_CUDA_CHECK(cudaMemcpyAsync(fp16, src, nbytes, cudaMemcpyDeviceToDevice, stream));
p2p_addr = (uint64_t)fp16;
p2p_size = (uint64_t)nbytes;
if (globalContext().ARCGlobal.isDebugMode()) {
std::cout << "No float/double in d2h, original: " << iter.numel() << ", elem_size: " << iter.element_size(0) << ", tid: " << tid << ", fp16: " << fp16 << std::endl;
}
} else {
if (globalContext().ARCGlobal.isDebugMode()) {
std::cout << "No float/double in d2h, original: " << iter.numel() << ", elem_size: " << iter.element_size(0) << ", tid: " << tid << std::endl;
}
arc_vm.set_resize(tid, -1); // [TODO] slight hack code, we will distinguish CSR / FP16 by resize value
arc_vm.set_numel(tid, iter.numel());
arc_vm.set_fp16_addr(tid, (uint64_t)NULL);
AT_CUDA_CHECK(cudaMemcpyAsync(dst, src, nbytes, kind, stream));
arc_vm.event_arr_d2h[tid] = false;
}
}
}
if (kind == cudaMemcpyHostToDevice) {
if (iter.element_size(0) >= 4) {
if (csr_flag && is_csr) {
void* bit = arc_vm.get_bit_addr(tid);
void* pos = arc_vm.get_pos_addr(tid);
int resize = arc_vm.get_resize(tid);
void* fp16;
arc_vm.p2p_malloc(&fp16, sizeof(__half) * resize);
arc_vm.set_fp16_addr(tid, (uint64_t)fp16);
if (globalContext().ARCGlobal.isDebugMode()) {
std::cout << "CSR in h2d, resize: " << resize << ", original: " << iter.numel() << ", elem_size: " << iter.element_size(0) << ", tid: " << tid << ", fp16: " << fp16 << std::endl;
}
if (ssd_flag) {
p2p_addr = (uint64_t)fp16;
p2p_size = (uint64_t)(resize * sizeof(__half));
// [JS] all backend jobs will be handled at Arcp2pCompletion
} else {
float *nz_dst;
arc_vm.p2p_malloc((void **)&nz_dst, resize * sizeof(float));
cudaMemsetAsync((void *)nz_dst, 0, resize * sizeof(float), stream);
AT_CUDA_CHECK(cudaMemcpyAsync(fp16, src, resize * sizeof(__half), kind, stream));
float_scale<<<(iter.numel() + nTPB - 1) / nTPB, nTPB, 0, stream>>>((__half *)fp16, nz_dst, resize);
// float_scale<<<(resize + nTPB - 1) / nTPB, nTPB, 0, stream>>>((__half *)fp16, nz_dst, resize);
if (iter.element_size(0) == 8) {
zero_insert_double<<<(iter.numel() + nTPB - 1) / nTPB, nTPB, 0, stream>>>((unsigned int*)bit, (unsigned int*)pos, nz_dst, (double *)dst, iter.numel());
} else {
zero_insert_float<<<(iter.numel() + nTPB - 1) / nTPB, nTPB, 0, stream>>>((unsigned int*)bit, (unsigned int*)pos, nz_dst, (float *)dst, iter.numel());
}
arc_vm.p2p_free((void *)nz_dst, resize * sizeof(float));
}
} else if (fp16_flag) {
        // keep the print message for debugging purposes
void* fp16;
arc_vm.p2p_malloc(&fp16, sizeof(__half) * iter.numel());
arc_vm.set_fp16_addr(tid, (uint64_t)fp16);
arc_vm.set_numel(tid, iter.numel());
arc_vm.set_resize(tid, 0);
if (globalContext().ARCGlobal.isDebugMode()) {
if (csr_flag) {
std::cout << "No CSR in h2d, original: " << iter.numel() << ", elem_size: " << iter.element_size(0) << ", tid: " << tid << ", fp16: " << fp16 << ", requested size: " << sizeof(__half) * iter.numel() << std::endl;
} else {
std::cout << "FP16 in h2d, original: " << iter.numel() << ", elem_size: " << iter.element_size(0) << ", tid: " << tid << ", fp16: " << fp16 << std::endl;
}
}
if (ssd_flag) {
p2p_addr = (uint64_t)fp16;
p2p_size = (uint64_t)(iter.numel() * sizeof(__half));
} else {
AT_CUDA_CHECK(cudaMemcpyAsync(fp16, src, iter.numel() * sizeof(__half), kind, stream));
if (iter.element_size(0) == 8) {
double_scale<<<(iter.numel() + nTPB - 1) / nTPB, nTPB, 0, stream>>>((__half* )fp16, (double*)dst, iter.numel());
} else {
float_scale<<<(iter.numel() + nTPB - 1) / nTPB, nTPB, 0, stream>>>((__half* )fp16, (float*)dst, iter.numel());
}
if (at::globalContext().ARCGlobal.isOnDemand()) {
cudaStreamSynchronize(stream);
arc_vm.event_arr_h2d[tid] = false;
}
}
} else {
if (true == ssd_flag) {
void* fp16;
arc_vm.p2p_malloc(&fp16, nbytes);
arc_vm.set_fp16_addr(tid, (uint64_t)fp16);
p2p_addr = (uint64_t)fp16;
p2p_size = (uint64_t)nbytes;
if (globalContext().ARCGlobal.isDebugMode()) {
std::cout << "Nothing in h2d, original: " << iter.numel() << ", elem_size: " << iter.element_size(0) << ", tid: " << tid << ", fp16: " << fp16 << std::endl;
}
} else {
if (globalContext().ARCGlobal.isDebugMode()) {
std::cout << "Nothing in h2d, original: " << iter.numel() << ", elem_size: " << iter.element_size(0) << ", tid: " << tid << std::endl;
}
arc_vm.set_fp16_addr(tid, (uint64_t)NULL);
AT_CUDA_CHECK(cudaMemcpyAsync(dst, src, nbytes, kind, stream));
if (at::globalContext().ARCGlobal.isOnDemand()) {
cudaStreamSynchronize(stream);
arc_vm.event_arr_h2d[tid] = false;
}
}
}
} else {
if (true == ssd_flag) {
void* fp16;
arc_vm.p2p_malloc(&fp16, nbytes);
arc_vm.set_fp16_addr(tid, (uint64_t)fp16);
p2p_addr = (uint64_t)fp16;
p2p_size = (uint64_t)nbytes;
if (globalContext().ARCGlobal.isDebugMode()) {
std::cout << "No float/double in h2d, original: " << iter.numel() << ", elem_size: " << iter.element_size(0) << ", tid: " << tid << ", fp16: " << fp16 << std::endl;
}
} else {
if (globalContext().ARCGlobal.isDebugMode()) {
std::cout << "No float/double in h2d, original: " << iter.numel() << ", elem_size: " << iter.element_size(0) << ", tid: " << tid << std::endl;
}
arc_vm.set_fp16_addr(tid, (uint64_t)NULL);
AT_CUDA_CHECK(cudaMemcpyAsync(dst, src, nbytes, kind, stream));
if (at::globalContext().ARCGlobal.isOnDemand()) {
cudaStreamSynchronize(stream);
arc_vm.event_arr_h2d[tid] = false;
}
}
}
}
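  // SSD offload path (summary of the block below): the p2p_addr/p2p_size
  // staged above are submitted to the ARC p2p engine. For the GPU->SSD
  // direction the tensor's storage is captured (only when FP16 staging is not
  // used); for SSD->GPU the numel, thread-block size and src/dst pointers are
  // packed into arcp2p_info before submission, and completions are polled via
  // Arcp2pCompletion.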
if (ssd_flag) {
uint64_t *p_offs = arc_vm.get_offset_ptr(tid);
arcp2p_cpl *p_cpl = (arcp2p_cpl *)arc_vm.get_cpl_addr(tid, dir);
if (arcp2p_gputossd == dir) {
c10::Storage *stor = nullptr;
if (false == fp16_flag) {
stor = new c10::Storage;
*stor = iter.tensor(1).storage();
}
arcp2p_info *info = nullptr;
// if (true == fp16_flag) {
info = new arcp2p_info;
info->tid = (uint64_t)tid;
info->ptr = arc_vm.get_fp16_addr(tid);
// }
// arc_vm.Arcp2pSubmission(p2p_addr, p2p_size, p_offs, p_cpl, dir, stor, info);
// arc_vm.Arcp2pSubmission(p2p_addr, p2p_size, p_offs, p_cpl, dir, stor, info, stream.stream());
arc_vm.Arcp2pSubmission(p2p_addr, p2p_size, p_offs, p_cpl, dir, stor, info, stream);
arc_vm.Arcp2pCompletion(false);
} else if (arcp2p_ssdtogpu == dir) {
arcp2p_info *info = nullptr;
// if (true == fp16_flag) {
info = new arcp2p_info;
info->tid = (uint64_t)tid;
info->numel = (uint64_t)iter.numel();
info->ntpb = nTPB;
info->dst = iter.data_ptr(0);
info->src = iter.data_ptr(1);
info->ptr = arc_vm.get_fp16_addr(tid);
// }
// arc_vm.Arcp2pSubmission(p2p_addr, p2p_size, p_offs, p_cpl, dir, nullptr, info);
// arc_vm.Arcp2pSubmission(p2p_addr, p2p_size, p_offs, p_cpl, dir, nullptr, info, stream.stream());
arc_vm.Arcp2pSubmission(p2p_addr, p2p_size, p_offs, p_cpl, dir, nullptr, info, stream);
arc_vm.Arcp2pCompletion(false);
}
}
/*
} else {
if (non_blocking) {
void* ptr = (dst_device == kCPU ? dst : src);
AT_CUDA_CHECK(THCCachingHostAllocator_recordEvent(ptr, stream));
} else {
AT_CUDA_CHECK(cudaStreamSynchronize(stream));
}
}
*/
}
REGISTER_DISPATCH(copy_stub, &copy_kernel_cuda);
REGISTER_DISPATCH(ARC_copy_stub, &ARC_copy_kernel_cuda);
} // namespace native
} // namespace at
|
26622b7c224b8ba35a66ac99b542da51e819cab4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <Python.h>
#include <iostream>
#include "theano_mod_helper.h"
#include "cuda_ndarray.cuh"
//////////////////////
//// Support Code
//////////////////////
#define INTDIV_POW2(a, b) (a >> b)
#define INTMOD_POW2(a, b) (a & ((1<<b)-1))
// GpuElemwise{Composite{((i0 * log(clip(i1, i2, i3))) + ((i4 - i0) * log((i4 - clip(i1, i2, i3)))))}}[(0, 0)]
// node.op.destroy_map={0: [0]}
// Input 0 CudaNdarrayType(float32, matrix)
// Input 1 CudaNdarrayType(float32, matrix)
// Input 2 CudaNdarrayType(float32, (True, True))
// Input 3 CudaNdarrayType(float32, (True, True))
// Input 4 CudaNdarrayType(float32, (True, True))
// Output 0 CudaNdarrayType(float32, matrix)
static __global__ void kernel_Composite_node_81712a72f6e99c01eca2afba8cbaebcd_0_1(unsigned int numEls
, const int dim0
, const float * i0_data, int i0_str_0
, const float * i1_data, int i1_str_0
, const float * i2_data, int i2_str_0
, const float * i3_data, int i3_str_0
, const float * i4_data, int i4_str_0
, float * o0_data, int o0_str_0
)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
const float ii_i2_value = i2_data[0];
const float ii_i3_value = i3_data[0];
const float ii_i4_value = i4_data[0];
for (int i = idx; i < numEls; i += numThreads) {
int ii = i;
const float * ii_i0_data = i0_data;
const float * ii_i1_data = i1_data;
float * ii_o0_data = o0_data;
int pos0 = ii;
ii_i0_data += pos0 * i0_str_0;
ii_i1_data += pos0 * i1_str_0;
ii_o0_data += pos0 * o0_str_0;
npy_float32 o0_i;
{
npy_float32 V_DUMMY_ID__tmp1;
V_DUMMY_ID__tmp1 = ii_i1_data[0] < ii_i2_value ? ii_i2_value : ii_i1_data[0] > ii_i3_value ? ii_i3_value : ii_i1_data[0];
npy_float32 V_DUMMY_ID__tmp2;
V_DUMMY_ID__tmp2 = ii_i4_value - ii_i0_data[0];
npy_float32 V_DUMMY_ID__tmp3;
V_DUMMY_ID__tmp3 = ii_i4_value - V_DUMMY_ID__tmp1;
npy_float32 V_DUMMY_ID__tmp4;
V_DUMMY_ID__tmp4 = log(V_DUMMY_ID__tmp1);
npy_float32 V_DUMMY_ID__tmp5;
V_DUMMY_ID__tmp5 = log(V_DUMMY_ID__tmp3);
npy_float32 V_DUMMY_ID__tmp6;
V_DUMMY_ID__tmp6 = ii_i0_data[0] * V_DUMMY_ID__tmp4;
npy_float32 V_DUMMY_ID__tmp7;
V_DUMMY_ID__tmp7 = V_DUMMY_ID__tmp2 * V_DUMMY_ID__tmp5;
o0_i = V_DUMMY_ID__tmp6 + V_DUMMY_ID__tmp7;
}
ii_o0_data[0] = o0_i;
}
}
// GpuElemwise{Composite{((i0 * log(clip(i1, i2, i3))) + ((i4 - i0) * log((i4 - clip(i1, i2, i3)))))}}[(0, 0)]
// node.op.destroy_map={0: [0]}
// Input 0 CudaNdarrayType(float32, matrix)
// Input 1 CudaNdarrayType(float32, matrix)
// Input 2 CudaNdarrayType(float32, (True, True))
// Input 3 CudaNdarrayType(float32, (True, True))
// Input 4 CudaNdarrayType(float32, (True, True))
// Output 0 CudaNdarrayType(float32, matrix)
static __global__ void kernel_Composite_node_81712a72f6e99c01eca2afba8cbaebcd_0_2(unsigned int numEls
, const int dim0, const int dim1
, const float * i0_data, int i0_str_0, int i0_str_1
, const float * i1_data, int i1_str_0, int i1_str_1
, const float * i2_data, int i2_str_0, int i2_str_1
, const float * i3_data, int i3_str_0, int i3_str_1
, const float * i4_data, int i4_str_0, int i4_str_1
, float * o0_data, int o0_str_0, int o0_str_1
)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
const float ii_i2_value = i2_data[0];
const float ii_i3_value = i3_data[0];
const float ii_i4_value = i4_data[0];
for (int i = idx; i < numEls; i += numThreads) {
int ii = i;
const float * ii_i0_data = i0_data;
const float * ii_i1_data = i1_data;
float * ii_o0_data = o0_data;
int pos1 = ii % dim1;
ii = ii / dim1;
ii_i0_data += pos1 * i0_str_1;
ii_i1_data += pos1 * i1_str_1;
ii_o0_data += pos1 * o0_str_1;
int pos0 = ii;
ii_i0_data += pos0 * i0_str_0;
ii_i1_data += pos0 * i1_str_0;
ii_o0_data += pos0 * o0_str_0;
npy_float32 o0_i;
{
npy_float32 V_DUMMY_ID__tmp1;
V_DUMMY_ID__tmp1 = ii_i1_data[0] < ii_i2_value ? ii_i2_value : ii_i1_data[0] > ii_i3_value ? ii_i3_value : ii_i1_data[0];
npy_float32 V_DUMMY_ID__tmp2;
V_DUMMY_ID__tmp2 = ii_i4_value - ii_i0_data[0];
npy_float32 V_DUMMY_ID__tmp3;
V_DUMMY_ID__tmp3 = ii_i4_value - V_DUMMY_ID__tmp1;
npy_float32 V_DUMMY_ID__tmp4;
V_DUMMY_ID__tmp4 = log(V_DUMMY_ID__tmp1);
npy_float32 V_DUMMY_ID__tmp5;
V_DUMMY_ID__tmp5 = log(V_DUMMY_ID__tmp3);
npy_float32 V_DUMMY_ID__tmp6;
V_DUMMY_ID__tmp6 = ii_i0_data[0] * V_DUMMY_ID__tmp4;
npy_float32 V_DUMMY_ID__tmp7;
V_DUMMY_ID__tmp7 = V_DUMMY_ID__tmp2 * V_DUMMY_ID__tmp5;
o0_i = V_DUMMY_ID__tmp6 + V_DUMMY_ID__tmp7;
}
ii_o0_data[0] = o0_i;
}
}
// GpuElemwise{Composite{((i0 * log(clip(i1, i2, i3))) + ((i4 - i0) * log((i4 - clip(i1, i2, i3)))))}}[(0, 0)]
// node.op.destroy_map={0: [0]}
// Input 0 CudaNdarrayType(float32, matrix)
// Input 1 CudaNdarrayType(float32, matrix)
// Input 2 CudaNdarrayType(float32, (True, True))
// Input 3 CudaNdarrayType(float32, (True, True))
// Input 4 CudaNdarrayType(float32, (True, True))
// Output 0 CudaNdarrayType(float32, matrix)
static __global__ void kernel_Composite_node_81712a72f6e99c01eca2afba8cbaebcd_0_Ccontiguous (unsigned int numEls
, const float * i0_data
, const float * i1_data
, const float * i2_data
, const float * i3_data
, const float * i4_data
, float * o0_data
)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
const float ii_i2_value = i2_data[0];
const float ii_i3_value = i3_data[0];
const float ii_i4_value = i4_data[0];
for (int i = idx; i < numEls; i += numThreads) {
npy_float32 o0_i;
{
npy_float32 V_DUMMY_ID__tmp1;
V_DUMMY_ID__tmp1 = i1_data[i] < ii_i2_value ? ii_i2_value : i1_data[i] > ii_i3_value ? ii_i3_value : i1_data[i];
npy_float32 V_DUMMY_ID__tmp2;
V_DUMMY_ID__tmp2 = ii_i4_value - i0_data[i];
npy_float32 V_DUMMY_ID__tmp3;
V_DUMMY_ID__tmp3 = ii_i4_value - V_DUMMY_ID__tmp1;
npy_float32 V_DUMMY_ID__tmp4;
V_DUMMY_ID__tmp4 = log(V_DUMMY_ID__tmp1);
npy_float32 V_DUMMY_ID__tmp5;
V_DUMMY_ID__tmp5 = log(V_DUMMY_ID__tmp3);
npy_float32 V_DUMMY_ID__tmp6;
V_DUMMY_ID__tmp6 = i0_data[i] * V_DUMMY_ID__tmp4;
npy_float32 V_DUMMY_ID__tmp7;
V_DUMMY_ID__tmp7 = V_DUMMY_ID__tmp2 * V_DUMMY_ID__tmp5;
o0_i = V_DUMMY_ID__tmp6 + V_DUMMY_ID__tmp7;
}
o0_data[i] = o0_i;
}
}
static void can_collapse_node_81712a72f6e99c01eca2afba8cbaebcd_0(int nd, const int * dims, const int * strides, int collapse[])
{
//can we collapse dims[i] and dims[i-1]
for(int i=nd-1;i>0;i--){
    if(strides[i]*dims[i]==strides[i-1]){//the dims nd-1 are not strided against dimension nd
collapse[i]=1;
}else collapse[i]=0;
}
}
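// The launcher below first drops size-1 dimensions that are broadcast across
// all arguments, then merges adjacent dimensions that can_collapse_* marks as
// contiguous, and finally dispatches the 0-D (C-contiguous), 1-D or 2-D kernel
// variant based on the rank that remains.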
static int callkernel_node_81712a72f6e99c01eca2afba8cbaebcd_0(unsigned int numEls, const int d,
const int * dims,
const float * i0_data, const int * i0_str, const float * i1_data, const int * i1_str, const float * i2_data, const int * i2_str, const float * i3_data, const int * i3_str, const float * i4_data, const int * i4_str,
float * o0_data, const int * o0_str)
{
numEls = dims[0]*dims[1]*1;
int local_dims[2];
int local_str[5][2];
int local_ostr[1][2];
int nd_collapse = 2;
for(int i=0;i<2;i++){//init new dim
local_dims[i]=dims[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[0][i]=i0_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[1][i]=i1_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[2][i]=i2_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[3][i]=i3_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[4][i]=i4_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_ostr[0][i]=o0_str[i];
}
for(int id=0;id<nd_collapse;id++){
bool all_broadcast=true;
for(int input_id=0;input_id<5;input_id++){
if(local_str[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false;
}
for(int input_id=0;input_id<1;input_id++){
if(local_ostr[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false;
}
if(all_broadcast){
for(int j=id+1;j<nd_collapse;j++)//remove dims i from the array
local_dims[j-1]=local_dims[j];
for(int input_id=0;input_id<5;input_id++){
for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array
local_str[input_id][j-1]=local_str[input_id][j];
}
}
for(int output_id=0;output_id<1;output_id++){
for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array
local_ostr[output_id][j-1]=local_ostr[output_id][j];
}
}
nd_collapse--; id--;
}
}
int nd_collapse_[2] = {1,1};
int nd_collapse_0[2] = {1,1};
can_collapse_node_81712a72f6e99c01eca2afba8cbaebcd_0(nd_collapse, local_dims, local_str[0], nd_collapse_0);
for(int i=0;i<nd_collapse;i++){
if(nd_collapse_0[i]==0)
nd_collapse_[i]=0;
}
int nd_collapse_1[2] = {1,1};
can_collapse_node_81712a72f6e99c01eca2afba8cbaebcd_0(nd_collapse, local_dims, local_str[1], nd_collapse_1);
for(int i=0;i<nd_collapse;i++){
if(nd_collapse_1[i]==0)
nd_collapse_[i]=0;
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[0][i-1]=local_str[0][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[0][j-1]=local_str[0][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[1][i-1]=local_str[1][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[1][j-1]=local_str[1][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[2][i-1]=local_str[2][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[2][j-1]=local_str[2][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[3][i-1]=local_str[3][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[3][j-1]=local_str[3][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[4][i-1]=local_str[4][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[4][j-1]=local_str[4][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_ostr[0][i-1]=local_ostr[0][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_ostr[0][j-1]=local_ostr[0][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_dims[i-1]*=local_dims[i];//set new dims
for(int j=i+1;j<nd_collapse;j++)//remove dims i from the array
local_dims[j-1]=local_dims[j];
}
}
for(int i=1, end=nd_collapse;i<end;i++){
if(nd_collapse_[i]==1)nd_collapse--;
}
if(nd_collapse == 1
&& local_str[0][nd_collapse-1]==1 && local_str[1][nd_collapse-1]==1 && local_ostr[0][nd_collapse-1]==1
){nd_collapse=0;}
if(numEls==0) return 0;
switch (nd_collapse==0?0:min(2,nd_collapse)) {
case 0: {
//first use at least a full warp
int threads_per_block = ::min(numEls, (unsigned int)32); //WARP SIZE
//next start adding multiprocessors
int n_blocks = ::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = ::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( kernel_Composite_node_81712a72f6e99c01eca2afba8cbaebcd_0_Ccontiguous), dim3(n_blocks), dim3(threads_per_block), 0, 0, numEls, i0_data, i1_data, i2_data, i3_data, i4_data, o0_data);
//std::cerr << "calling callkernel returned\n";
CNDA_THREAD_SYNC;
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n",
"GpuElemwise node_81712a72f6e99c01eca2afba8cbaebcd_0 Composite", hipGetErrorString(err),
n_blocks, threads_per_block,
"kernel_Composite_node_81712a72f6e99c01eca2afba8cbaebcd_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, i0_data, i1_data, i2_data, i3_data, i4_data, o0_data)");
return -1;
}
return 0;
} break;
case 1: {
//first use at least a full warp
int threads_per_block = ::min(numEls, (unsigned int)32); //WARP SIZE
//next start adding multiprocessors
int n_blocks = ::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = ::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( kernel_Composite_node_81712a72f6e99c01eca2afba8cbaebcd_0_1), dim3(n_blocks), dim3(threads_per_block), 0, 0, numEls, local_dims[0], i0_data, local_str[0][0], i1_data, local_str[1][0], i2_data, local_str[2][0], i3_data, local_str[3][0], i4_data, local_str[4][0], o0_data, local_ostr[0][0]);
CNDA_THREAD_SYNC;
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n",
"GpuElemwise node_81712a72f6e99c01eca2afba8cbaebcd_0 Composite", hipGetErrorString(err),
n_blocks, threads_per_block,
"kernel_Composite_node_81712a72f6e99c01eca2afba8cbaebcd_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], i0_data, local_str[0][0], i1_data, local_str[1][0], i2_data, local_str[2][0], i3_data, local_str[3][0], i4_data, local_str[4][0], o0_data, local_ostr[0][0])");
return -1;
}
return 0;
} break;
case 2: {
//first use at least a full warp
int threads_per_block = ::min(numEls, (unsigned int)32); //WARP SIZE
//next start adding multiprocessors
int n_blocks = ::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = ::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( kernel_Composite_node_81712a72f6e99c01eca2afba8cbaebcd_0_2), dim3(n_blocks), dim3(threads_per_block), 0, 0, numEls, local_dims[0], local_dims[1], i0_data, local_str[0][0], local_str[0][1], i1_data, local_str[1][0], local_str[1][1], i2_data, local_str[2][0], local_str[2][1], i3_data, local_str[3][0], local_str[3][1], i4_data, local_str[4][0], local_str[4][1], o0_data, local_ostr[0][0], local_ostr[0][1]);
CNDA_THREAD_SYNC;
hipError_t err = hipGetLastError();
if( hipSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n",
"GpuElemwise node_81712a72f6e99c01eca2afba8cbaebcd_0 Composite", hipGetErrorString(err),
n_blocks, threads_per_block,
"kernel_Composite_node_81712a72f6e99c01eca2afba8cbaebcd_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], local_dims[1], i0_data, local_str[0][0], local_str[0][1], i1_data, local_str[1][0], local_str[1][1], i2_data, local_str[2][0], local_str[2][1], i3_data, local_str[3][0], local_str[3][1], i4_data, local_str[4][0], local_str[4][1], o0_data, local_ostr[0][0], local_ostr[0][1])");
return -1;
}
return 0;
} break;
}
return -2;
}
namespace {
struct __struct_compiled_op_81712a72f6e99c01eca2afba8cbaebcd {
PyObject* __ERROR;
PyObject* storage_V3;
PyObject* storage_V5;
PyObject* storage_V7;
PyObject* storage_V9;
PyObject* storage_V11;
PyObject* storage_V1;
__struct_compiled_op_81712a72f6e99c01eca2afba8cbaebcd() {
// This is only somewhat safe because we:
// 1) Are not a virtual class
// 2) Do not use any virtual classes in the members
// 3) Deal with mostly POD and pointers
// If this changes, we would have to revise this, but for
// now I am tired of chasing segfaults because
// initialization code had an error and some pointer has
// a junk value.
memset(this, 0, sizeof(*this));
}
~__struct_compiled_op_81712a72f6e99c01eca2afba8cbaebcd(void) {
cleanup();
}
int init(PyObject* __ERROR, PyObject* storage_V3, PyObject* storage_V5, PyObject* storage_V7, PyObject* storage_V9, PyObject* storage_V11, PyObject* storage_V1) {
Py_XINCREF(storage_V3);
Py_XINCREF(storage_V5);
Py_XINCREF(storage_V7);
Py_XINCREF(storage_V9);
Py_XINCREF(storage_V11);
Py_XINCREF(storage_V1);
this->storage_V3 = storage_V3;
this->storage_V5 = storage_V5;
this->storage_V7 = storage_V7;
this->storage_V9 = storage_V9;
this->storage_V11 = storage_V11;
this->storage_V1 = storage_V1;
this->__ERROR = __ERROR;
return 0;
}
void cleanup(void) {
__label_1:
double __DUMMY_1;
__label_3:
double __DUMMY_3;
__label_5:
double __DUMMY_5;
__label_7:
double __DUMMY_7;
__label_9:
double __DUMMY_9;
__label_11:
double __DUMMY_11;
__label_14:
double __DUMMY_14;
Py_XDECREF(this->storage_V3);
Py_XDECREF(this->storage_V5);
Py_XDECREF(this->storage_V7);
Py_XDECREF(this->storage_V9);
Py_XDECREF(this->storage_V11);
Py_XDECREF(this->storage_V1);
}
int run(void) {
int __failure = 0;
PyObject* py_V1;
CudaNdarray * V1;
PyObject* py_V3;
CudaNdarray * V3;
PyObject* py_V5;
CudaNdarray * V5;
PyObject* py_V7;
CudaNdarray * V7;
PyObject* py_V9;
CudaNdarray * V9;
PyObject* py_V11;
CudaNdarray * V11;
{
py_V1 = PyList_GET_ITEM(storage_V1, 0);
{Py_XINCREF(py_V1);}
if (py_V1 == Py_None)
{
V1 = NULL;
}
else
{
assert(py_V1->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V1))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
V1 = (CudaNdarray*)py_V1;
//std::cerr << "c_extract " << V1 << '\n';
if (V1->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V1->nd);
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
//std::cerr << "c_extract " << V1 << " nd check passed\n";
assert(V1);
Py_INCREF(py_V1);
}
else if (py_V1 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
//std::cerr << "c_extract done " << V1 << '\n';
}
{
py_V3 = PyList_GET_ITEM(storage_V3, 0);
{Py_XINCREF(py_V3);}
assert(py_V3->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V3))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
V3 = (CudaNdarray*)py_V3;
//std::cerr << "c_extract " << V3 << '\n';
if (V3->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V3->nd);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << " nd check passed\n";
assert(V3);
Py_INCREF(py_V3);
}
else if (py_V3 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract done " << V3 << '\n';
{
py_V5 = PyList_GET_ITEM(storage_V5, 0);
{Py_XINCREF(py_V5);}
assert(py_V5->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V5))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt));
V5 = (CudaNdarray*)py_V5;
//std::cerr << "c_extract " << V5 << '\n';
if (V5->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V5->nd);
V5 = NULL;
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;};
}
//std::cerr << "c_extract " << V5 << " nd check passed\n";
assert(V5);
Py_INCREF(py_V5);
}
else if (py_V5 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V5 = NULL;
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V5 = NULL;
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;};
}
//std::cerr << "c_extract done " << V5 << '\n';
{
py_V7 = PyList_GET_ITEM(storage_V7, 0);
{Py_XINCREF(py_V7);}
assert(py_V7->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V7))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt));
V7 = (CudaNdarray*)py_V7;
//std::cerr << "c_extract " << V7 << '\n';
if (V7->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V7->nd);
V7 = NULL;
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;};
}
//std::cerr << "c_extract " << V7 << " nd check passed\n";
if (CudaNdarray_HOST_DIMS(V7)[0] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V7)[0], 0);
V7 = NULL;
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;};
}
//std::cerr << "c_extract " << V7 << "dim check 0 passed\n";
//std::cerr << "c_extract " << V7 << "checking bcast 0 <" << V7->str<< ">\n";
//std::cerr << "c_extract " << V7->str[0] << "\n";
if (CudaNdarray_HOST_STRIDES(V7)[0])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V7)[0], 0);
V7 = NULL;
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;};
}
//std::cerr << "c_extract " << V7 << "bcast check 0 passed\n";
if (CudaNdarray_HOST_DIMS(V7)[1] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V7)[1], 1);
V7 = NULL;
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;};
}
//std::cerr << "c_extract " << V7 << "dim check 1 passed\n";
//std::cerr << "c_extract " << V7 << "checking bcast 1 <" << V7->str<< ">\n";
//std::cerr << "c_extract " << V7->str[1] << "\n";
if (CudaNdarray_HOST_STRIDES(V7)[1])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V7)[1], 1);
V7 = NULL;
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;};
}
//std::cerr << "c_extract " << V7 << "bcast check 1 passed\n";
assert(V7);
Py_INCREF(py_V7);
}
else if (py_V7 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V7 = NULL;
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V7 = NULL;
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;};
}
//std::cerr << "c_extract done " << V7 << '\n';
{
py_V9 = PyList_GET_ITEM(storage_V9, 0);
{Py_XINCREF(py_V9);}
assert(py_V9->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V9))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V9, (py_V9->ob_refcnt));
V9 = (CudaNdarray*)py_V9;
//std::cerr << "c_extract " << V9 << '\n';
if (V9->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V9->nd);
V9 = NULL;
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;};
}
//std::cerr << "c_extract " << V9 << " nd check passed\n";
if (CudaNdarray_HOST_DIMS(V9)[0] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V9)[0], 0);
V9 = NULL;
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;};
}
//std::cerr << "c_extract " << V9 << "dim check 0 passed\n";
//std::cerr << "c_extract " << V9 << "checking bcast 0 <" << V9->str<< ">\n";
//std::cerr << "c_extract " << V9->str[0] << "\n";
if (CudaNdarray_HOST_STRIDES(V9)[0])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V9)[0], 0);
V9 = NULL;
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;};
}
//std::cerr << "c_extract " << V9 << "bcast check 0 passed\n";
if (CudaNdarray_HOST_DIMS(V9)[1] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V9)[1], 1);
V9 = NULL;
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;};
}
//std::cerr << "c_extract " << V9 << "dim check 1 passed\n";
//std::cerr << "c_extract " << V9 << "checking bcast 1 <" << V9->str<< ">\n";
//std::cerr << "c_extract " << V9->str[1] << "\n";
if (CudaNdarray_HOST_STRIDES(V9)[1])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V9)[1], 1);
V9 = NULL;
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;};
}
//std::cerr << "c_extract " << V9 << "bcast check 1 passed\n";
assert(V9);
Py_INCREF(py_V9);
}
else if (py_V9 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V9 = NULL;
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V9, (py_V9->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V9 = NULL;
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;};
}
//std::cerr << "c_extract done " << V9 << '\n';
{
py_V11 = PyList_GET_ITEM(storage_V11, 0);
{Py_XINCREF(py_V11);}
assert(py_V11->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V11))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V11, (py_V11->ob_refcnt));
V11 = (CudaNdarray*)py_V11;
//std::cerr << "c_extract " << V11 << '\n';
if (V11->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V11->nd);
V11 = NULL;
{
__failure = 12;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_12;};
}
//std::cerr << "c_extract " << V11 << " nd check passed\n";
if (CudaNdarray_HOST_DIMS(V11)[0] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V11)[0], 0);
V11 = NULL;
{
__failure = 12;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_12;};
}
//std::cerr << "c_extract " << V11 << "dim check 0 passed\n";
//std::cerr << "c_extract " << V11 << "checking bcast 0 <" << V11->str<< ">\n";
//std::cerr << "c_extract " << V11->str[0] << "\n";
if (CudaNdarray_HOST_STRIDES(V11)[0])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V11)[0], 0);
V11 = NULL;
{
__failure = 12;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_12;};
}
//std::cerr << "c_extract " << V11 << "bcast check 0 passed\n";
if (CudaNdarray_HOST_DIMS(V11)[1] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V11)[1], 1);
V11 = NULL;
{
__failure = 12;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_12;};
}
//std::cerr << "c_extract " << V11 << "dim check 1 passed\n";
//std::cerr << "c_extract " << V11 << "checking bcast 1 <" << V11->str<< ">\n";
//std::cerr << "c_extract " << V11->str[1] << "\n";
if (CudaNdarray_HOST_STRIDES(V11)[1])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V11)[1], 1);
V11 = NULL;
{
__failure = 12;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_12;};
}
//std::cerr << "c_extract " << V11 << "bcast check 1 passed\n";
assert(V11);
Py_INCREF(py_V11);
}
else if (py_V11 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V11 = NULL;
{
__failure = 12;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_12;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V11, (py_V11->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V11 = NULL;
{
__failure = 12;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_12;};
}
//std::cerr << "c_extract done " << V11 << '\n';
{
// Op class GpuElemwise
//std::cerr << "C_CODE Composite{((i0 * log(clip(i1, i2, i3))) + ((i4 - i0) * log((i4 - clip(i1, i2, i3)))))} START\n";
//standard elemwise size checks
int dims[2] = {1,1};
int broadcasts_V3[2] = {0, 0};
int broadcasts_V5[2] = {0, 0};
int broadcasts_V7[2] = {1, 1};
int broadcasts_V9[2] = {1, 1};
int broadcasts_V11[2] = {1, 1};
//std::cerr << "C_CODE Composite{((i0 * log(clip(i1, i2, i3))) + ((i4 - i0) * log((i4 - clip(i1, i2, i3)))))} checking input V3\n";
if (2 != V3->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V3->nd);
{
__failure = 13;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_13;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V3)[i] : dims[i];
if ((!(broadcasts_V3[i] &&
CudaNdarray_HOST_DIMS(V3)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V3)[i]))
{
//std::cerr << "C_CODE Composite{((i0 * log(clip(i1, i2, i3))) + ((i4 - i0) * log((i4 - clip(i1, i2, i3)))))} checking input V3 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 0 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V3)[i],
dims[i]
);
{
__failure = 13;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_13;};
}
}
//std::cerr << "C_CODE Composite{((i0 * log(clip(i1, i2, i3))) + ((i4 - i0) * log((i4 - clip(i1, i2, i3)))))} checking input V5\n";
if (2 != V5->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V5->nd);
{
__failure = 13;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_13;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V5)[i] : dims[i];
if ((!(broadcasts_V5[i] &&
CudaNdarray_HOST_DIMS(V5)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V5)[i]))
{
//std::cerr << "C_CODE Composite{((i0 * log(clip(i1, i2, i3))) + ((i4 - i0) * log((i4 - clip(i1, i2, i3)))))} checking input V5 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 1 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V5)[i],
dims[i]
);
{
__failure = 13;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_13;};
}
}
//std::cerr << "C_CODE Composite{((i0 * log(clip(i1, i2, i3))) + ((i4 - i0) * log((i4 - clip(i1, i2, i3)))))} checking input V7\n";
if (2 != V7->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V7->nd);
{
__failure = 13;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_13;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V7)[i] : dims[i];
if ((!(broadcasts_V7[i] &&
CudaNdarray_HOST_DIMS(V7)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V7)[i]))
{
//std::cerr << "C_CODE Composite{((i0 * log(clip(i1, i2, i3))) + ((i4 - i0) * log((i4 - clip(i1, i2, i3)))))} checking input V7 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 2 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V7)[i],
dims[i]
);
{
__failure = 13;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_13;};
}
}
//std::cerr << "C_CODE Composite{((i0 * log(clip(i1, i2, i3))) + ((i4 - i0) * log((i4 - clip(i1, i2, i3)))))} checking input V9\n";
if (2 != V9->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V9->nd);
{
__failure = 13;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_13;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V9)[i] : dims[i];
if ((!(broadcasts_V9[i] &&
CudaNdarray_HOST_DIMS(V9)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V9)[i]))
{
//std::cerr << "C_CODE Composite{((i0 * log(clip(i1, i2, i3))) + ((i4 - i0) * log((i4 - clip(i1, i2, i3)))))} checking input V9 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 3 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V9)[i],
dims[i]
);
{
__failure = 13;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_13;};
}
}
//std::cerr << "C_CODE Composite{((i0 * log(clip(i1, i2, i3))) + ((i4 - i0) * log((i4 - clip(i1, i2, i3)))))} checking input V11\n";
if (2 != V11->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V11->nd);
{
__failure = 13;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_13;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V11)[i] : dims[i];
if ((!(broadcasts_V11[i] &&
CudaNdarray_HOST_DIMS(V11)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V11)[i]))
{
//std::cerr << "C_CODE Composite{((i0 * log(clip(i1, i2, i3))) + ((i4 - i0) * log((i4 - clip(i1, i2, i3)))))} checking input V11 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 4 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V11)[i],
dims[i]
);
{
__failure = 13;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_13;};
}
}
Py_XDECREF(V1);
V1 = V3;
Py_INCREF(V1);
for (int i = 0; (i< 2) && (V1); ++i) {
if (dims[i] != CudaNdarray_HOST_DIMS(V1)[i])
{
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Output dimension mis-match. Output"
" 0 (indices start at 0), working inplace"
" on input 0, has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V1)[i],
dims[i]
);
Py_DECREF(V1);
V1 = NULL;
{
__failure = 13;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_13;};
}
}
//std::cerr << "ELEMWISE NEW V1 nd" << V1->nd << "\n";
//std::cerr << "ELEMWISE NEW V1 data" << V1->devdata << "\n";
{
//new block so that failure gotos don't skip over variable initialization
//std::cerr << "calling callkernel\n";
if (callkernel_node_81712a72f6e99c01eca2afba8cbaebcd_0(1, 0, dims
, CudaNdarray_DEV_DATA(V3), CudaNdarray_HOST_STRIDES(V3)
, CudaNdarray_DEV_DATA(V5), CudaNdarray_HOST_STRIDES(V5)
, CudaNdarray_DEV_DATA(V7), CudaNdarray_HOST_STRIDES(V7)
, CudaNdarray_DEV_DATA(V9), CudaNdarray_HOST_STRIDES(V9)
, CudaNdarray_DEV_DATA(V11), CudaNdarray_HOST_STRIDES(V11)
, CudaNdarray_DEV_DATA(V1), CudaNdarray_HOST_STRIDES(V1)
))
{
// error
Py_DECREF(V1);
V1 = NULL;
{
__failure = 13;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_13;};
}
else // no error
{
}
}
//std::cerr << "C_CODE Composite{((i0 * log(clip(i1, i2, i3))) + ((i4 - i0) * log((i4 - clip(i1, i2, i3)))))} END\n";
__label_13:
double __DUMMY_13;
}
__label_12:
//std::cerr << "cleanup " << py_V11 << " " << V11 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V11, (py_V11->ob_refcnt));
if (V11)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V11, (V11->ob_refcnt));
Py_XDECREF(V11);
}
//std::cerr << "cleanup done" << py_V11 << "\n";
{Py_XDECREF(py_V11);}
double __DUMMY_12;
}
__label_10:
//std::cerr << "cleanup " << py_V9 << " " << V9 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V9, (py_V9->ob_refcnt));
if (V9)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V9, (V9->ob_refcnt));
Py_XDECREF(V9);
}
//std::cerr << "cleanup done" << py_V9 << "\n";
{Py_XDECREF(py_V9);}
double __DUMMY_10;
}
__label_8:
//std::cerr << "cleanup " << py_V7 << " " << V7 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt));
if (V7)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V7, (V7->ob_refcnt));
Py_XDECREF(V7);
}
//std::cerr << "cleanup done" << py_V7 << "\n";
{Py_XDECREF(py_V7);}
double __DUMMY_8;
}
__label_6:
//std::cerr << "cleanup " << py_V5 << " " << V5 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt));
if (V5)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V5, (V5->ob_refcnt));
Py_XDECREF(V5);
}
//std::cerr << "cleanup done" << py_V5 << "\n";
{Py_XDECREF(py_V5);}
double __DUMMY_6;
}
__label_4:
//std::cerr << "cleanup " << py_V3 << " " << V3 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
if (V3)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V3, (V3->ob_refcnt));
Py_XDECREF(V3);
}
//std::cerr << "cleanup done" << py_V3 << "\n";
{Py_XDECREF(py_V3);}
double __DUMMY_4;
}
__label_2:
if (!__failure) {
//std::cerr << "sync\n";
if (NULL == V1) {
// failure: sync None to storage
Py_XDECREF(py_V1);
py_V1 = Py_None;
Py_INCREF(py_V1);
}
else
{
if (py_V1 != (PyObject*)V1)
{
Py_XDECREF(py_V1);
py_V1 = (PyObject*)V1;
Py_INCREF(py_V1);
}
assert(py_V1->ob_refcnt);
}
PyObject* old = PyList_GET_ITEM(storage_V1, 0);
{Py_XINCREF(py_V1);}
PyList_SET_ITEM(storage_V1, 0, py_V1);
{Py_XDECREF(old);}
}
//std::cerr << "cleanup " << py_V1 << " " << V1 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
if (V1)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V1, (V1->ob_refcnt));
Py_XDECREF(V1);
}
//std::cerr << "cleanup done" << py_V1 << "\n";
{Py_XDECREF(py_V1);}
double __DUMMY_2;
}
if (__failure) {
// When there is a failure, this code puts the exception
// in __ERROR.
PyObject* err_type = NULL;
PyObject* err_msg = NULL;
PyObject* err_traceback = NULL;
PyErr_Fetch(&err_type, &err_msg, &err_traceback);
if (!err_type) {err_type = Py_None;Py_INCREF(Py_None);}
if (!err_msg) {err_msg = Py_None; Py_INCREF(Py_None);}
if (!err_traceback) {err_traceback = Py_None; Py_INCREF(Py_None);}
PyObject* old_err_type = PyList_GET_ITEM(__ERROR, 0);
PyObject* old_err_msg = PyList_GET_ITEM(__ERROR, 1);
PyObject* old_err_traceback = PyList_GET_ITEM(__ERROR, 2);
PyList_SET_ITEM(__ERROR, 0, err_type);
PyList_SET_ITEM(__ERROR, 1, err_msg);
PyList_SET_ITEM(__ERROR, 2, err_traceback);
{Py_XDECREF(old_err_type);}
{Py_XDECREF(old_err_msg);}
{Py_XDECREF(old_err_traceback);}
}
// The failure code is returned to index what code block failed.
return __failure;
}
};
}
static int __struct_compiled_op_81712a72f6e99c01eca2afba8cbaebcd_executor(__struct_compiled_op_81712a72f6e99c01eca2afba8cbaebcd* self) {
return self->run();
}
static void __struct_compiled_op_81712a72f6e99c01eca2afba8cbaebcd_destructor(void* executor, void* self) {
delete ((__struct_compiled_op_81712a72f6e99c01eca2afba8cbaebcd*)self);
}
//////////////////////
//// Functions
//////////////////////
static PyObject * instantiate(PyObject * self, PyObject *argtuple) {
assert(PyTuple_Check(argtuple));
if (7 != PyTuple_Size(argtuple)){
PyErr_Format(PyExc_TypeError, "Wrong number of arguments, expected 7, got %i", (int)PyTuple_Size(argtuple));
return NULL;
}
__struct_compiled_op_81712a72f6e99c01eca2afba8cbaebcd* struct_ptr = new __struct_compiled_op_81712a72f6e99c01eca2afba8cbaebcd();
if (struct_ptr->init( PyTuple_GET_ITEM(argtuple, 0),PyTuple_GET_ITEM(argtuple, 1),PyTuple_GET_ITEM(argtuple, 2),PyTuple_GET_ITEM(argtuple, 3),PyTuple_GET_ITEM(argtuple, 4),PyTuple_GET_ITEM(argtuple, 5),PyTuple_GET_ITEM(argtuple, 6) ) != 0) {
delete struct_ptr;
return NULL;
}
PyObject* thunk = PyCObject_FromVoidPtrAndDesc((void*)(&__struct_compiled_op_81712a72f6e99c01eca2afba8cbaebcd_executor), struct_ptr, __struct_compiled_op_81712a72f6e99c01eca2afba8cbaebcd_destructor);
return thunk; }
//////////////////////
//// Module init
//////////////////////
static PyMethodDef MyMethods[] = {
{"instantiate", instantiate, METH_VARARGS, "undocumented"} ,
{NULL, NULL, 0, NULL}
};
PyMODINIT_FUNC init81712a72f6e99c01eca2afba8cbaebcd(void){
(void) Py_InitModule("81712a72f6e99c01eca2afba8cbaebcd", MyMethods);
}
| 26622b7c224b8ba35a66ac99b542da51e819cab4.cu | #include <Python.h>
#include <iostream>
#include "theano_mod_helper.h"
#include "cuda_ndarray.cuh"
//////////////////////
//// Support Code
//////////////////////
#define INTDIV_POW2(a, b) (a >> b)
#define INTMOD_POW2(a, b) (a & ((1<<b)-1))
// GpuElemwise{Composite{((i0 * log(clip(i1, i2, i3))) + ((i4 - i0) * log((i4 - clip(i1, i2, i3)))))}}[(0, 0)]
// node.op.destroy_map={0: [0]}
// Input 0 CudaNdarrayType(float32, matrix)
// Input 1 CudaNdarrayType(float32, matrix)
// Input 2 CudaNdarrayType(float32, (True, True))
// Input 3 CudaNdarrayType(float32, (True, True))
// Input 4 CudaNdarrayType(float32, (True, True))
// Output 0 CudaNdarrayType(float32, matrix)
static __global__ void kernel_Composite_node_81712a72f6e99c01eca2afba8cbaebcd_0_1(unsigned int numEls
, const int dim0
, const float * i0_data, int i0_str_0
, const float * i1_data, int i1_str_0
, const float * i2_data, int i2_str_0
, const float * i3_data, int i3_str_0
, const float * i4_data, int i4_str_0
, float * o0_data, int o0_str_0
)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
const float ii_i2_value = i2_data[0];
const float ii_i3_value = i3_data[0];
const float ii_i4_value = i4_data[0];
for (int i = idx; i < numEls; i += numThreads) {
int ii = i;
const float * ii_i0_data = i0_data;
const float * ii_i1_data = i1_data;
float * ii_o0_data = o0_data;
int pos0 = ii;
ii_i0_data += pos0 * i0_str_0;
ii_i1_data += pos0 * i1_str_0;
ii_o0_data += pos0 * o0_str_0;
npy_float32 o0_i;
{
npy_float32 V_DUMMY_ID__tmp1;
V_DUMMY_ID__tmp1 = ii_i1_data[0] < ii_i2_value ? ii_i2_value : ii_i1_data[0] > ii_i3_value ? ii_i3_value : ii_i1_data[0];
npy_float32 V_DUMMY_ID__tmp2;
V_DUMMY_ID__tmp2 = ii_i4_value - ii_i0_data[0];
npy_float32 V_DUMMY_ID__tmp3;
V_DUMMY_ID__tmp3 = ii_i4_value - V_DUMMY_ID__tmp1;
npy_float32 V_DUMMY_ID__tmp4;
V_DUMMY_ID__tmp4 = log(V_DUMMY_ID__tmp1);
npy_float32 V_DUMMY_ID__tmp5;
V_DUMMY_ID__tmp5 = log(V_DUMMY_ID__tmp3);
npy_float32 V_DUMMY_ID__tmp6;
V_DUMMY_ID__tmp6 = ii_i0_data[0] * V_DUMMY_ID__tmp4;
npy_float32 V_DUMMY_ID__tmp7;
V_DUMMY_ID__tmp7 = V_DUMMY_ID__tmp2 * V_DUMMY_ID__tmp5;
o0_i = V_DUMMY_ID__tmp6 + V_DUMMY_ID__tmp7;
}
ii_o0_data[0] = o0_i;
}
}
// GpuElemwise{Composite{((i0 * log(clip(i1, i2, i3))) + ((i4 - i0) * log((i4 - clip(i1, i2, i3)))))}}[(0, 0)]
// node.op.destroy_map={0: [0]}
// Input 0 CudaNdarrayType(float32, matrix)
// Input 1 CudaNdarrayType(float32, matrix)
// Input 2 CudaNdarrayType(float32, (True, True))
// Input 3 CudaNdarrayType(float32, (True, True))
// Input 4 CudaNdarrayType(float32, (True, True))
// Output 0 CudaNdarrayType(float32, matrix)
static __global__ void kernel_Composite_node_81712a72f6e99c01eca2afba8cbaebcd_0_2(unsigned int numEls
, const int dim0, const int dim1
, const float * i0_data, int i0_str_0, int i0_str_1
, const float * i1_data, int i1_str_0, int i1_str_1
, const float * i2_data, int i2_str_0, int i2_str_1
, const float * i3_data, int i3_str_0, int i3_str_1
, const float * i4_data, int i4_str_0, int i4_str_1
, float * o0_data, int o0_str_0, int o0_str_1
)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
const float ii_i2_value = i2_data[0];
const float ii_i3_value = i3_data[0];
const float ii_i4_value = i4_data[0];
for (int i = idx; i < numEls; i += numThreads) {
int ii = i;
const float * ii_i0_data = i0_data;
const float * ii_i1_data = i1_data;
float * ii_o0_data = o0_data;
int pos1 = ii % dim1;
ii = ii / dim1;
ii_i0_data += pos1 * i0_str_1;
ii_i1_data += pos1 * i1_str_1;
ii_o0_data += pos1 * o0_str_1;
int pos0 = ii;
ii_i0_data += pos0 * i0_str_0;
ii_i1_data += pos0 * i1_str_0;
ii_o0_data += pos0 * o0_str_0;
npy_float32 o0_i;
{
npy_float32 V_DUMMY_ID__tmp1;
V_DUMMY_ID__tmp1 = ii_i1_data[0] < ii_i2_value ? ii_i2_value : ii_i1_data[0] > ii_i3_value ? ii_i3_value : ii_i1_data[0];
npy_float32 V_DUMMY_ID__tmp2;
V_DUMMY_ID__tmp2 = ii_i4_value - ii_i0_data[0];
npy_float32 V_DUMMY_ID__tmp3;
V_DUMMY_ID__tmp3 = ii_i4_value - V_DUMMY_ID__tmp1;
npy_float32 V_DUMMY_ID__tmp4;
V_DUMMY_ID__tmp4 = log(V_DUMMY_ID__tmp1);
npy_float32 V_DUMMY_ID__tmp5;
V_DUMMY_ID__tmp5 = log(V_DUMMY_ID__tmp3);
npy_float32 V_DUMMY_ID__tmp6;
V_DUMMY_ID__tmp6 = ii_i0_data[0] * V_DUMMY_ID__tmp4;
npy_float32 V_DUMMY_ID__tmp7;
V_DUMMY_ID__tmp7 = V_DUMMY_ID__tmp2 * V_DUMMY_ID__tmp5;
o0_i = V_DUMMY_ID__tmp6 + V_DUMMY_ID__tmp7;
}
ii_o0_data[0] = o0_i;
}
}
// GpuElemwise{Composite{((i0 * log(clip(i1, i2, i3))) + ((i4 - i0) * log((i4 - clip(i1, i2, i3)))))}}[(0, 0)]
// node.op.destroy_map={0: [0]}
// Input 0 CudaNdarrayType(float32, matrix)
// Input 1 CudaNdarrayType(float32, matrix)
// Input 2 CudaNdarrayType(float32, (True, True))
// Input 3 CudaNdarrayType(float32, (True, True))
// Input 4 CudaNdarrayType(float32, (True, True))
// Output 0 CudaNdarrayType(float32, matrix)
static __global__ void kernel_Composite_node_81712a72f6e99c01eca2afba8cbaebcd_0_Ccontiguous (unsigned int numEls
, const float * i0_data
, const float * i1_data
, const float * i2_data
, const float * i3_data
, const float * i4_data
, float * o0_data
)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
const int numThreads = blockDim.x * gridDim.x;
const float ii_i2_value = i2_data[0];
const float ii_i3_value = i3_data[0];
const float ii_i4_value = i4_data[0];
for (int i = idx; i < numEls; i += numThreads) {
npy_float32 o0_i;
{
npy_float32 V_DUMMY_ID__tmp1;
V_DUMMY_ID__tmp1 = i1_data[i] < ii_i2_value ? ii_i2_value : i1_data[i] > ii_i3_value ? ii_i3_value : i1_data[i];
npy_float32 V_DUMMY_ID__tmp2;
V_DUMMY_ID__tmp2 = ii_i4_value - i0_data[i];
npy_float32 V_DUMMY_ID__tmp3;
V_DUMMY_ID__tmp3 = ii_i4_value - V_DUMMY_ID__tmp1;
npy_float32 V_DUMMY_ID__tmp4;
V_DUMMY_ID__tmp4 = log(V_DUMMY_ID__tmp1);
npy_float32 V_DUMMY_ID__tmp5;
V_DUMMY_ID__tmp5 = log(V_DUMMY_ID__tmp3);
npy_float32 V_DUMMY_ID__tmp6;
V_DUMMY_ID__tmp6 = i0_data[i] * V_DUMMY_ID__tmp4;
npy_float32 V_DUMMY_ID__tmp7;
V_DUMMY_ID__tmp7 = V_DUMMY_ID__tmp2 * V_DUMMY_ID__tmp5;
o0_i = V_DUMMY_ID__tmp6 + V_DUMMY_ID__tmp7;
}
o0_data[i] = o0_i;
}
}
static void can_collapse_node_81712a72f6e99c01eca2afba8cbaebcd_0(int nd, const int * dims, const int * strides, int collapse[])
{
//can we collapse dims[i] and dims[i-1]
for(int i=nd-1;i>0;i--){
    if(strides[i]*dims[i]==strides[i-1]){//the dims nd-1 are not strided against dimension nd
collapse[i]=1;
}else collapse[i]=0;
}
}
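// The launcher below first drops size-1 dimensions that are broadcast across
// all arguments, then merges adjacent dimensions that can_collapse_* marks as
// contiguous, and finally dispatches the 0-D (C-contiguous), 1-D or 2-D kernel
// variant based on the rank that remains.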
static int callkernel_node_81712a72f6e99c01eca2afba8cbaebcd_0(unsigned int numEls, const int d,
const int * dims,
const float * i0_data, const int * i0_str, const float * i1_data, const int * i1_str, const float * i2_data, const int * i2_str, const float * i3_data, const int * i3_str, const float * i4_data, const int * i4_str,
float * o0_data, const int * o0_str)
{
numEls = dims[0]*dims[1]*1;
int local_dims[2];
int local_str[5][2];
int local_ostr[1][2];
int nd_collapse = 2;
for(int i=0;i<2;i++){//init new dim
local_dims[i]=dims[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[0][i]=i0_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[1][i]=i1_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[2][i]=i2_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[3][i]=i3_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_str[4][i]=i4_str[i];
}
for(int i=0;i<2;i++){//init new strides
local_ostr[0][i]=o0_str[i];
}
for(int id=0;id<nd_collapse;id++){
bool all_broadcast=true;
for(int input_id=0;input_id<5;input_id++){
if(local_str[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false;
}
for(int input_id=0;input_id<1;input_id++){
if(local_ostr[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false;
}
if(all_broadcast){
for(int j=id+1;j<nd_collapse;j++)//remove dims i from the array
local_dims[j-1]=local_dims[j];
for(int input_id=0;input_id<5;input_id++){
for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array
local_str[input_id][j-1]=local_str[input_id][j];
}
}
for(int output_id=0;output_id<1;output_id++){
for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array
local_ostr[output_id][j-1]=local_ostr[output_id][j];
}
}
nd_collapse--; id--;
}
}
int nd_collapse_[2] = {1,1};
int nd_collapse_0[2] = {1,1};
can_collapse_node_81712a72f6e99c01eca2afba8cbaebcd_0(nd_collapse, local_dims, local_str[0], nd_collapse_0);
for(int i=0;i<nd_collapse;i++){
if(nd_collapse_0[i]==0)
nd_collapse_[i]=0;
}
int nd_collapse_1[2] = {1,1};
can_collapse_node_81712a72f6e99c01eca2afba8cbaebcd_0(nd_collapse, local_dims, local_str[1], nd_collapse_1);
for(int i=0;i<nd_collapse;i++){
if(nd_collapse_1[i]==0)
nd_collapse_[i]=0;
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[0][i-1]=local_str[0][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[0][j-1]=local_str[0][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[1][i-1]=local_str[1][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[1][j-1]=local_str[1][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[2][i-1]=local_str[2][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[2][j-1]=local_str[2][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[3][i-1]=local_str[3][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[3][j-1]=local_str[3][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_str[4][i-1]=local_str[4][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_str[4][j-1]=local_str[4][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_ostr[0][i-1]=local_ostr[0][i];//set new strides
for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array
local_ostr[0][j-1]=local_ostr[0][j];
}
}
for(int i=nd_collapse-1;i>0;i--){
if(nd_collapse_[i]==1){
local_dims[i-1]*=local_dims[i];//set new dims
for(int j=i+1;j<nd_collapse;j++)//remove dims i from the array
local_dims[j-1]=local_dims[j];
}
}
for(int i=1, end=nd_collapse;i<end;i++){
if(nd_collapse_[i]==1)nd_collapse--;
}
if(nd_collapse == 1
&& local_str[0][nd_collapse-1]==1 && local_str[1][nd_collapse-1]==1 && local_ostr[0][nd_collapse-1]==1
){nd_collapse=0;}
if(numEls==0) return 0;
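    // Switch on the rank left after collapsing: 0 means every operand is fully
    // C-contiguous (flat kernel); 1 and 2 select the strided kernel of that rank.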
switch (nd_collapse==0?0:min(2,nd_collapse)) {
case 0: {
//first use at least a full warp
int threads_per_block = std::min(numEls, (unsigned int)32); //WARP SIZE
//next start adding multiprocessors
int n_blocks = std::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = std::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
kernel_Composite_node_81712a72f6e99c01eca2afba8cbaebcd_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, i0_data, i1_data, i2_data, i3_data, i4_data, o0_data);
//std::cerr << "calling callkernel returned\n";
CNDA_THREAD_SYNC;
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n",
"GpuElemwise node_81712a72f6e99c01eca2afba8cbaebcd_0 Composite", cudaGetErrorString(err),
n_blocks, threads_per_block,
"kernel_Composite_node_81712a72f6e99c01eca2afba8cbaebcd_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, i0_data, i1_data, i2_data, i3_data, i4_data, o0_data)");
return -1;
}
return 0;
} break;
case 1: {
//first use at least a full warp
int threads_per_block = std::min(numEls, (unsigned int)32); //WARP SIZE
//next start adding multiprocessors
int n_blocks = std::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = std::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
kernel_Composite_node_81712a72f6e99c01eca2afba8cbaebcd_0_1<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], i0_data, local_str[0][0], i1_data, local_str[1][0], i2_data, local_str[2][0], i3_data, local_str[3][0], i4_data, local_str[4][0], o0_data, local_ostr[0][0]);
CNDA_THREAD_SYNC;
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n",
"GpuElemwise node_81712a72f6e99c01eca2afba8cbaebcd_0 Composite", cudaGetErrorString(err),
n_blocks, threads_per_block,
"kernel_Composite_node_81712a72f6e99c01eca2afba8cbaebcd_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], i0_data, local_str[0][0], i1_data, local_str[1][0], i2_data, local_str[2][0], i3_data, local_str[3][0], i4_data, local_str[4][0], o0_data, local_ostr[0][0])");
return -1;
}
return 0;
} break;
case 2: {
//first use at least a full warp
int threads_per_block = std::min(numEls, (unsigned int)32); //WARP SIZE
//next start adding multiprocessors
int n_blocks = std::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS
// next start adding more warps per multiprocessor
if (threads_per_block * n_blocks < numEls)
threads_per_block = std::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
kernel_Composite_node_81712a72f6e99c01eca2afba8cbaebcd_0_2<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], local_dims[1], i0_data, local_str[0][0], local_str[0][1], i1_data, local_str[1][0], local_str[1][1], i2_data, local_str[2][0], local_str[2][1], i3_data, local_str[3][0], local_str[3][1], i4_data, local_str[4][0], local_str[4][1], o0_data, local_ostr[0][0], local_ostr[0][1]);
CNDA_THREAD_SYNC;
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n",
"GpuElemwise node_81712a72f6e99c01eca2afba8cbaebcd_0 Composite", cudaGetErrorString(err),
n_blocks, threads_per_block,
"kernel_Composite_node_81712a72f6e99c01eca2afba8cbaebcd_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], local_dims[1], i0_data, local_str[0][0], local_str[0][1], i1_data, local_str[1][0], local_str[1][1], i2_data, local_str[2][0], local_str[2][1], i3_data, local_str[3][0], local_str[3][1], i4_data, local_str[4][0], local_str[4][1], o0_data, local_ostr[0][0], local_ostr[0][1])");
return -1;
}
return 0;
} break;
}
return -2;
}
namespace {
struct __struct_compiled_op_81712a72f6e99c01eca2afba8cbaebcd {
PyObject* __ERROR;
PyObject* storage_V3;
PyObject* storage_V5;
PyObject* storage_V7;
PyObject* storage_V9;
PyObject* storage_V11;
PyObject* storage_V1;
__struct_compiled_op_81712a72f6e99c01eca2afba8cbaebcd() {
// This is only somewhat safe because we:
// 1) Are not a virtual class
// 2) Do not use any virtual classes in the members
// 3) Deal with mostly POD and pointers
// If this changes, we would have to revise this, but for
// now I am tired of chasing segfaults because
// initialization code had an error and some pointer has
// a junk value.
memset(this, 0, sizeof(*this));
}
~__struct_compiled_op_81712a72f6e99c01eca2afba8cbaebcd(void) {
cleanup();
}
int init(PyObject* __ERROR, PyObject* storage_V3, PyObject* storage_V5, PyObject* storage_V7, PyObject* storage_V9, PyObject* storage_V11, PyObject* storage_V1) {
Py_XINCREF(storage_V3);
Py_XINCREF(storage_V5);
Py_XINCREF(storage_V7);
Py_XINCREF(storage_V9);
Py_XINCREF(storage_V11);
Py_XINCREF(storage_V1);
this->storage_V3 = storage_V3;
this->storage_V5 = storage_V5;
this->storage_V7 = storage_V7;
this->storage_V9 = storage_V9;
this->storage_V11 = storage_V11;
this->storage_V1 = storage_V1;
this->__ERROR = __ERROR;
return 0;
}
void cleanup(void) {
__label_1:
double __DUMMY_1;
__label_3:
double __DUMMY_3;
__label_5:
double __DUMMY_5;
__label_7:
double __DUMMY_7;
__label_9:
double __DUMMY_9;
__label_11:
double __DUMMY_11;
__label_14:
double __DUMMY_14;
Py_XDECREF(this->storage_V3);
Py_XDECREF(this->storage_V5);
Py_XDECREF(this->storage_V7);
Py_XDECREF(this->storage_V9);
Py_XDECREF(this->storage_V11);
Py_XDECREF(this->storage_V1);
}
int run(void) {
int __failure = 0;
PyObject* py_V1;
CudaNdarray * V1;
PyObject* py_V3;
CudaNdarray * V3;
PyObject* py_V5;
CudaNdarray * V5;
PyObject* py_V7;
CudaNdarray * V7;
PyObject* py_V9;
CudaNdarray * V9;
PyObject* py_V11;
CudaNdarray * V11;
{
py_V1 = PyList_GET_ITEM(storage_V1, 0);
{Py_XINCREF(py_V1);}
if (py_V1 == Py_None)
{
V1 = NULL;
}
else
{
assert(py_V1->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V1))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
V1 = (CudaNdarray*)py_V1;
//std::cerr << "c_extract " << V1 << '\n';
if (V1->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V1->nd);
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
//std::cerr << "c_extract " << V1 << " nd check passed\n";
assert(V1);
Py_INCREF(py_V1);
}
else if (py_V1 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V1 = NULL;
{
__failure = 2;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_2;};
}
//std::cerr << "c_extract done " << V1 << '\n';
}
{
py_V3 = PyList_GET_ITEM(storage_V3, 0);
{Py_XINCREF(py_V3);}
assert(py_V3->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V3))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
V3 = (CudaNdarray*)py_V3;
//std::cerr << "c_extract " << V3 << '\n';
if (V3->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V3->nd);
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract " << V3 << " nd check passed\n";
assert(V3);
Py_INCREF(py_V3);
}
else if (py_V3 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V3 = NULL;
{
__failure = 4;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_4;};
}
//std::cerr << "c_extract done " << V3 << '\n';
{
py_V5 = PyList_GET_ITEM(storage_V5, 0);
{Py_XINCREF(py_V5);}
assert(py_V5->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V5))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt));
V5 = (CudaNdarray*)py_V5;
//std::cerr << "c_extract " << V5 << '\n';
if (V5->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V5->nd);
V5 = NULL;
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;};
}
//std::cerr << "c_extract " << V5 << " nd check passed\n";
assert(V5);
Py_INCREF(py_V5);
}
else if (py_V5 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V5 = NULL;
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V5 = NULL;
{
__failure = 6;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_6;};
}
//std::cerr << "c_extract done " << V5 << '\n';
{
py_V7 = PyList_GET_ITEM(storage_V7, 0);
{Py_XINCREF(py_V7);}
assert(py_V7->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V7))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt));
V7 = (CudaNdarray*)py_V7;
//std::cerr << "c_extract " << V7 << '\n';
if (V7->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V7->nd);
V7 = NULL;
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;};
}
//std::cerr << "c_extract " << V7 << " nd check passed\n";
if (CudaNdarray_HOST_DIMS(V7)[0] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V7)[0], 0);
V7 = NULL;
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;};
}
//std::cerr << "c_extract " << V7 << "dim check 0 passed\n";
//std::cerr << "c_extract " << V7 << "checking bcast 0 <" << V7->str<< ">\n";
//std::cerr << "c_extract " << V7->str[0] << "\n";
if (CudaNdarray_HOST_STRIDES(V7)[0])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V7)[0], 0);
V7 = NULL;
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;};
}
//std::cerr << "c_extract " << V7 << "bcast check 0 passed\n";
if (CudaNdarray_HOST_DIMS(V7)[1] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V7)[1], 1);
V7 = NULL;
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;};
}
//std::cerr << "c_extract " << V7 << "dim check 1 passed\n";
//std::cerr << "c_extract " << V7 << "checking bcast 1 <" << V7->str<< ">\n";
//std::cerr << "c_extract " << V7->str[1] << "\n";
if (CudaNdarray_HOST_STRIDES(V7)[1])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V7)[1], 1);
V7 = NULL;
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;};
}
//std::cerr << "c_extract " << V7 << "bcast check 1 passed\n";
assert(V7);
Py_INCREF(py_V7);
}
else if (py_V7 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V7 = NULL;
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V7 = NULL;
{
__failure = 8;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_8;};
}
//std::cerr << "c_extract done " << V7 << '\n';
{
py_V9 = PyList_GET_ITEM(storage_V9, 0);
{Py_XINCREF(py_V9);}
assert(py_V9->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V9))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V9, (py_V9->ob_refcnt));
V9 = (CudaNdarray*)py_V9;
//std::cerr << "c_extract " << V9 << '\n';
if (V9->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V9->nd);
V9 = NULL;
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;};
}
//std::cerr << "c_extract " << V9 << " nd check passed\n";
if (CudaNdarray_HOST_DIMS(V9)[0] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V9)[0], 0);
V9 = NULL;
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;};
}
//std::cerr << "c_extract " << V9 << "dim check 0 passed\n";
//std::cerr << "c_extract " << V9 << "checking bcast 0 <" << V9->str<< ">\n";
//std::cerr << "c_extract " << V9->str[0] << "\n";
if (CudaNdarray_HOST_STRIDES(V9)[0])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V9)[0], 0);
V9 = NULL;
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;};
}
//std::cerr << "c_extract " << V9 << "bcast check 0 passed\n";
if (CudaNdarray_HOST_DIMS(V9)[1] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V9)[1], 1);
V9 = NULL;
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;};
}
//std::cerr << "c_extract " << V9 << "dim check 1 passed\n";
//std::cerr << "c_extract " << V9 << "checking bcast 1 <" << V9->str<< ">\n";
//std::cerr << "c_extract " << V9->str[1] << "\n";
if (CudaNdarray_HOST_STRIDES(V9)[1])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V9)[1], 1);
V9 = NULL;
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;};
}
//std::cerr << "c_extract " << V9 << "bcast check 1 passed\n";
assert(V9);
Py_INCREF(py_V9);
}
else if (py_V9 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V9 = NULL;
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V9, (py_V9->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V9 = NULL;
{
__failure = 10;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_10;};
}
//std::cerr << "c_extract done " << V9 << '\n';
{
py_V11 = PyList_GET_ITEM(storage_V11, 0);
{Py_XINCREF(py_V11);}
assert(py_V11->ob_refcnt >= 2); // There should be at least one ref from the container object,
// and one ref from the local scope.
if (CudaNdarray_Check(py_V11))
{
//fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V11, (py_V11->ob_refcnt));
V11 = (CudaNdarray*)py_V11;
//std::cerr << "c_extract " << V11 << '\n';
if (V11->nd != 2)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2",
V11->nd);
V11 = NULL;
{
__failure = 12;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_12;};
}
//std::cerr << "c_extract " << V11 << " nd check passed\n";
if (CudaNdarray_HOST_DIMS(V11)[0] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V11)[0], 0);
V11 = NULL;
{
__failure = 12;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_12;};
}
//std::cerr << "c_extract " << V11 << "dim check 0 passed\n";
//std::cerr << "c_extract " << V11 << "checking bcast 0 <" << V11->str<< ">\n";
//std::cerr << "c_extract " << V11->str[0] << "\n";
if (CudaNdarray_HOST_STRIDES(V11)[0])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V11)[0], 0);
V11 = NULL;
{
__failure = 12;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_12;};
}
//std::cerr << "c_extract " << V11 << "bcast check 0 passed\n";
if (CudaNdarray_HOST_DIMS(V11)[1] != 1)
{
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i",
CudaNdarray_HOST_DIMS(V11)[1], 1);
V11 = NULL;
{
__failure = 12;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_12;};
}
//std::cerr << "c_extract " << V11 << "dim check 1 passed\n";
//std::cerr << "c_extract " << V11 << "checking bcast 1 <" << V11->str<< ">\n";
//std::cerr << "c_extract " << V11->str[1] << "\n";
if (CudaNdarray_HOST_STRIDES(V11)[1])
{
//std::cerr << "c_extract bad stride detected...\n";
PyErr_Format(PyExc_RuntimeError,
"c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i",
CudaNdarray_HOST_STRIDES(V11)[1], 1);
V11 = NULL;
{
__failure = 12;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_12;};
}
//std::cerr << "c_extract " << V11 << "bcast check 1 passed\n";
assert(V11);
Py_INCREF(py_V11);
}
else if (py_V11 == Py_None)
{
PyErr_SetString(PyExc_TypeError,
"expected a CudaNdarray, not None");
V11 = NULL;
{
__failure = 12;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_12;};
}
else
{
//fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V11, (py_V11->ob_refcnt));
PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray");
V11 = NULL;
{
__failure = 12;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_12;};
}
//std::cerr << "c_extract done " << V11 << '\n';
{
// Op class GpuElemwise
//std::cerr << "C_CODE Composite{((i0 * log(clip(i1, i2, i3))) + ((i4 - i0) * log((i4 - clip(i1, i2, i3)))))} START\n";
//standard elemwise size checks
int dims[2] = {1,1};
int broadcasts_V3[2] = {0, 0};
int broadcasts_V5[2] = {0, 0};
int broadcasts_V7[2] = {1, 1};
int broadcasts_V9[2] = {1, 1};
int broadcasts_V11[2] = {1, 1};
//std::cerr << "C_CODE Composite{((i0 * log(clip(i1, i2, i3))) + ((i4 - i0) * log((i4 - clip(i1, i2, i3)))))} checking input V3\n";
if (2 != V3->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V3->nd);
{
__failure = 13;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_13;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V3)[i] : dims[i];
if ((!(broadcasts_V3[i] &&
CudaNdarray_HOST_DIMS(V3)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V3)[i]))
{
//std::cerr << "C_CODE Composite{((i0 * log(clip(i1, i2, i3))) + ((i4 - i0) * log((i4 - clip(i1, i2, i3)))))} checking input V3 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 0 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V3)[i],
dims[i]
);
{
__failure = 13;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_13;};
}
}
//std::cerr << "C_CODE Composite{((i0 * log(clip(i1, i2, i3))) + ((i4 - i0) * log((i4 - clip(i1, i2, i3)))))} checking input V5\n";
if (2 != V5->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V5->nd);
{
__failure = 13;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_13;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V5)[i] : dims[i];
if ((!(broadcasts_V5[i] &&
CudaNdarray_HOST_DIMS(V5)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V5)[i]))
{
//std::cerr << "C_CODE Composite{((i0 * log(clip(i1, i2, i3))) + ((i4 - i0) * log((i4 - clip(i1, i2, i3)))))} checking input V5 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 1 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V5)[i],
dims[i]
);
{
__failure = 13;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_13;};
}
}
//std::cerr << "C_CODE Composite{((i0 * log(clip(i1, i2, i3))) + ((i4 - i0) * log((i4 - clip(i1, i2, i3)))))} checking input V7\n";
if (2 != V7->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V7->nd);
{
__failure = 13;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_13;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V7)[i] : dims[i];
if ((!(broadcasts_V7[i] &&
CudaNdarray_HOST_DIMS(V7)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V7)[i]))
{
//std::cerr << "C_CODE Composite{((i0 * log(clip(i1, i2, i3))) + ((i4 - i0) * log((i4 - clip(i1, i2, i3)))))} checking input V7 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 2 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V7)[i],
dims[i]
);
{
__failure = 13;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_13;};
}
}
//std::cerr << "C_CODE Composite{((i0 * log(clip(i1, i2, i3))) + ((i4 - i0) * log((i4 - clip(i1, i2, i3)))))} checking input V9\n";
if (2 != V9->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V9->nd);
{
__failure = 13;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_13;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V9)[i] : dims[i];
if ((!(broadcasts_V9[i] &&
CudaNdarray_HOST_DIMS(V9)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V9)[i]))
{
//std::cerr << "C_CODE Composite{((i0 * log(clip(i1, i2, i3))) + ((i4 - i0) * log((i4 - clip(i1, i2, i3)))))} checking input V9 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 3 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V9)[i],
dims[i]
);
{
__failure = 13;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_13;};
}
}
//std::cerr << "C_CODE Composite{((i0 * log(clip(i1, i2, i3))) + ((i4 - i0) * log((i4 - clip(i1, i2, i3)))))} checking input V11\n";
if (2 != V11->nd)
{
PyErr_Format(PyExc_TypeError,
"need 2 dims, not %i", V11->nd);
{
__failure = 13;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_13;};
}
for (int i = 0; i< 2; ++i)
{
dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V11)[i] : dims[i];
if ((!(broadcasts_V11[i] &&
CudaNdarray_HOST_DIMS(V11)[i] == 1)) &&
(dims[i] != CudaNdarray_HOST_DIMS(V11)[i]))
{
//std::cerr << "C_CODE Composite{((i0 * log(clip(i1, i2, i3))) + ((i4 - i0) * log((i4 - clip(i1, i2, i3)))))} checking input V11 failed\n";
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Input dimension mis-match. Input"
" 4 (indices start at 0) has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V11)[i],
dims[i]
);
{
__failure = 13;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_13;};
}
}
Py_XDECREF(V1);
V1 = V3;
Py_INCREF(V1);
for (int i = 0; (i< 2) && (V1); ++i) {
if (dims[i] != CudaNdarray_HOST_DIMS(V1)[i])
{
PyErr_Format(PyExc_ValueError,
"GpuElemwise. Output dimension mis-match. Output"
" 0 (indices start at 0), working inplace"
" on input 0, has shape[%i] == %i"
", but the output's size on that axis is %i.",
i,
CudaNdarray_HOST_DIMS(V1)[i],
dims[i]
);
Py_DECREF(V1);
V1 = NULL;
{
__failure = 13;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_13;};
}
}
//std::cerr << "ELEMWISE NEW V1 nd" << V1->nd << "\n";
//std::cerr << "ELEMWISE NEW V1 data" << V1->devdata << "\n";
{
//new block so that failure gotos don't skip over variable initialization
//std::cerr << "calling callkernel\n";
if (callkernel_node_81712a72f6e99c01eca2afba8cbaebcd_0(1, 0, dims
, CudaNdarray_DEV_DATA(V3), CudaNdarray_HOST_STRIDES(V3)
, CudaNdarray_DEV_DATA(V5), CudaNdarray_HOST_STRIDES(V5)
, CudaNdarray_DEV_DATA(V7), CudaNdarray_HOST_STRIDES(V7)
, CudaNdarray_DEV_DATA(V9), CudaNdarray_HOST_STRIDES(V9)
, CudaNdarray_DEV_DATA(V11), CudaNdarray_HOST_STRIDES(V11)
, CudaNdarray_DEV_DATA(V1), CudaNdarray_HOST_STRIDES(V1)
))
{
// error
Py_DECREF(V1);
V1 = NULL;
{
__failure = 13;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_13;};
}
else // no error
{
}
}
//std::cerr << "C_CODE Composite{((i0 * log(clip(i1, i2, i3))) + ((i4 - i0) * log((i4 - clip(i1, i2, i3)))))} END\n";
__label_13:
double __DUMMY_13;
}
__label_12:
//std::cerr << "cleanup " << py_V11 << " " << V11 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V11, (py_V11->ob_refcnt));
if (V11)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V11, (V11->ob_refcnt));
Py_XDECREF(V11);
}
//std::cerr << "cleanup done" << py_V11 << "\n";
{Py_XDECREF(py_V11);}
double __DUMMY_12;
}
__label_10:
//std::cerr << "cleanup " << py_V9 << " " << V9 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V9, (py_V9->ob_refcnt));
if (V9)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V9, (V9->ob_refcnt));
Py_XDECREF(V9);
}
//std::cerr << "cleanup done" << py_V9 << "\n";
{Py_XDECREF(py_V9);}
double __DUMMY_10;
}
__label_8:
//std::cerr << "cleanup " << py_V7 << " " << V7 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt));
if (V7)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V7, (V7->ob_refcnt));
Py_XDECREF(V7);
}
//std::cerr << "cleanup done" << py_V7 << "\n";
{Py_XDECREF(py_V7);}
double __DUMMY_8;
}
__label_6:
//std::cerr << "cleanup " << py_V5 << " " << V5 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt));
if (V5)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V5, (V5->ob_refcnt));
Py_XDECREF(V5);
}
//std::cerr << "cleanup done" << py_V5 << "\n";
{Py_XDECREF(py_V5);}
double __DUMMY_6;
}
__label_4:
//std::cerr << "cleanup " << py_V3 << " " << V3 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt));
if (V3)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V3, (V3->ob_refcnt));
Py_XDECREF(V3);
}
//std::cerr << "cleanup done" << py_V3 << "\n";
{Py_XDECREF(py_V3);}
double __DUMMY_4;
}
__label_2:
if (!__failure) {
//std::cerr << "sync\n";
if (NULL == V1) {
// failure: sync None to storage
Py_XDECREF(py_V1);
py_V1 = Py_None;
Py_INCREF(py_V1);
}
else
{
if (py_V1 != (PyObject*)V1)
{
Py_XDECREF(py_V1);
py_V1 = (PyObject*)V1;
Py_INCREF(py_V1);
}
assert(py_V1->ob_refcnt);
}
PyObject* old = PyList_GET_ITEM(storage_V1, 0);
{Py_XINCREF(py_V1);}
PyList_SET_ITEM(storage_V1, 0, py_V1);
{Py_XDECREF(old);}
}
//std::cerr << "cleanup " << py_V1 << " " << V1 << "\n";
//fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt));
if (V1)
{
//fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V1, (V1->ob_refcnt));
Py_XDECREF(V1);
}
//std::cerr << "cleanup done" << py_V1 << "\n";
{Py_XDECREF(py_V1);}
double __DUMMY_2;
}
if (__failure) {
// When there is a failure, this code puts the exception
// in __ERROR.
PyObject* err_type = NULL;
PyObject* err_msg = NULL;
PyObject* err_traceback = NULL;
PyErr_Fetch(&err_type, &err_msg, &err_traceback);
if (!err_type) {err_type = Py_None;Py_INCREF(Py_None);}
if (!err_msg) {err_msg = Py_None; Py_INCREF(Py_None);}
if (!err_traceback) {err_traceback = Py_None; Py_INCREF(Py_None);}
PyObject* old_err_type = PyList_GET_ITEM(__ERROR, 0);
PyObject* old_err_msg = PyList_GET_ITEM(__ERROR, 1);
PyObject* old_err_traceback = PyList_GET_ITEM(__ERROR, 2);
PyList_SET_ITEM(__ERROR, 0, err_type);
PyList_SET_ITEM(__ERROR, 1, err_msg);
PyList_SET_ITEM(__ERROR, 2, err_traceback);
{Py_XDECREF(old_err_type);}
{Py_XDECREF(old_err_msg);}
{Py_XDECREF(old_err_traceback);}
}
// The failure code is returned to index what code block failed.
return __failure;
}
};
}
static int __struct_compiled_op_81712a72f6e99c01eca2afba8cbaebcd_executor(__struct_compiled_op_81712a72f6e99c01eca2afba8cbaebcd* self) {
return self->run();
}
static void __struct_compiled_op_81712a72f6e99c01eca2afba8cbaebcd_destructor(void* executor, void* self) {
delete ((__struct_compiled_op_81712a72f6e99c01eca2afba8cbaebcd*)self);
}
//////////////////////
//// Functions
//////////////////////
static PyObject * instantiate(PyObject * self, PyObject *argtuple) {
assert(PyTuple_Check(argtuple));
if (7 != PyTuple_Size(argtuple)){
PyErr_Format(PyExc_TypeError, "Wrong number of arguments, expected 7, got %i", (int)PyTuple_Size(argtuple));
return NULL;
}
__struct_compiled_op_81712a72f6e99c01eca2afba8cbaebcd* struct_ptr = new __struct_compiled_op_81712a72f6e99c01eca2afba8cbaebcd();
if (struct_ptr->init( PyTuple_GET_ITEM(argtuple, 0),PyTuple_GET_ITEM(argtuple, 1),PyTuple_GET_ITEM(argtuple, 2),PyTuple_GET_ITEM(argtuple, 3),PyTuple_GET_ITEM(argtuple, 4),PyTuple_GET_ITEM(argtuple, 5),PyTuple_GET_ITEM(argtuple, 6) ) != 0) {
delete struct_ptr;
return NULL;
}
PyObject* thunk = PyCObject_FromVoidPtrAndDesc((void*)(&__struct_compiled_op_81712a72f6e99c01eca2afba8cbaebcd_executor), struct_ptr, __struct_compiled_op_81712a72f6e99c01eca2afba8cbaebcd_destructor);
return thunk; }
//////////////////////
//// Module init
//////////////////////
static PyMethodDef MyMethods[] = {
{"instantiate", instantiate, METH_VARARGS, "undocumented"} ,
{NULL, NULL, 0, NULL}
};
PyMODINIT_FUNC init81712a72f6e99c01eca2afba8cbaebcd(void){
(void) Py_InitModule("81712a72f6e99c01eca2afba8cbaebcd", MyMethods);
}
|
a0f11f945f21decb5702915436e1a268f634104a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// nsc.cpp : This file contains the 'main' function. Program execution begins and ends there.
//
#include <iostream>
#include <fstream>
using namespace std;
const int nx = 41;
const int ny = 41;
const int nt = 500;
const int nit = 50;
const int ab = 2;
double dx = 2.0 / ((double) nx - 1.0);
double dy = 2.0 / ((double) ny - 1.0);
double rho = 1.0;
double nu = 0.1;
double dt = 0.001;
const int M = 128;
double u[ny * nx], v[ny * nx], p[ny * nx], b[ny * nx];
// Previous-time-step buffers. Managed memory is assumed here so that both the
// device kernels and the host-side output code can read them directly.
__device__ __managed__ double un[ny * nx];
__device__ __managed__ double vn[ny * nx];
// Host-side initialisation of the file-scope arrays (they are only used here).
void init() {
for (int i = 0; i < ny; i++)
for (int j = 0; j < nx; j++) {
int k = i * nx + j;
u[k] = 0;
v[k] = 0;
p[k] = 0;
b[k] = 0;
}
// std::cout << "init done" << std:endl;
}
__global__ void bulid_up_b(double * b, double rho, double dt, double * u, double * v, double dx, double dy) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = blockIdx.y;
int index = idy * nx + idx;
  // Only interior points have all four neighbours; boundary threads and the
  // padded threads with idx >= nx do nothing.
  if (idx <= 0 || idx >= nx - 1 || idy <= 0 || idy >= ny - 1) return;
  b[index] = rho * (1 / dt * ((u[index + 1] - u[index - 1]) / (2 * dx) + (v[index + nx] - v[index - nx]) / (2 * dy)) -
    pow((u[index + 1] - u[index - 1]) / (2 * dx), 2.0) -
    2 * ((u[index + nx] - u[index - nx]) / (2 * dy) *
      (v[index + 1] - v[index - 1]) / (2 * dx)) -
    pow((v[index + nx] - v[index - nx]) / (2 * dy), 2.0));
}
__global__ void pressure_poisson(double * p, double dx, double dy, double * b) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = blockIdx.y;
int index = idy * nx + idx;
double * pn = (double * ) malloc(nx * ny * sizeof(double));
for (int i = 0; i < (nx * ny); i++)
pn[i] = p[i];
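  // Per-thread snapshot of the pressure field (Jacobi-style update). Blocks that
  // ran earlier may already have updated p, so the snapshot is only approximately
  // the previous iterate.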
if (idx > 0 && idx < nx - 1 && idy > 0 && idy < ny - 1) {
p[index] = (((pn[index + 1] + pn[index - 1]) * pow(dy, 2) +
(pn[index + nx] + pn[index - nx]) * pow(dx, 2)) /
(2 * (pow(dx, 2) + pow(dy, 2))) -
pow(dx, 2) * pow(dy, 2) / (2 * (pow(dx, 2) + pow(dy, 2))) *
        b[index]);
  }
  // Boundary conditions: zero normal pressure gradient on the left, right and
  // bottom walls, p = 0 along the moving lid at the top.
  if (idx == 0 && idy < ny) {
    p[index] = p[index + 1]; // dp/dx = 0 at left boundary
  } else if (idx == nx - 1 && idy < ny) {
    p[index] = p[index - 1]; // dp/dx = 0 at right boundary
  } else if (idy == 0 && idx < nx) {
    p[index] = p[index + nx]; // dp/dy = 0 at bottom boundary
  } else if (idy == ny - 1 && idx < nx) {
    p[index] = 0.0; // p = 0 at top boundary
  }
  free(pn); // pair the in-kernel malloc with free so the device heap is not exhausted
}
__global__ void cavity_flow(double * u, double * v, double dt, double dx, double dy,
double * p, double rho, double nu, double * b) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = blockIdx.y;
int index = idy * nx + idx;
  // un/vn hold the previous time step; they are copied on the host before each
  // launch, so no per-thread copy is needed here.
if (idx > 0 && idx < nx - 1 && idy > 0 && idy < ny - 1) {
u[index] = un[index] -
un[index] * dt / dx * (un[index] - un[index - 1]) -
vn[index] * dt / dy * (un[index] - un[index - nx]) -
dt / (2 * rho * dx) * (p[index + 1] - p[index - 1]) +
nu * (dt / pow(dx, 2.0) * (un[index + 1] - 2 * un[index] + un[index - 1]) +
dt / pow(dy, 2.0) * (un[index + nx] - 2 * un[index] + un[index - nx]));
v[index] = vn[index] -
un[index] * dt / dx * (vn[index] - vn[index - 1]) -
vn[index] * dt / dy * (vn[index] - vn[index - nx]) -
dt / (2 * rho * dy) * (p[index + nx] - p[index - nx]) +
nu * (dt / pow(dx, 2.0) * (vn[index + 1] - 2 * vn[index] + vn[index - 1]) +
        dt / pow(dy, 2.0) * (vn[index + nx] - 2 * vn[index] + vn[index - nx]));
}
if (idx < nx) {
u[0 * nx + idx] = 0.0;
u[(nx - 1) * nx + idx] = 1.0;
v[0 * nx + idx] = 0.0;
v[(nx - 1) * nx + idx] = 0.0;
}
if (idy < ny) {
u[idy * nx + 0] = 0.0;
u[idy * nx + nx - 1] = 0.0;
v[idy * nx + 0] = 0.0;
v[idy * nx + nx - 1] = 0.0;
}
}
int main() {
init();
double * u, * v, * p, * uo, * vo, * po, * b;
int size = ny * nx * sizeof(double);
hipMallocManaged( & u, size);
hipMallocManaged( & v, size);
hipMallocManaged( & p, size);
hipMallocManaged( & uo, size);
hipMallocManaged( & vo, size);
hipMallocManaged( & po, size);
hipMallocManaged( & b, size);
dim3 block(M, 1);
dim3 grid((nx + M - 1) / M, ny);
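  // One thread per x-index (the grid is padded up to a multiple of M) and one
  // block row per y-index.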
for (int j = 0; j < ny; j++) {
for (int i = 0; i < nx; i++) {
u[j * nx + i] = 0;
v[j * nx + i] = 0;
p[j * nx + i] = 0;
po[j * nx + i] = 0;
b[j * nx + i] = 0;
    }
  }
  // Time stepping starts only after the whole field has been initialised.
  for (int n = 0; n <= nt; n++) {
    // Snapshot the previous step: cavity_flow reads un/vn and writes u/v.
    hipMemcpy(un, u, size, hipMemcpyDefault);
    hipMemcpy(vn, v, size, hipMemcpyDefault);
    bulid_up_b<<<grid, block>>>(b, rho, dt, u, v, dx, dy);
    hipDeviceSynchronize();
    pressure_poisson<<<grid, block>>>(p, dx, dy, b);
    hipDeviceSynchronize();
    cavity_flow<<<grid, block>>>(u, v, dt, dx, dy, p, rho, nu, b);
    hipDeviceSynchronize(); // make the managed data visible to the host I/O below
  }
ofstream file("out.txt");
for (int i = 0; i < ny; i++) {
for (int j = 0; j < nx; j++)
file << un[nx * i + j] << " ";
file << "\n";
}
for (int i = 0; i < ny; i++) {
for (int j = 0; j < nx; j++)
file << vn[nx * i + j] << " ";
file << "\n";
}
for (int i = 0; i < ny; i++) {
for (int j = 0; j < nx; j++)
file << p[nx * i + j] << " ";
file << "\n";
}
file.close();
hipFree(u);
hipFree(v);
hipFree(p);
hipFree(uo);
hipFree(vo);
hipFree(po);
hipFree(b);
} | a0f11f945f21decb5702915436e1a268f634104a.cu | // nsc.cpp : This file contains the 'main' function. Program execution begins and ends there.
//
#include <iostream>
#include <fstream>
using namespace std;
const int nx = 41;
const int ny = 41;
const int nt = 500;
const int nit = 50;
const int ab = 2;
double dx = 2.0 / ((double) nx - 1.0);
double dy = 2.0 / ((double) ny - 1.0);
double rho = 1.0;
double nu = 0.1;
double dt = 0.001;
const int M = 128;
double u[ny * nx], v[ny * nx], p[ny * nx], b[ny * nx];
// Previous-time-step buffers. Managed memory is assumed here so that both the
// device kernels and the host-side output code can read them directly.
__device__ __managed__ double un[ny * nx];
__device__ __managed__ double vn[ny * nx];
// Host-side initialisation of the file-scope arrays (they are only used here).
void init() {
for (int i = 0; i < ny; i++)
for (int j = 0; j < nx; j++) {
int k = i * nx + j;
u[k] = 0;
v[k] = 0;
p[k] = 0;
b[k] = 0;
}
// std::cout << "init done" << std:endl;
}
__global__ void bulid_up_b(double * b, double rho, double dt, double * u, double * v, double dx, double dy) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = blockIdx.y;
int index = idy * nx + idx;
  // Only interior points have all four neighbours; boundary threads and the
  // padded threads with idx >= nx do nothing.
  if (idx <= 0 || idx >= nx - 1 || idy <= 0 || idy >= ny - 1) return;
  b[index] = rho * (1 / dt * ((u[index + 1] - u[index - 1]) / (2 * dx) + (v[index + nx] - v[index - nx]) / (2 * dy)) -
    pow((u[index + 1] - u[index - 1]) / (2 * dx), 2.0) -
    2 * ((u[index + nx] - u[index - nx]) / (2 * dy) *
      (v[index + 1] - v[index - 1]) / (2 * dx)) -
    pow((v[index + nx] - v[index - nx]) / (2 * dy), 2.0));
}
__global__ void pressure_poisson(double * p, double dx, double dy, double * b) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = blockIdx.y;
int index = idy * nx + idx;
double * pn = (double * ) malloc(nx * ny * sizeof(double));
for (int i = 0; i < (nx * ny); i++)
pn[i] = p[i];
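  // Per-thread snapshot of the pressure field (Jacobi-style update). Blocks that
  // ran earlier may already have updated p, so the snapshot is only approximately
  // the previous iterate.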
if (idx > 0 && idx < nx - 1 && idy > 0 && idy < ny - 1) {
p[index] = (((pn[index + 1] + pn[index - 1]) * pow(dy, 2) +
(pn[index + nx] + pn[index - nx]) * pow(dx, 2)) /
(2 * (pow(dx, 2) + pow(dy, 2))) -
pow(dx, 2) * pow(dy, 2) / (2 * (pow(dx, 2) + pow(dy, 2))) *
        b[index]);
  }
  // Boundary conditions: zero normal pressure gradient on the left, right and
  // bottom walls, p = 0 along the moving lid at the top.
  if (idx == 0 && idy < ny) {
    p[index] = p[index + 1]; // dp/dx = 0 at left boundary
  } else if (idx == nx - 1 && idy < ny) {
    p[index] = p[index - 1]; // dp/dx = 0 at right boundary
  } else if (idy == 0 && idx < nx) {
    p[index] = p[index + nx]; // dp/dy = 0 at bottom boundary
  } else if (idy == ny - 1 && idx < nx) {
    p[index] = 0.0; // p = 0 at top boundary
  }
  free(pn); // pair the in-kernel malloc with free so the device heap is not exhausted
}
__global__ void cavity_flow(double * u, double * v, double dt, double dx, double dy,
double * p, double rho, double nu, double * b) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = blockIdx.y;
int index = idy * nx + idx;
  // un/vn hold the previous time step; they are copied on the host before each
  // launch, so no per-thread copy is needed here.
if (idx > 0 && idx < nx - 1 && idy > 0 && idy < ny - 1) {
u[index] = un[index] -
un[index] * dt / dx * (un[index] - un[index - 1]) -
vn[index] * dt / dy * (un[index] - un[index - nx]) -
dt / (2 * rho * dx) * (p[index + 1] - p[index - 1]) +
nu * (dt / pow(dx, 2.0) * (un[index + 1] - 2 * un[index] + un[index - 1]) +
dt / pow(dy, 2.0) * (un[index + nx] - 2 * un[index] + un[index - nx]));
v[index] = vn[index] -
un[index] * dt / dx * (vn[index] - vn[index - 1]) -
vn[index] * dt / dy * (vn[index] - vn[index - nx]) -
dt / (2 * rho * dy) * (p[index + nx] - p[index - nx]) +
nu * (dt / pow(dx, 2.0) * (vn[index + 1] - 2 * vn[index] + vn[index - 1]) +
        dt / pow(dy, 2.0) * (vn[index + nx] - 2 * vn[index] + vn[index - nx]));
}
if (idx < nx) {
u[0 * nx + idx] = 0.0;
u[(nx - 1) * nx + idx] = 1.0;
v[0 * nx + idx] = 0.0;
v[(nx - 1) * nx + idx] = 0.0;
}
if (idy < ny) {
u[idy * nx + 0] = 0.0;
u[idy * nx + nx - 1] = 0.0;
v[idy * nx + 0] = 0.0;
v[idy * nx + nx - 1] = 0.0;
}
}
int main() {
init();
double * u, * v, * p, * uo, * vo, * po, * b;
int size = ny * nx * sizeof(double);
cudaMallocManaged( & u, size);
cudaMallocManaged( & v, size);
cudaMallocManaged( & p, size);
cudaMallocManaged( & uo, size);
cudaMallocManaged( & vo, size);
cudaMallocManaged( & po, size);
cudaMallocManaged( & b, size);
dim3 block(M, 1);
dim3 grid((nx + M - 1) / M, ny);
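  // One thread per x-index (the grid is padded up to a multiple of M) and one
  // block row per y-index.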
for (int j = 0; j < ny; j++) {
for (int i = 0; i < nx; i++) {
u[j * nx + i] = 0;
v[j * nx + i] = 0;
p[j * nx + i] = 0;
po[j * nx + i] = 0;
b[j * nx + i] = 0;
    }
  }
  // Time stepping starts only after the whole field has been initialised.
  for (int n = 0; n <= nt; n++) {
    // Snapshot the previous step: cavity_flow reads un/vn and writes u/v.
    cudaMemcpy(un, u, size, cudaMemcpyDefault);
    cudaMemcpy(vn, v, size, cudaMemcpyDefault);
    bulid_up_b<<<grid, block>>>(b, rho, dt, u, v, dx, dy);
    cudaDeviceSynchronize();
    pressure_poisson<<<grid, block>>>(p, dx, dy, b);
    cudaDeviceSynchronize();
    cavity_flow<<<grid, block>>>(u, v, dt, dx, dy, p, rho, nu, b);
    cudaDeviceSynchronize(); // make the managed data visible to the host I/O below
  }
ofstream file("out.txt");
for (int i = 0; i < ny; i++) {
for (int j = 0; j < nx; j++)
file << un[nx * i + j] << " ";
file << "\n";
}
for (int i = 0; i < ny; i++) {
for (int j = 0; j < nx; j++)
file << vn[nx * i + j] << " ";
file << "\n";
}
for (int i = 0; i < ny; i++) {
for (int j = 0; j < nx; j++)
file << p[nx * i + j] << " ";
file << "\n";
}
file.close();
cudaFree(u);
cudaFree(v);
cudaFree(p);
cudaFree(uo);
cudaFree(vo);
cudaFree(po);
cudaFree(b);
} |
f7391988d8a6c3e73adcff3024352040973cd075.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "tools.hpp"
#include "../cpu_anim.h"
#include "../book.h"
#include <iostream>
#include <iomanip>
using std::cout;
using std::endl;
using std::ios;
// Texture memory must be declared as file-scope global variables
// and cannot be passed as kernel parameters.
texture<float> textureConstSrc;
texture<float> textureIn;
texture<float> textureOut;
__global__ void copy_const_kernel_tex1D(float *iptr) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float c = tex1Dfetch(textureConstSrc, offset);
if (c != 0) iptr[offset] = c;
}
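// One heat-diffusion update step: each cell moves toward the average of its four
// neighbours. dstOut selects which ping-pong buffer (textureIn or textureOut)
// holds the current input.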
__global__ void blend_kernel_tex1D(float *dst, bool dstOut) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
int left = offset - 1;
int right = offset + 1;
if (x == 0) ++left;
if (x == DIM - 1) --right;
int up = offset - DIM;
int down = offset + DIM;
if (y == 0) up += DIM;
if (y == DIM - 1) down -= DIM;
float t;
float d;
float l;
float r;
float current;
if (dstOut) {
    // Dedicated read function for texture memory
t = tex1Dfetch(textureIn, up);
d = tex1Dfetch(textureIn, down);
l = tex1Dfetch(textureIn, left);
r = tex1Dfetch(textureIn, right);
current = tex1Dfetch(textureIn, offset);
} else {
t = tex1Dfetch(textureOut, up);
d = tex1Dfetch(textureOut, down);
l = tex1Dfetch(textureOut, left);
r = tex1Dfetch(textureOut, right);
current = tex1Dfetch(textureOut, offset);
}
dst[offset]= current + SPEED * (t + d + l + r - 4 * current);
}
static void AnimGPU(DataBlock *d, int ticks) {
CUDA_CHECK_ERROR(hipEventRecord(d->start, 0));
dim3 grids(DIM / 16, DIM / 16);
dim3 threads(16, 16);
CPUAnimBitmap *bitmap = d->bitmap;
volatile bool dstOut = true;
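  // Run 99 update steps per rendered frame, ping-ponging between dev_inSrc and dev_outSrc.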
for (int i = 0; i < 99; ++i) {
float *in;
float *out;
if (dstOut) {
in = d->dev_inSrc;
out = d->dev_outSrc;
} else {
out = d->dev_inSrc;
in = d->dev_outSrc;
}
hipLaunchKernelGGL(( copy_const_kernel_tex1D), dim3(grids), dim3(threads), 0, 0, in);
hipLaunchKernelGGL(( blend_kernel_tex1D), dim3(grids), dim3(threads), 0, 0, out,dstOut);
dstOut = !dstOut;
}
hipLaunchKernelGGL(( float_to_color), dim3(grids), dim3(threads), 0, 0, d->outputBitmap, d->dev_inSrc);
CUDA_CHECK_ERROR(hipMemcpy(bitmap->get_ptr(), d->outputBitmap,
bitmap->image_size(), hipMemcpyDeviceToHost));
CUDA_CHECK_ERROR(hipEventRecord(d->stop, 0));
CUDA_CHECK_ERROR(hipEventSynchronize(d->stop));
float elapsedTime;
CUDA_CHECK_ERROR(hipEventElapsedTime(&elapsedTime, d->start, d->stop));
d->totalTime += elapsedTime;
++d->frame;
cout.setf(ios::fixed);
cout << "Average Time per frame: " << std::setw(4)
<< d->totalTime / d->frame <<"ms" << endl;
}
static void AnimExit(DataBlock *d) {
  // Unbind the texture memory
CUDA_CHECK_ERROR(hipUnbindTexture(textureIn));
CUDA_CHECK_ERROR(hipUnbindTexture(textureOut));
CUDA_CHECK_ERROR(hipUnbindTexture(textureConstSrc));
CUDA_CHECK_ERROR(hipFree(d->dev_constSrc));
CUDA_CHECK_ERROR(hipFree(d->dev_inSrc));
CUDA_CHECK_ERROR(hipFree(d->dev_outSrc));
CUDA_CHECK_ERROR(hipEventDestroy(d->start));
CUDA_CHECK_ERROR(hipEventDestroy(d->stop));
}
void NaiveThermalConductionAnimWith1DTexture() {
DataBlock data;
CPUAnimBitmap bitmap(DIM, DIM, &data);
data.bitmap = &bitmap;
data.totalTime = 0;
data.frame = 0;
CUDA_CHECK_ERROR(hipEventCreate(&data.start));
CUDA_CHECK_ERROR(hipEventCreate(&data.stop));
CUDA_CHECK_ERROR(hipMalloc((void**)&data.outputBitmap, bitmap.image_size()));
CUDA_CHECK_ERROR(hipMalloc((void**)&data.dev_inSrc, bitmap.image_size()));
CUDA_CHECK_ERROR(hipMalloc((void**)&data.dev_outSrc, bitmap.image_size()));
CUDA_CHECK_ERROR(hipMalloc((void**)&data.dev_constSrc, bitmap.image_size()));
  // Bind the texture memory
CUDA_CHECK_ERROR(hipBindTexture(NULL, textureConstSrc, data.dev_constSrc,
bitmap.image_size()));
CUDA_CHECK_ERROR(hipBindTexture(NULL, textureIn, data.dev_inSrc,
bitmap.image_size()));
CUDA_CHECK_ERROR(hipBindTexture(NULL, textureOut, data.dev_outSrc,
bitmap.image_size()));
float *temp = new float[bitmap.image_size()];
for (int i = 0; i < DIM * DIM; ++i) {
temp[i] = 0;
int x = i % DIM;
int y = i / DIM;
if ((x > 300) && (x < 600) && (y > 310) && (y < 601))
temp[i] = MAX_TEMP;
}
temp[DIM * 100 + 100] = (MAX_TEMP + MIN_TEMP) / 2;
temp[DIM * 700 + 100] = MIN_TEMP;
temp[DIM * 300 + 300] = MIN_TEMP;
temp[DIM * 200 + 700] = MIN_TEMP;
for (int y = 800; y < 900; ++y) {
for (int x = 400; x < 500; ++x)
temp[x + y * DIM] = MIN_TEMP;
}
CUDA_CHECK_ERROR(hipMemcpy(data.dev_constSrc, temp, bitmap.image_size(),
hipMemcpyHostToDevice));
for (int y = 800; y < DIM; ++y) {
for (int x = 0; x < 200; ++x) {
temp[x + y * DIM] = MAX_TEMP;
}
}
CUDA_CHECK_ERROR(hipMemcpy(data.dev_inSrc, temp, bitmap.image_size(),
hipMemcpyHostToDevice));
delete [] temp;
bitmap.anim_and_exit((void (*)(void *, int))AnimGPU, ((void(*)(void*))AnimExit));
}
| f7391988d8a6c3e73adcff3024352040973cd075.cu | #include "tools.hpp"
#include "../cpu_anim.h"
#include "../book.h"
#include <iostream>
#include <iomanip>
using std::cout;
using std::endl;
using std::ios;
// Texture memory must be declared as file-scope global variables
// and cannot be passed as kernel parameters.
texture<float> textureConstSrc;
texture<float> textureIn;
texture<float> textureOut;
__global__ void copy_const_kernel_tex1D(float *iptr) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float c = tex1Dfetch(textureConstSrc, offset);
if (c != 0) iptr[offset] = c;
}
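// One heat-diffusion update step: each cell moves toward the average of its four
// neighbours. dstOut selects which ping-pong buffer (textureIn or textureOut)
// holds the current input.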
__global__ void blend_kernel_tex1D(float *dst, bool dstOut) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
int left = offset - 1;
int right = offset + 1;
if (x == 0) ++left;
if (x == DIM - 1) --right;
int up = offset - DIM;
int down = offset + DIM;
if (y == 0) up += DIM;
if (y == DIM - 1) down -= DIM;
float t;
float d;
float l;
float r;
float current;
if (dstOut) {
    // Dedicated read function for texture memory
t = tex1Dfetch(textureIn, up);
d = tex1Dfetch(textureIn, down);
l = tex1Dfetch(textureIn, left);
r = tex1Dfetch(textureIn, right);
current = tex1Dfetch(textureIn, offset);
} else {
t = tex1Dfetch(textureOut, up);
d = tex1Dfetch(textureOut, down);
l = tex1Dfetch(textureOut, left);
r = tex1Dfetch(textureOut, right);
current = tex1Dfetch(textureOut, offset);
}
dst[offset]= current + SPEED * (t + d + l + r - 4 * current);
}
static void AnimGPU(DataBlock *d, int ticks) {
CUDA_CHECK_ERROR(cudaEventRecord(d->start, 0));
dim3 grids(DIM / 16, DIM / 16);
dim3 threads(16, 16);
CPUAnimBitmap *bitmap = d->bitmap;
volatile bool dstOut = true;
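  // Run 99 update steps per rendered frame, ping-ponging between dev_inSrc and dev_outSrc.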
for (int i = 0; i < 99; ++i) {
float *in;
float *out;
if (dstOut) {
in = d->dev_inSrc;
out = d->dev_outSrc;
} else {
out = d->dev_inSrc;
in = d->dev_outSrc;
}
copy_const_kernel_tex1D<<<grids, threads>>>(in);
blend_kernel_tex1D<<<grids, threads>>>(out,dstOut);
dstOut = !dstOut;
}
float_to_color<<<grids, threads>>>(d->outputBitmap, d->dev_inSrc);
CUDA_CHECK_ERROR(cudaMemcpy(bitmap->get_ptr(), d->outputBitmap,
bitmap->image_size(), cudaMemcpyDeviceToHost));
CUDA_CHECK_ERROR(cudaEventRecord(d->stop, 0));
CUDA_CHECK_ERROR(cudaEventSynchronize(d->stop));
float elapsedTime;
CUDA_CHECK_ERROR(cudaEventElapsedTime(&elapsedTime, d->start, d->stop));
d->totalTime += elapsedTime;
++d->frame;
cout.setf(ios::fixed);
cout << "Average Time per frame: " << std::setw(4)
<< d->totalTime / d->frame <<"ms" << endl;
}
static void AnimExit(DataBlock *d) {
  // Unbind the texture memory
CUDA_CHECK_ERROR(cudaUnbindTexture(textureIn));
CUDA_CHECK_ERROR(cudaUnbindTexture(textureOut));
CUDA_CHECK_ERROR(cudaUnbindTexture(textureConstSrc));
CUDA_CHECK_ERROR(cudaFree(d->dev_constSrc));
CUDA_CHECK_ERROR(cudaFree(d->dev_inSrc));
CUDA_CHECK_ERROR(cudaFree(d->dev_outSrc));
CUDA_CHECK_ERROR(cudaEventDestroy(d->start));
CUDA_CHECK_ERROR(cudaEventDestroy(d->stop));
}
void NaiveThermalConductionAnimWith1DTexture() {
DataBlock data;
CPUAnimBitmap bitmap(DIM, DIM, &data);
data.bitmap = &bitmap;
data.totalTime = 0;
data.frame = 0;
CUDA_CHECK_ERROR(cudaEventCreate(&data.start));
CUDA_CHECK_ERROR(cudaEventCreate(&data.stop));
CUDA_CHECK_ERROR(cudaMalloc((void**)&data.outputBitmap, bitmap.image_size()));
CUDA_CHECK_ERROR(cudaMalloc((void**)&data.dev_inSrc, bitmap.image_size()));
CUDA_CHECK_ERROR(cudaMalloc((void**)&data.dev_outSrc, bitmap.image_size()));
CUDA_CHECK_ERROR(cudaMalloc((void**)&data.dev_constSrc, bitmap.image_size()));
  // Bind the texture memory
CUDA_CHECK_ERROR(cudaBindTexture(NULL, textureConstSrc, data.dev_constSrc,
bitmap.image_size()));
CUDA_CHECK_ERROR(cudaBindTexture(NULL, textureIn, data.dev_inSrc,
bitmap.image_size()));
CUDA_CHECK_ERROR(cudaBindTexture(NULL, textureOut, data.dev_outSrc,
bitmap.image_size()));
float *temp = new float[bitmap.image_size()];
for (int i = 0; i < DIM * DIM; ++i) {
temp[i] = 0;
int x = i % DIM;
int y = i / DIM;
if ((x > 300) && (x < 600) && (y > 310) && (y < 601))
temp[i] = MAX_TEMP;
}
temp[DIM * 100 + 100] = (MAX_TEMP + MIN_TEMP) / 2;
temp[DIM * 700 + 100] = MIN_TEMP;
temp[DIM * 300 + 300] = MIN_TEMP;
temp[DIM * 200 + 700] = MIN_TEMP;
for (int y = 800; y < 900; ++y) {
for (int x = 400; x < 500; ++x)
temp[x + y * DIM] = MIN_TEMP;
}
CUDA_CHECK_ERROR(cudaMemcpy(data.dev_constSrc, temp, bitmap.image_size(),
cudaMemcpyHostToDevice));
for (int y = 800; y < DIM; ++y) {
for (int x = 0; x < 200; ++x) {
temp[x + y * DIM] = MAX_TEMP;
}
}
CUDA_CHECK_ERROR(cudaMemcpy(data.dev_inSrc, temp, bitmap.image_size(),
cudaMemcpyHostToDevice));
delete [] temp;
bitmap.anim_and_exit((void (*)(void *, int))AnimGPU, ((void(*)(void*))AnimExit));
}
|
c9d262536accab6ee7d6fef1226ad4903825918d.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 4, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| c9d262536accab6ee7d6fef1226ad4903825918d.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t,
LayoutDst, int32_t, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 4, 16, true,
cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
76a8b9b9b646279d8096f225fd8f09ed1a5c9514.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <chrono>
#include <hip/hip_runtime.h>
#include "conv.h"
int main(int argc, char **argv)
{
if (argc != 4) {
printf("Usage: %s <image width> <image height> <repeat>\n", argv[0]);
return 1;
}
const unsigned int imageW = atoi(argv[1]);
const unsigned int imageH = atoi(argv[2]);
const int numIterations = atoi(argv[3]);
float* h_Kernel = (float*)malloc(KERNEL_LENGTH * sizeof(float));
float* h_Input = (float*)malloc(imageW * imageH * sizeof(float));
float* h_Buffer = (float*)malloc(imageW * imageH * sizeof(float));
float* h_OutputCPU = (float*)malloc(imageW * imageH * sizeof(float));
float* h_OutputGPU = (float*)malloc(imageW * imageH * sizeof(float));
srand(2009);
for(unsigned int i = 0; i < KERNEL_LENGTH; i++)
h_Kernel[i] = (float)(rand() % 16);
for(unsigned int i = 0; i < imageW * imageH; i++)
h_Input[i] = (float)(rand() % 16);
float* d_Kernel;
hipMalloc((void**)&d_Kernel, sizeof(float)*KERNEL_LENGTH);
hipMemcpy(d_Kernel, h_Kernel, sizeof(float)*KERNEL_LENGTH, hipMemcpyHostToDevice);
float* d_Input;
hipMalloc((void**)&d_Input, sizeof(float)*imageW*imageH);
hipMemcpy(d_Input, h_Input, sizeof(float)*imageW*imageH, hipMemcpyHostToDevice);
float* d_Buffer;
hipMalloc((void**)&d_Buffer, sizeof(float)*imageW*imageH);
float* d_Output;
hipMalloc((void**)&d_Output, sizeof(float)*imageW*imageH);
//Just a single run or a warmup iteration
convolutionRows(
d_Buffer,
d_Input,
d_Kernel,
imageW,
imageH,
imageW);
convolutionColumns(
d_Output,
d_Buffer,
d_Kernel,
imageW,
imageH,
imageW);
hipDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for(int iter = 0; iter < numIterations; iter++) {
convolutionRows(
d_Buffer,
d_Input,
d_Kernel,
imageW,
imageH,
imageW);
convolutionColumns(
d_Output,
d_Buffer,
d_Kernel,
imageW,
imageH,
imageW);
}
hipDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time %f (s)\n", (time * 1e-9f) / numIterations);
hipMemcpy(h_OutputGPU, d_Output, sizeof(float)*imageW * imageH, hipMemcpyDeviceToHost);
printf("Comparing against Host/C++ computation...\n");
convolutionRowHost(h_Buffer, h_Input, h_Kernel, imageW, imageH, KERNEL_RADIUS);
convolutionColumnHost(h_OutputCPU, h_Buffer, h_Kernel, imageW, imageH, KERNEL_RADIUS);
double sum = 0, delta = 0;
double L2norm;
for(unsigned int i = 0; i < imageW * imageH; i++){
delta += (h_OutputCPU[i] - h_OutputGPU[i]) * (h_OutputCPU[i] - h_OutputGPU[i]);
sum += h_OutputCPU[i] * h_OutputCPU[i];
}
L2norm = sqrt(delta / sum);
printf("Relative L2 norm: %.3e\n\n", L2norm);
free(h_OutputGPU);
free(h_OutputCPU);
free(h_Buffer);
free(h_Input);
free(h_Kernel);
hipFree(d_Kernel);
hipFree(d_Input);
hipFree(d_Buffer);
hipFree(d_Output);
printf("%s\n", L2norm < 1e-6 ? "PASS" : "FAIL");
return 0;
}
| 76a8b9b9b646279d8096f225fd8f09ed1a5c9514.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <chrono>
#include <cuda.h>
#include "conv.h"
int main(int argc, char **argv)
{
if (argc != 4) {
printf("Usage: %s <image width> <image height> <repeat>\n", argv[0]);
return 1;
}
const unsigned int imageW = atoi(argv[1]);
const unsigned int imageH = atoi(argv[2]);
const int numIterations = atoi(argv[3]);
float* h_Kernel = (float*)malloc(KERNEL_LENGTH * sizeof(float));
float* h_Input = (float*)malloc(imageW * imageH * sizeof(float));
float* h_Buffer = (float*)malloc(imageW * imageH * sizeof(float));
float* h_OutputCPU = (float*)malloc(imageW * imageH * sizeof(float));
float* h_OutputGPU = (float*)malloc(imageW * imageH * sizeof(float));
srand(2009);
for(unsigned int i = 0; i < KERNEL_LENGTH; i++)
h_Kernel[i] = (float)(rand() % 16);
for(unsigned int i = 0; i < imageW * imageH; i++)
h_Input[i] = (float)(rand() % 16);
float* d_Kernel;
cudaMalloc((void**)&d_Kernel, sizeof(float)*KERNEL_LENGTH);
cudaMemcpy(d_Kernel, h_Kernel, sizeof(float)*KERNEL_LENGTH, cudaMemcpyHostToDevice);
float* d_Input;
cudaMalloc((void**)&d_Input, sizeof(float)*imageW*imageH);
cudaMemcpy(d_Input, h_Input, sizeof(float)*imageW*imageH, cudaMemcpyHostToDevice);
float* d_Buffer;
cudaMalloc((void**)&d_Buffer, sizeof(float)*imageW*imageH);
float* d_Output;
cudaMalloc((void**)&d_Output, sizeof(float)*imageW*imageH);
//Just a single run or a warmup iteration
convolutionRows(
d_Buffer,
d_Input,
d_Kernel,
imageW,
imageH,
imageW);
convolutionColumns(
d_Output,
d_Buffer,
d_Kernel,
imageW,
imageH,
imageW);
cudaDeviceSynchronize();
auto start = std::chrono::steady_clock::now();
for(int iter = 0; iter < numIterations; iter++) {
convolutionRows(
d_Buffer,
d_Input,
d_Kernel,
imageW,
imageH,
imageW);
convolutionColumns(
d_Output,
d_Buffer,
d_Kernel,
imageW,
imageH,
imageW);
}
cudaDeviceSynchronize();
auto end = std::chrono::steady_clock::now();
auto time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
printf("Average kernel execution time %f (s)\n", (time * 1e-9f) / numIterations);
cudaMemcpy(h_OutputGPU, d_Output, sizeof(float)*imageW * imageH, cudaMemcpyDeviceToHost);
printf("Comparing against Host/C++ computation...\n");
convolutionRowHost(h_Buffer, h_Input, h_Kernel, imageW, imageH, KERNEL_RADIUS);
convolutionColumnHost(h_OutputCPU, h_Buffer, h_Kernel, imageW, imageH, KERNEL_RADIUS);
double sum = 0, delta = 0;
double L2norm;
for(unsigned int i = 0; i < imageW * imageH; i++){
delta += (h_OutputCPU[i] - h_OutputGPU[i]) * (h_OutputCPU[i] - h_OutputGPU[i]);
sum += h_OutputCPU[i] * h_OutputCPU[i];
}
L2norm = sqrt(delta / sum);
printf("Relative L2 norm: %.3e\n\n", L2norm);
free(h_OutputGPU);
free(h_OutputCPU);
free(h_Buffer);
free(h_Input);
free(h_Kernel);
cudaFree(d_Kernel);
cudaFree(d_Input);
cudaFree(d_Buffer);
cudaFree(d_Output);
printf("%s\n", L2norm < 1e-6 ? "PASS" : "FAIL");
return 0;
}
|
a644161ce9067c3b7b526b12837c07b282935f23.hip | // !!! This is a file automatically generated by hipify!!!
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
// includes, kernels
#include "backprop_cuda_kernel.cu"
#include "backprop.h"
#define ITERATIONS 1
////////////////////////////////////////////////////////////////////////////////
extern "C"
void bpnn_layerforward(float *l1, float *l2, float **conn, int n1, int n2);
extern "C"
void bpnn_output_error(float *delta, float *target, float *output, int nj, float *err);
extern "C"
void bpnn_hidden_error(float *delta_h, int nh, float *delta_o, int no, float **who, float *hidden, float *err);
extern "C"
void bpnn_adjust_weights(float *delta, int ndelta, float *ly, int nly, float **w, float **oldw);
extern "C"
int setup(int argc, char** argv);
extern "C"
float **alloc_2d_dbl(int m, int n);
extern "C"
float squash(float x);
double gettime() {
struct timeval t;
gettimeofday(&t,NULL);
return t.tv_sec+t.tv_usec*1e-6;
}
unsigned int num_threads = 0;
unsigned int num_blocks = 0;
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
setup(argc, argv);
}
extern "C"
void bpnn_train_cuda(BPNN *net, float *eo, float *eh)
{
int in, hid, out;
float out_err, hid_err;
in = net->input_n;
hid = net->hidden_n;
out = net->output_n;
int m = 0;
float *input_hidden_cuda;
float *input_cuda;
float *output_hidden_cuda;
float *partial_sum;
float *hidden_partial_sum;
float *hidden_delta_cuda;
float *input_prev_weights_cuda;
float sum;
float *input_weights_one_dim;
float *input_weights_prev_one_dim;
num_blocks = in / 16;
dim3 grid( 1 , num_blocks);
dim3 threads(16 , 16);
hipMallocManaged(&input_cuda, (in + 1) * sizeof(float));
hipMallocManaged(&output_hidden_cuda, (hid + 1) * sizeof(float));
hipMallocManaged(&input_hidden_cuda, (in + 1) * (hid + 1) * sizeof(float));
hipMallocManaged(&hidden_partial_sum, num_blocks * WIDTH * sizeof(float));
hipMallocManaged(&input_prev_weights_cuda, (in + 1) * (hid + 1) * sizeof(float));
memcpy(input_cuda,net->input_units, (in + 1) *sizeof(float));
// this preprocessing stage is added to correct the bugs of wrong memcopy using two-dimensional net->inputweights
for (int k = 0; k <= in; k++) {
for (int j = 0; j <= hid; j++) {
input_hidden_cuda[m] = net->input_weights[k][j];
input_prev_weights_cuda[m] = net-> input_prev_weights[k][j];
m++;
}
}
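// The nested loops above are a plain row-major flattening of the (in + 1) x (hid + 1)
// weight matrices into the unified-memory buffers; element (k, j) lands at linear
// offset k * (hid + 1) + j, i.e. equivalently:
//   input_hidden_cuda[k * (hid + 1) + j]       = net->input_weights[k][j];
//   input_prev_weights_cuda[k * (hid + 1) + j] = net->input_prev_weights[k][j];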
#ifdef PREF
hipStream_t stream1;
hipStream_t stream2;
hipStream_t stream3;
hipStream_t stream4;
hipStream_t stream5;
hipStreamCreate(&stream1);
hipStreamCreate(&stream2);
hipStreamCreate(&stream3);
hipStreamCreate(&stream4);
hipStreamCreate(&stream5);
hipMemPrefetchAsync(input_cuda,(in + 1) * sizeof(float), 0, stream1 );
hipMemPrefetchAsync(output_hidden_cuda,(hid + 1) * sizeof(float), 0, stream2 );
hipMemPrefetchAsync(input_hidden_cuda,(in + 1) * (hid + 1) * sizeof(float), 0, stream3 );
hipMemPrefetchAsync(hidden_partial_sum,num_blocks * WIDTH * sizeof(float), 0, stream4 );
hipStreamSynchronize(stream1);
hipStreamSynchronize(stream2);
hipStreamSynchronize(stream3);
hipStreamSynchronize(stream4);
#endif
#ifdef PREF
printf("Performing GPU computation\n");
for(int i = 0; i < ITERATIONS; i ++){
hipLaunchKernelGGL(( bpnn_layerforward_CUDA), dim3(grid), dim3(threads), 0, stream5 , input_cuda,
output_hidden_cuda,
input_hidden_cuda,
hidden_partial_sum,
in,
hid);
hipDeviceSynchronize();
}
#else
printf("Performing GPU computation\n");
for(int i = 0; i < ITERATIONS; i ++){
hipLaunchKernelGGL(( bpnn_layerforward_CUDA), dim3(grid), dim3(threads) , 0, 0, input_cuda,
output_hidden_cuda,
input_hidden_cuda,
hidden_partial_sum,
in,
hid);
hipDeviceSynchronize();
}
#endif
// hipMemcpy(partial_sum, hidden_partial_sum, num_blocks * WIDTH * sizeof(float), hipMemcpyDeviceToHost);
for (int j = 1; j <= hid; j++) {
sum = 0.0;
for (int k = 0; k < num_blocks; k++) {
sum += hidden_partial_sum[k * hid + j-1] ;
}
sum += net->input_weights[0][j];
net-> hidden_units[j] = float(1.0 / (1.0 + exp(-sum)));
}
bpnn_layerforward(net->hidden_units, net->output_units, net->hidden_weights, hid, out);
bpnn_output_error(net->output_delta, net->target, net->output_units, out, &out_err);
bpnn_hidden_error(net->hidden_delta, hid, net->output_delta, out, net->hidden_weights, net->hidden_units, &hid_err);
bpnn_adjust_weights(net->output_delta, out, net->hidden_units, hid, net->hidden_weights, net->hidden_prev_weights);
hipMallocManaged( &hidden_delta_cuda, (hid + 1) * sizeof(float));
memcpy(hidden_delta_cuda, net->hidden_delta,(hid + 1)* sizeof(float));
#ifdef PREF
hipMemPrefetchAsync(input_prev_weights_cuda, (in + 1) * (hid + 1) * sizeof(float), 0, stream1 );
hipMemPrefetchAsync(hidden_delta_cuda,(hid + 1) * sizeof(float), 0, stream2 );
// hipMemPrefetchAsync(input_cuda,(in + 1) * sizeof(float), 0, stream3 );
// hipMemPrefetchAsync(input_hidden_cuda,(in + 1) * (hid + 1) * sizeof(float), 0, stream4 );
hipStreamSynchronize(stream1);
hipStreamSynchronize(stream2);
// hipStreamSynchronize(stream3);
// hipStreamSynchronize(stream4);
for(int i = 0; i < ITERATIONS; i ++){
hipLaunchKernelGGL(( bpnn_adjust_weights_cuda), dim3(grid), dim3(threads), 0, stream5 , hidden_delta_cuda,
hid,
input_cuda,
in,
input_hidden_cuda,
input_prev_weights_cuda
);
hipDeviceSynchronize();
}
#else
for(int i = 0; i < ITERATIONS; i ++){
hipLaunchKernelGGL(( bpnn_adjust_weights_cuda), dim3(grid), dim3(threads) , 0, 0, hidden_delta_cuda,
hid,
input_cuda,
in,
input_hidden_cuda,
input_prev_weights_cuda
);
hipDeviceSynchronize();
}
#endif
memcpy(net->input_units, input_cuda, (in + 1) * sizeof(float));
hipFree(input_cuda);
hipFree(output_hidden_cuda);
hipFree(input_hidden_cuda);
hipFree(hidden_partial_sum);
hipFree(input_prev_weights_cuda);
hipFree(hidden_delta_cuda);
}
| a644161ce9067c3b7b526b12837c07b282935f23.cu |
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <sys/time.h>
// includes, kernels
#include "backprop_cuda_kernel.cu"
#include "backprop.h"
#define ITERATIONS 1
////////////////////////////////////////////////////////////////////////////////
extern "C"
void bpnn_layerforward(float *l1, float *l2, float **conn, int n1, int n2);
extern "C"
void bpnn_output_error(float *delta, float *target, float *output, int nj, float *err);
extern "C"
void bpnn_hidden_error(float *delta_h, int nh, float *delta_o, int no, float **who, float *hidden, float *err);
extern "C"
void bpnn_adjust_weights(float *delta, int ndelta, float *ly, int nly, float **w, float **oldw);
extern "C"
int setup(int argc, char** argv);
extern "C"
float **alloc_2d_dbl(int m, int n);
extern "C"
float squash(float x);
double gettime() {
struct timeval t;
gettimeofday(&t,NULL);
return t.tv_sec+t.tv_usec*1e-6;
}
unsigned int num_threads = 0;
unsigned int num_blocks = 0;
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
setup(argc, argv);
}
extern "C"
void bpnn_train_cuda(BPNN *net, float *eo, float *eh)
{
int in, hid, out;
float out_err, hid_err;
in = net->input_n;
hid = net->hidden_n;
out = net->output_n;
int m = 0;
float *input_hidden_cuda;
float *input_cuda;
float *output_hidden_cuda;
float *partial_sum;
float *hidden_partial_sum;
float *hidden_delta_cuda;
float *input_prev_weights_cuda;
float sum;
float *input_weights_one_dim;
float *input_weights_prev_one_dim;
num_blocks = in / 16;
dim3 grid( 1 , num_blocks);
dim3 threads(16 , 16);
cudaMallocManaged(&input_cuda, (in + 1) * sizeof(float));
cudaMallocManaged(&output_hidden_cuda, (hid + 1) * sizeof(float));
cudaMallocManaged(&input_hidden_cuda, (in + 1) * (hid + 1) * sizeof(float));
cudaMallocManaged(&hidden_partial_sum, num_blocks * WIDTH * sizeof(float));
cudaMallocManaged(&input_prev_weights_cuda, (in + 1) * (hid + 1) * sizeof(float));
memcpy(input_cuda,net->input_units, (in + 1) *sizeof(float));
// this preprocessing stage is added to correct the bugs of wrong memcopy using two-dimensional net->inputweights
for (int k = 0; k <= in; k++) {
for (int j = 0; j <= hid; j++) {
input_hidden_cuda[m] = net->input_weights[k][j];
input_prev_weights_cuda[m] = net-> input_prev_weights[k][j];
m++;
}
}
#ifdef PREF
cudaStream_t stream1;
cudaStream_t stream2;
cudaStream_t stream3;
cudaStream_t stream4;
cudaStream_t stream5;
cudaStreamCreate(&stream1);
cudaStreamCreate(&stream2);
cudaStreamCreate(&stream3);
cudaStreamCreate(&stream4);
cudaStreamCreate(&stream5);
cudaMemPrefetchAsync(input_cuda,(in + 1) * sizeof(float), 0, stream1 );
cudaMemPrefetchAsync(output_hidden_cuda,(hid + 1) * sizeof(float), 0, stream2 );
cudaMemPrefetchAsync(input_hidden_cuda,(in + 1) * (hid + 1) * sizeof(float), 0, stream3 );
cudaMemPrefetchAsync(hidden_partial_sum,num_blocks * WIDTH * sizeof(float), 0, stream4 );
cudaStreamSynchronize(stream1);
cudaStreamSynchronize(stream2);
cudaStreamSynchronize(stream3);
cudaStreamSynchronize(stream4);
#endif
#ifdef PREF
printf("Performing GPU computation\n");
for(int i = 0; i < ITERATIONS; i ++){
bpnn_layerforward_CUDA<<< grid, threads, 0, stream5 >>>(input_cuda,
output_hidden_cuda,
input_hidden_cuda,
hidden_partial_sum,
in,
hid);
cudaDeviceSynchronize();
}
#else
printf("Performing GPU computation\n");
for(int i = 0; i < ITERATIONS; i ++){
bpnn_layerforward_CUDA<<< grid, threads >>>(input_cuda,
output_hidden_cuda,
input_hidden_cuda,
hidden_partial_sum,
in,
hid);
cudaDeviceSynchronize();
}
#endif
// cudaMemcpy(partial_sum, hidden_partial_sum, num_blocks * WIDTH * sizeof(float), cudaMemcpyDeviceToHost);
for (int j = 1; j <= hid; j++) {
sum = 0.0;
for (int k = 0; k < num_blocks; k++) {
sum += hidden_partial_sum[k * hid + j-1] ;
}
sum += net->input_weights[0][j];
net-> hidden_units[j] = float(1.0 / (1.0 + exp(-sum)));
}
bpnn_layerforward(net->hidden_units, net->output_units, net->hidden_weights, hid, out);
bpnn_output_error(net->output_delta, net->target, net->output_units, out, &out_err);
bpnn_hidden_error(net->hidden_delta, hid, net->output_delta, out, net->hidden_weights, net->hidden_units, &hid_err);
bpnn_adjust_weights(net->output_delta, out, net->hidden_units, hid, net->hidden_weights, net->hidden_prev_weights);
cudaMallocManaged( &hidden_delta_cuda, (hid + 1) * sizeof(float));
memcpy(hidden_delta_cuda, net->hidden_delta,(hid + 1)* sizeof(float));
#ifdef PREF
cudaMemPrefetchAsync(input_prev_weights_cuda, (in + 1) * (hid + 1) * sizeof(float), 0, stream1 );
cudaMemPrefetchAsync(hidden_delta_cuda,(hid + 1) * sizeof(float), 0, stream2 );
// cudaMemPrefetchAsync(input_cuda,(in + 1) * sizeof(float), 0, stream3 );
// cudaMemPrefetchAsync(input_hidden_cuda,(in + 1) * (hid + 1) * sizeof(float), 0, stream4 );
cudaStreamSynchronize(stream1);
cudaStreamSynchronize(stream2);
// cudaStreamSynchronize(stream3);
// cudaStreamSynchronize(stream4);
for(int i = 0; i < ITERATIONS; i ++){
bpnn_adjust_weights_cuda<<< grid, threads, 0, stream5 >>>(hidden_delta_cuda,
hid,
input_cuda,
in,
input_hidden_cuda,
input_prev_weights_cuda
);
cudaDeviceSynchronize();
}
#else
for(int i = 0; i < ITERATIONS; i ++){
bpnn_adjust_weights_cuda<<< grid, threads >>>(hidden_delta_cuda,
hid,
input_cuda,
in,
input_hidden_cuda,
input_prev_weights_cuda
);
cudaDeviceSynchronize();
}
#endif
memcpy(net->input_units, input_cuda, (in + 1) * sizeof(float));
cudaFree(input_cuda);
cudaFree(output_hidden_cuda);
cudaFree(input_hidden_cuda);
cudaFree(hidden_partial_sum);
cudaFree(input_prev_weights_cuda);
cudaFree(hidden_delta_cuda);
}
|
e0234008b3157837933f97891a39e93bc67600a5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <algorithm>
#include <stdio.h>
#include <hip/hip_fp16.h>
#include <hipcub/hipcub.hpp>
#include "torch_cum_maxmin.h"
#include "amir_cuda_util/cuda_util.h"
namespace amirstan
{
namespace plugin
{
using namespace amirstan::cuda;
template<typename T, bool is_max> struct BlockPrefixPairCumCallbackOp;
template<typename T>
struct BlockPrefixPairCumCallbackOp<T,true>
{
// Running prefix
hipcub::KeyValuePair<int, T> running_total;
// Constructor
__device__ BlockPrefixPairCumCallbackOp(hipcub::KeyValuePair<int, T> running_total) : running_total(running_total) {}
// Callback operator to be entered by the first warp of threads in the block.
// Thread-0 is responsible for returning a value for seeding the block-wide scan.
__device__ hipcub::KeyValuePair<int, T> operator()(hipcub::KeyValuePair<int, T> block_aggregate)
{
hipcub::KeyValuePair<int, T> old_prefix = running_total;
running_total = (block_aggregate.value > old_prefix.value) ? block_aggregate : old_prefix;
return old_prefix;
}
};
template<typename T>
struct BlockPrefixPairCumCallbackOp<T,false>
{
// Running prefix
hipcub::KeyValuePair<int, T> running_total;
// Constructor
__device__ BlockPrefixPairCumCallbackOp(hipcub::KeyValuePair<int, T> running_total) : running_total(running_total) {}
// Callback operator to be entered by the first warp of threads in the block.
// Thread-0 is responsible for returning a value for seeding the block-wide scan.
__device__ hipcub::KeyValuePair<int, T> operator()(hipcub::KeyValuePair<int, T> block_aggregate)
{
hipcub::KeyValuePair<int, T> old_prefix = running_total;
running_total = (block_aggregate.value < old_prefix.value) ? block_aggregate : old_prefix;
return old_prefix;
}
};
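// Usage sketch (an assumption for illustration only -- the warp-scan kernel below does not
// use these functors; it carries the running aggregate by hand): a block-prefix callback of
// this kind would typically seed a tile-by-tile block-wide scan, roughly
//   BlockPrefixPairCumCallbackOp<T, true> prefix_op(first_item);
//   BlockScan(temp_storage).InclusiveScan(thread_data, thread_data, hipcub::ArgMax(), prefix_op);
// where running_total is updated once per tile and returned as the prefix for the next tile.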
template <typename T>
__global__ void torch_cum_maxmin_warp_kernel(T* output, int* out_index, const T* input,
size_t stride, int dim_size, size_t cum_size
,const int cum_type){
// create block scan
typedef hipcub::WarpScan<hipcub::KeyValuePair<int, T>> warpScan;
__shared__ union {
typename warpScan::TempStorage scan[CUDA_NUM_WARP];
} temp_storage;
for (int index = (blockIdx.x*CUDA_NUM_WARP)+int(threadIdx.x/CUDA_WARP_SIZE); index < cum_size; index += gridDim.x*CUDA_NUM_WARP){
// compute cum start
const size_t pre_index = index/stride;
const size_t post_index = index%stride;
const size_t cum_start = pre_index*stride*dim_size + post_index;
hipcub::KeyValuePair<int, T> aggregate_value{0, input[cum_start]};
for(int warp_offset = 0; warp_offset<dim_size; warp_offset += CUDA_WARP_SIZE){
const size_t cum_position = warp_offset + threadIdx.x%CUDA_WARP_SIZE;
hipcub::KeyValuePair<int, T> thread_data = {cum_position, cum_position<dim_size? input[cum_start+cum_position*stride]:0};
if(cum_type==0){
thread_data = thread_data.value>aggregate_value.value? thread_data:aggregate_value;
warpScan(temp_storage.scan[int(threadIdx.x/CUDA_WARP_SIZE)]).InclusiveScan(thread_data, thread_data, hipcub::ArgMax(), aggregate_value);
}else{
thread_data = thread_data.value<aggregate_value.value? thread_data:aggregate_value;
warpScan(temp_storage.scan[int(threadIdx.x/CUDA_WARP_SIZE)]).InclusiveScan(thread_data, thread_data, hipcub::ArgMin(), aggregate_value);
}
// Store scanned items to output segment
if(cum_position<dim_size){
output[cum_start+cum_position*stride] = thread_data.value;
out_index[cum_start+cum_position*stride] = thread_data.key;
}
}
}
}
static void create_size_stride(const int* dims, int nb_dims, TensorSize &size, TensorStride& stride){
memcpy(&size.size[0], dims, sizeof(int)*nb_dims);
stride.size[nb_dims-1] = 1;
for(int i=nb_dims-2; i>=0; --i){
stride.size[i] = stride.size[i+1] * size.size[i+1];
}
}
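// Example: for input_dims = {N, C, H, W} (nb_dims == 4) this yields strides
// {C*H*W, H*W, W, 1}, the usual contiguous layout, so stride.size[cum_dim] is the
// element step between consecutive entries along the dimension being cumulated.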
template <typename T>
void torch_cum_maxmin(T *output, int *index, const T* input,
int* input_dims, int nb_dims,
int cum_dim, int cum_type,
hipStream_t stream){
TensorSize ts_input_size;
TensorStride input_stride;
create_size_stride(input_dims, nb_dims, ts_input_size, input_stride);
size_t cum_size = 1;
for(int i=0; i<nb_dims; ++i){
if(i!=cum_dim){
cum_size*=ts_input_size.size[i];
}
}
size_t num_blocks = std::min<long>(kMaxGridNum,(cum_size+CUDA_NUM_WARP-1)/CUDA_NUM_WARP);
hipLaunchKernelGGL(( torch_cum_maxmin_warp_kernel<T>), dim3(num_blocks), dim3(CUDA_NUM_THREADS), 0, stream, output, index, input,
input_stride.size[cum_dim], ts_input_size.size[cum_dim], cum_size,
cum_type);
}
template void torch_cum_maxmin<float>(float *output, int *index, const float* input,
int* input_dims, int nb_dims,
int cum_dim, int cum_type,
hipStream_t stream);
} // namespace plugin
} // namespace amirstan | e0234008b3157837933f97891a39e93bc67600a5.cu | #include <cmath>
#include <algorithm>
#include <stdio.h>
#include <cuda_fp16.h>
#include <cub/cub.cuh>
#include "torch_cum_maxmin.h"
#include "amir_cuda_util/cuda_util.h"
namespace amirstan
{
namespace plugin
{
using namespace amirstan::cuda;
template<typename T, bool is_max> struct BlockPrefixPairCumCallbackOp;
template<typename T>
struct BlockPrefixPairCumCallbackOp<T,true>
{
// Running prefix
cub::KeyValuePair<int, T> running_total;
// Constructor
__device__ BlockPrefixPairCumCallbackOp(cub::KeyValuePair<int, T> running_total) : running_total(running_total) {}
// Callback operator to be entered by the first warp of threads in the block.
// Thread-0 is responsible for returning a value for seeding the block-wide scan.
__device__ cub::KeyValuePair<int, T> operator()(cub::KeyValuePair<int, T> block_aggregate)
{
cub::KeyValuePair<int, T> old_prefix = running_total;
running_total = (block_aggregate.value > old_prefix.value) ? block_aggregate : old_prefix;
return old_prefix;
}
};
template<typename T>
struct BlockPrefixPairCumCallbackOp<T,false>
{
// Running prefix
cub::KeyValuePair<int, T> running_total;
// Constructor
__device__ BlockPrefixPairCumCallbackOp(cub::KeyValuePair<int, T> running_total) : running_total(running_total) {}
// Callback operator to be entered by the first warp of threads in the block.
// Thread-0 is responsible for returning a value for seeding the block-wide scan.
__device__ cub::KeyValuePair<int, T> operator()(cub::KeyValuePair<int, T> block_aggregate)
{
cub::KeyValuePair<int, T> old_prefix = running_total;
running_total = (block_aggregate.value < old_prefix.value) ? block_aggregate : old_prefix;
return old_prefix;
}
};
template <typename T>
__global__ void torch_cum_maxmin_warp_kernel(T* output, int* out_index, const T* input,
size_t stride, int dim_size, size_t cum_size
,const int cum_type){
// create block scan
typedef cub::WarpScan<cub::KeyValuePair<int, T>> warpScan;
__shared__ union {
typename warpScan::TempStorage scan[CUDA_NUM_WARP];
} temp_storage;
for (int index = (blockIdx.x*CUDA_NUM_WARP)+int(threadIdx.x/CUDA_WARP_SIZE); index < cum_size; index += gridDim.x*CUDA_NUM_WARP){
// compute cum start
const size_t pre_index = index/stride;
const size_t post_index = index%stride;
const size_t cum_start = pre_index*stride*dim_size + post_index;
cub::KeyValuePair<int, T> aggregate_value{0, input[cum_start]};
for(int warp_offset = 0; warp_offset<dim_size; warp_offset += CUDA_WARP_SIZE){
const size_t cum_position = warp_offset + threadIdx.x%CUDA_WARP_SIZE;
cub::KeyValuePair<int, T> thread_data = {cum_position, cum_position<dim_size? input[cum_start+cum_position*stride]:0};
if(cum_type==0){
thread_data = thread_data.value>aggregate_value.value? thread_data:aggregate_value;
warpScan(temp_storage.scan[int(threadIdx.x/CUDA_WARP_SIZE)]).InclusiveScan(thread_data, thread_data, cub::ArgMax(), aggregate_value);
}else{
thread_data = thread_data.value<aggregate_value.value? thread_data:aggregate_value;
warpScan(temp_storage.scan[int(threadIdx.x/CUDA_WARP_SIZE)]).InclusiveScan(thread_data, thread_data, cub::ArgMin(), aggregate_value);
}
// Store scanned items to output segment
if(cum_position<dim_size){
output[cum_start+cum_position*stride] = thread_data.value;
out_index[cum_start+cum_position*stride] = thread_data.key;
}
}
}
}
static void create_size_stride(const int* dims, int nb_dims, TensorSize &size, TensorStride& stride){
memcpy(&size.size[0], dims, sizeof(int)*nb_dims);
stride.size[nb_dims-1] = 1;
for(int i=nb_dims-2; i>=0; --i){
stride.size[i] = stride.size[i+1] * size.size[i+1];
}
}
template <typename T>
void torch_cum_maxmin(T *output, int *index, const T* input,
int* input_dims, int nb_dims,
int cum_dim, int cum_type,
cudaStream_t stream){
TensorSize ts_input_size;
TensorStride input_stride;
create_size_stride(input_dims, nb_dims, ts_input_size, input_stride);
size_t cum_size = 1;
for(int i=0; i<nb_dims; ++i){
if(i!=cum_dim){
cum_size*=ts_input_size.size[i];
}
}
size_t num_blocks = std::min<long>(kMaxGridNum,(cum_size+CUDA_NUM_WARP-1)/CUDA_NUM_WARP);
torch_cum_maxmin_warp_kernel<T><<<num_blocks, CUDA_NUM_THREADS, 0, stream>>>(output, index, input,
input_stride.size[cum_dim], ts_input_size.size[cum_dim], cum_size,
cum_type);
}
template void torch_cum_maxmin<float>(float *output, int *index, const float* input,
int* input_dims, int nb_dims,
int cum_dim, int cum_type,
cudaStream_t stream);
} // namespace plugin
} // namespace amirstan |
93b85ca90c9054b3bea0a07ae35f62050f04f644.hip | // !!! This is a file automatically generated by hipify!!!
#include "glm/ridge.h"
#include <gtest/gtest.h>
#include <cuda_utils.h>
#include <test_utils.h>
#include "ml_utils.h"
namespace ML {
namespace GLM {
using namespace MLCommon;
template<typename T>
struct RidgeInputs {
T tol;
int n_row;
int n_col;
int n_row_2;
int algo;
T alpha;
};
template<typename T>
class RidgeTest: public ::testing::TestWithParam<RidgeInputs<T> > {
protected:
void basicTest() {
params = ::testing::TestWithParam<RidgeInputs<T>>::GetParam();
int len = params.n_row * params.n_col;
int len2 = params.n_row_2 * params.n_col;
hipblasHandle_t cublas_handle;
CUBLAS_CHECK(hipblasCreate(&cublas_handle));
hipsolverDnHandle_t cusolver_handle = NULL;
CUSOLVER_CHECK(hipsolverDnCreate(&cusolver_handle));
allocate(data, len);
allocate(labels, params.n_row);
allocate(coef, params.n_col);
allocate(coef2, params.n_col);
allocate(coef3, params.n_col);
allocate(coef_ref, params.n_col);
allocate(coef2_ref, params.n_col);
allocate(coef3_ref, params.n_col);
allocate(pred_data, len2);
allocate(pred, params.n_row_2);
allocate(pred_ref, params.n_row_2);
allocate(pred2, params.n_row_2);
allocate(pred2_ref, params.n_row_2);
allocate(pred3, params.n_row_2);
allocate(pred3_ref, params.n_row_2);
T alpha = params.alpha;
T data_h[len] = { 0.0, 0.0, 1.0, 0.0, 0.0, 1.0 };
updateDevice(data, data_h, len);
T labels_h[params.n_row] = { 0.0, 0.1, 1.0 };
updateDevice(labels, labels_h, params.n_row);
T coef_ref_h[params.n_col] = { 0.39999998, 0.4 };
updateDevice(coef_ref, coef_ref_h, params.n_col);
T coef2_ref_h[params.n_col] = { 0.3454546 , 0.34545454 };
updateDevice(coef2_ref, coef2_ref_h, params.n_col);
T coef3_ref_h[params.n_col] = { 0.3799999 , 0.38000008 };
updateDevice(coef3_ref, coef3_ref_h, params.n_col);
T pred_data_h[len2] = { 0.5, 2.0, 0.2, 1.0 };
updateDevice(pred_data, pred_data_h, len2);
T pred_ref_h[params.n_row_2] = { 0.28, 1.1999999 };
updateDevice(pred_ref, pred_ref_h, params.n_row_2);
T pred2_ref_h[params.n_row_2] = { 0.37818184, 1.1727273 };
updateDevice(pred2_ref, pred2_ref_h, params.n_row_2);
T pred3_ref_h[params.n_row_2] = { 0.37933332, 1.2533332 };
updateDevice(pred3_ref, pred3_ref_h, params.n_row_2);
intercept = T(0);
ridgeFit(data, params.n_row, params.n_col, labels, &alpha, 1, coef,
&intercept, false, false, cublas_handle,
cusolver_handle, params.algo);
ridgePredict(pred_data, params.n_row_2, params.n_col, coef, intercept, pred,
cublas_handle);
updateDevice(data, data_h, len);
updateDevice(labels, labels_h, params.n_row);
intercept2 = T(0);
ridgeFit(data, params.n_row, params.n_col, labels, &alpha, 1, coef2,
&intercept2, true, false, cublas_handle,
cusolver_handle, params.algo);
ridgePredict(pred_data, params.n_row_2, params.n_col, coef2, intercept2, pred2,
cublas_handle);
updateDevice(data, data_h, len);
updateDevice(labels, labels_h, params.n_row);
intercept3 = T(0);
ridgeFit(data, params.n_row, params.n_col, labels, &alpha, 1, coef3,
&intercept3, true, true, cublas_handle,
cusolver_handle, params.algo);
ridgePredict(pred_data, params.n_row_2, params.n_col, coef3, intercept3, pred3,
cublas_handle);
CUBLAS_CHECK(hipblasDestroy(cublas_handle));
CUSOLVER_CHECK(hipsolverDnDestroy(cusolver_handle));
}
void SetUp() override {
basicTest();
}
void TearDown() override {
CUDA_CHECK(hipFree(data));
CUDA_CHECK(hipFree(labels));
CUDA_CHECK(hipFree(coef));
CUDA_CHECK(hipFree(coef_ref));
CUDA_CHECK(hipFree(coef2));
CUDA_CHECK(hipFree(coef2_ref));
CUDA_CHECK(hipFree(coef3));
CUDA_CHECK(hipFree(coef3_ref));
CUDA_CHECK(hipFree(pred_data));
CUDA_CHECK(hipFree(pred));
CUDA_CHECK(hipFree(pred_ref));
CUDA_CHECK(hipFree(pred2));
CUDA_CHECK(hipFree(pred2_ref));
CUDA_CHECK(hipFree(pred3));
CUDA_CHECK(hipFree(pred3_ref));
}
protected:
RidgeInputs<T> params;
T *data, *labels, *coef, *coef_ref, *pred_data, *pred, *pred_ref;
T *coef2, *coef2_ref, *pred2, *pred2_ref;
T *coef3, *coef3_ref, *pred3, *pred3_ref;
T intercept, intercept2, intercept3;
};
const std::vector<RidgeInputs<float> > inputsf2 = {
{ 0.001f, 3, 2, 2, 0, 0.5f },
{ 0.001f, 3, 2, 2, 1, 0.5f } };
const std::vector<RidgeInputs<double> > inputsd2 = {
{ 0.001, 3, 2, 2, 0, 0.5 },
{ 0.001, 3, 2, 2, 1, 0.5 } };
typedef RidgeTest<float> RidgeTestF;
TEST_P(RidgeTestF, Fit) {
ASSERT_TRUE(
devArrMatch(coef_ref, coef, params.n_col,
CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(
devArrMatch(coef2_ref, coef2, params.n_col,
CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(
devArrMatch(coef3_ref, coef3, params.n_col,
CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(
devArrMatch(pred_ref, pred, params.n_row_2,
CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(
devArrMatch(pred2_ref, pred2, params.n_row_2,
CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(
devArrMatch(pred3_ref, pred3, params.n_row_2,
CompareApproxAbs<float>(params.tol)));
}
typedef RidgeTest<double> RidgeTestD;
TEST_P(RidgeTestD, Fit) {
ASSERT_TRUE(
devArrMatch(coef_ref, coef, params.n_col,
CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(
devArrMatch(coef2_ref, coef2, params.n_col,
CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(
devArrMatch(coef3_ref, coef3, params.n_col,
CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(
devArrMatch(pred_ref, pred, params.n_row_2,
CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(
devArrMatch(pred2_ref, pred2, params.n_row_2,
CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(
devArrMatch(pred3_ref, pred3, params.n_row_2,
CompareApproxAbs<double>(params.tol)));
}
INSTANTIATE_TEST_CASE_P(RidgeTests, RidgeTestF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(RidgeTests, RidgeTestD, ::testing::ValuesIn(inputsd2));
}
} // end namespace ML
| 93b85ca90c9054b3bea0a07ae35f62050f04f644.cu | #include "glm/ridge.h"
#include <gtest/gtest.h>
#include <cuda_utils.h>
#include <test_utils.h>
#include "ml_utils.h"
namespace ML {
namespace GLM {
using namespace MLCommon;
template<typename T>
struct RidgeInputs {
T tol;
int n_row;
int n_col;
int n_row_2;
int algo;
T alpha;
};
template<typename T>
class RidgeTest: public ::testing::TestWithParam<RidgeInputs<T> > {
protected:
void basicTest() {
params = ::testing::TestWithParam<RidgeInputs<T>>::GetParam();
int len = params.n_row * params.n_col;
int len2 = params.n_row_2 * params.n_col;
cublasHandle_t cublas_handle;
CUBLAS_CHECK(cublasCreate(&cublas_handle));
cusolverDnHandle_t cusolver_handle = NULL;
CUSOLVER_CHECK(cusolverDnCreate(&cusolver_handle));
allocate(data, len);
allocate(labels, params.n_row);
allocate(coef, params.n_col);
allocate(coef2, params.n_col);
allocate(coef3, params.n_col);
allocate(coef_ref, params.n_col);
allocate(coef2_ref, params.n_col);
allocate(coef3_ref, params.n_col);
allocate(pred_data, len2);
allocate(pred, params.n_row_2);
allocate(pred_ref, params.n_row_2);
allocate(pred2, params.n_row_2);
allocate(pred2_ref, params.n_row_2);
allocate(pred3, params.n_row_2);
allocate(pred3_ref, params.n_row_2);
T alpha = params.alpha;
T data_h[len] = { 0.0, 0.0, 1.0, 0.0, 0.0, 1.0 };
updateDevice(data, data_h, len);
T labels_h[params.n_row] = { 0.0, 0.1, 1.0 };
updateDevice(labels, labels_h, params.n_row);
T coef_ref_h[params.n_col] = { 0.39999998, 0.4 };
updateDevice(coef_ref, coef_ref_h, params.n_col);
T coef2_ref_h[params.n_col] = { 0.3454546 , 0.34545454 };
updateDevice(coef2_ref, coef2_ref_h, params.n_col);
T coef3_ref_h[params.n_col] = { 0.3799999 , 0.38000008 };
updateDevice(coef3_ref, coef3_ref_h, params.n_col);
T pred_data_h[len2] = { 0.5, 2.0, 0.2, 1.0 };
updateDevice(pred_data, pred_data_h, len2);
T pred_ref_h[params.n_row_2] = { 0.28, 1.1999999 };
updateDevice(pred_ref, pred_ref_h, params.n_row_2);
T pred2_ref_h[params.n_row_2] = { 0.37818184, 1.1727273 };
updateDevice(pred2_ref, pred2_ref_h, params.n_row_2);
T pred3_ref_h[params.n_row_2] = { 0.37933332, 1.2533332 };
updateDevice(pred3_ref, pred3_ref_h, params.n_row_2);
intercept = T(0);
ridgeFit(data, params.n_row, params.n_col, labels, &alpha, 1, coef,
&intercept, false, false, cublas_handle,
cusolver_handle, params.algo);
ridgePredict(pred_data, params.n_row_2, params.n_col, coef, intercept, pred,
cublas_handle);
updateDevice(data, data_h, len);
updateDevice(labels, labels_h, params.n_row);
intercept2 = T(0);
ridgeFit(data, params.n_row, params.n_col, labels, &alpha, 1, coef2,
&intercept2, true, false, cublas_handle,
cusolver_handle, params.algo);
ridgePredict(pred_data, params.n_row_2, params.n_col, coef2, intercept2, pred2,
cublas_handle);
updateDevice(data, data_h, len);
updateDevice(labels, labels_h, params.n_row);
intercept3 = T(0);
ridgeFit(data, params.n_row, params.n_col, labels, &alpha, 1, coef3,
&intercept3, true, true, cublas_handle,
cusolver_handle, params.algo);
ridgePredict(pred_data, params.n_row_2, params.n_col, coef3, intercept3, pred3,
cublas_handle);
CUBLAS_CHECK(cublasDestroy(cublas_handle));
CUSOLVER_CHECK(cusolverDnDestroy(cusolver_handle));
}
void SetUp() override {
basicTest();
}
void TearDown() override {
CUDA_CHECK(cudaFree(data));
CUDA_CHECK(cudaFree(labels));
CUDA_CHECK(cudaFree(coef));
CUDA_CHECK(cudaFree(coef_ref));
CUDA_CHECK(cudaFree(coef2));
CUDA_CHECK(cudaFree(coef2_ref));
CUDA_CHECK(cudaFree(coef3));
CUDA_CHECK(cudaFree(coef3_ref));
CUDA_CHECK(cudaFree(pred_data));
CUDA_CHECK(cudaFree(pred));
CUDA_CHECK(cudaFree(pred_ref));
CUDA_CHECK(cudaFree(pred2));
CUDA_CHECK(cudaFree(pred2_ref));
CUDA_CHECK(cudaFree(pred3));
CUDA_CHECK(cudaFree(pred3_ref));
}
protected:
RidgeInputs<T> params;
T *data, *labels, *coef, *coef_ref, *pred_data, *pred, *pred_ref;
T *coef2, *coef2_ref, *pred2, *pred2_ref;
T *coef3, *coef3_ref, *pred3, *pred3_ref;
T intercept, intercept2, intercept3;
};
const std::vector<RidgeInputs<float> > inputsf2 = {
{ 0.001f, 3, 2, 2, 0, 0.5f },
{ 0.001f, 3, 2, 2, 1, 0.5f } };
const std::vector<RidgeInputs<double> > inputsd2 = {
{ 0.001, 3, 2, 2, 0, 0.5 },
{ 0.001, 3, 2, 2, 1, 0.5 } };
typedef RidgeTest<float> RidgeTestF;
TEST_P(RidgeTestF, Fit) {
ASSERT_TRUE(
devArrMatch(coef_ref, coef, params.n_col,
CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(
devArrMatch(coef2_ref, coef2, params.n_col,
CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(
devArrMatch(coef3_ref, coef3, params.n_col,
CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(
devArrMatch(pred_ref, pred, params.n_row_2,
CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(
devArrMatch(pred2_ref, pred2, params.n_row_2,
CompareApproxAbs<float>(params.tol)));
ASSERT_TRUE(
devArrMatch(pred3_ref, pred3, params.n_row_2,
CompareApproxAbs<float>(params.tol)));
}
typedef RidgeTest<double> RidgeTestD;
TEST_P(RidgeTestD, Fit) {
ASSERT_TRUE(
devArrMatch(coef_ref, coef, params.n_col,
CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(
devArrMatch(coef2_ref, coef2, params.n_col,
CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(
devArrMatch(coef3_ref, coef3, params.n_col,
CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(
devArrMatch(pred_ref, pred, params.n_row_2,
CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(
devArrMatch(pred2_ref, pred2, params.n_row_2,
CompareApproxAbs<double>(params.tol)));
ASSERT_TRUE(
devArrMatch(pred3_ref, pred3, params.n_row_2,
CompareApproxAbs<double>(params.tol)));
}
INSTANTIATE_TEST_CASE_P(RidgeTests, RidgeTestF, ::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(RidgeTests, RidgeTestD, ::testing::ValuesIn(inputsd2));
}
} // end namespace ML
|
756daa8e67b5be1ce8aba8f446bd09322848af1f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "box3d3r-32x32-1-256_kernel.hu"
#define BENCH_DIM 3
#define BENCH_FPP 685
#define BENCH_RAD 3
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize][dimsize]
= (SB_TYPE (*)[dimsize][dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 7 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
double *dev_A;
cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double)));
{
cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyHostToDevice));
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 3 - 3);
const AN5D_TYPE __c1Pad = (3);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 3 - 3);
const AN5D_TYPE __c2Pad = (3);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 3 - 3);
const AN5D_TYPE __c3Pad = (3);
#define __c3 c3
const AN5D_TYPE __halo1 = 3;
const AN5D_TYPE __halo2 = 3;
const AN5D_TYPE __halo3 = 3;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
}
}
else if (__c0Len % __side0LenMax)
{
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++)
A[(t+1)%2][i][j][k] =
-0.176f*A[t%2][i-3][j][k] +
0.0010f*A[t%2][i-3][j-3][k-3] +
0.0020f*A[t%2][i-3][j-3][k-2] +
0.0030f*A[t%2][i-3][j-3][k-1] +
0.0040f*A[t%2][i-3][j-3][k] +
0.0050f*A[t%2][i-3][j-3][k+1] +
0.0060f*A[t%2][i-3][j-3][k+2] +
0.0070f*A[t%2][i-3][j-3][k+3] +
0.0080f*A[t%2][i-3][j-2][k-3] +
0.0090f*A[t%2][i-3][j-2][k-2] +
0.0100f*A[t%2][i-3][j-2][k-1] +
0.0110f*A[t%2][i-3][j-2][k] +
0.0120f*A[t%2][i-3][j-2][k+1] +
0.0130f*A[t%2][i-3][j-2][k+2] +
0.0140f*A[t%2][i-3][j-2][k+3] +
0.0150f*A[t%2][i-3][j-1][k-3] +
0.0160f*A[t%2][i-3][j-1][k-2] +
0.0170f*A[t%2][i-3][j-1][k-1] +
0.0180f*A[t%2][i-3][j-1][k] +
0.0190f*A[t%2][i-3][j-1][k+1] +
0.0200f*A[t%2][i-3][j-1][k+2] +
0.0210f*A[t%2][i-3][j-1][k+3] +
0.0220f*A[t%2][i-3][j][k-3] +
0.0230f*A[t%2][i-3][j][k-2] +
0.0240f*A[t%2][i-3][j][k-1] +
0.0250f*A[t%2][i-3][j][k+1] +
0.0260f*A[t%2][i-3][j][k+2] +
0.0270f*A[t%2][i-3][j][k+3] +
0.0280f*A[t%2][i-3][j+1][k-3] +
0.0290f*A[t%2][i-3][j+1][k-2] +
0.0300f*A[t%2][i-3][j+1][k-1] +
0.0310f*A[t%2][i-3][j+1][k] +
0.0320f*A[t%2][i-3][j+1][k+1] +
0.0330f*A[t%2][i-3][j+1][k+2] +
0.0340f*A[t%2][i-3][j+1][k+3] +
0.0350f*A[t%2][i-3][j+2][k-3] +
0.0360f*A[t%2][i-3][j+2][k-2] +
0.0370f*A[t%2][i-3][j+2][k-1] +
0.0380f*A[t%2][i-3][j+2][k] +
0.0390f*A[t%2][i-3][j+2][k+1] +
0.0400f*A[t%2][i-3][j+2][k+2] +
0.0410f*A[t%2][i-3][j+2][k+3] +
0.0420f*A[t%2][i-3][j+3][k-3] +
0.0430f*A[t%2][i-3][j+3][k-2] +
0.0440f*A[t%2][i-3][j+3][k-1] +
0.0450f*A[t%2][i-3][j+3][k] +
0.0460f*A[t%2][i-3][j+3][k+1] +
0.0470f*A[t%2][i-3][j+3][k+2] +
0.0480f*A[t%2][i-3][j+3][k+3] +
0.1808f*A[t%2][i-2][j][k] -
0.0011f*A[t%2][i-2][j-3][k-3] -
0.0021f*A[t%2][i-2][j-3][k-2] -
0.0031f*A[t%2][i-2][j-3][k-1] -
0.0041f*A[t%2][i-2][j-3][k] -
0.0051f*A[t%2][i-2][j-3][k+1] -
0.0061f*A[t%2][i-2][j-3][k+2] -
0.0071f*A[t%2][i-2][j-3][k+3] -
0.0081f*A[t%2][i-2][j-2][k-3] -
0.0091f*A[t%2][i-2][j-2][k-2] -
0.0101f*A[t%2][i-2][j-2][k-1] -
0.0111f*A[t%2][i-2][j-2][k] -
0.0121f*A[t%2][i-2][j-2][k+1] -
0.0131f*A[t%2][i-2][j-2][k+2] -
0.0141f*A[t%2][i-2][j-2][k+3] -
0.0151f*A[t%2][i-2][j-1][k-3] -
0.0161f*A[t%2][i-2][j-1][k-2] -
0.0171f*A[t%2][i-2][j-1][k-1] -
0.0181f*A[t%2][i-2][j-1][k] -
0.0191f*A[t%2][i-2][j-1][k+1] -
0.0201f*A[t%2][i-2][j-1][k+2] -
0.0211f*A[t%2][i-2][j-1][k+3] -
0.0221f*A[t%2][i-2][j][k-3] -
0.0231f*A[t%2][i-2][j][k-2] -
0.0241f*A[t%2][i-2][j][k-1] -
0.0251f*A[t%2][i-2][j][k+1] -
0.0261f*A[t%2][i-2][j][k+2] -
0.0271f*A[t%2][i-2][j][k+3] -
0.0281f*A[t%2][i-2][j+1][k-3] -
0.0291f*A[t%2][i-2][j+1][k-2] -
0.0301f*A[t%2][i-2][j+1][k-1] -
0.0311f*A[t%2][i-2][j+1][k] -
0.0321f*A[t%2][i-2][j+1][k+1] -
0.0331f*A[t%2][i-2][j+1][k+2] -
0.0341f*A[t%2][i-2][j+1][k+3] -
0.0351f*A[t%2][i-2][j+2][k-3] -
0.0361f*A[t%2][i-2][j+2][k-2] -
0.0371f*A[t%2][i-2][j+2][k-1] -
0.0381f*A[t%2][i-2][j+2][k] -
0.0391f*A[t%2][i-2][j+2][k+1] -
0.0401f*A[t%2][i-2][j+2][k+2] -
0.0411f*A[t%2][i-2][j+2][k+3] -
0.0421f*A[t%2][i-2][j+3][k-3] -
0.0431f*A[t%2][i-2][j+3][k-2] -
0.0441f*A[t%2][i-2][j+3][k-1] -
0.0451f*A[t%2][i-2][j+3][k] -
0.0461f*A[t%2][i-2][j+3][k+1] -
0.0471f*A[t%2][i-2][j+3][k+2] -
0.0481f*A[t%2][i-2][j+3][k+3] +
-0.1856f*A[t%2][i-1][j][k] +
0.0012f*A[t%2][i-1][j-3][k-3] +
0.0022f*A[t%2][i-1][j-3][k-2] +
0.0032f*A[t%2][i-1][j-3][k-1] +
0.0042f*A[t%2][i-1][j-3][k] +
0.0052f*A[t%2][i-1][j-3][k+1] +
0.0062f*A[t%2][i-1][j-3][k+2] +
0.0072f*A[t%2][i-1][j-3][k+3] +
0.0082f*A[t%2][i-1][j-2][k-3] +
0.0092f*A[t%2][i-1][j-2][k-2] +
0.0102f*A[t%2][i-1][j-2][k-1] +
0.0112f*A[t%2][i-1][j-2][k] +
0.0122f*A[t%2][i-1][j-2][k+1] +
0.0132f*A[t%2][i-1][j-2][k+2] +
0.0142f*A[t%2][i-1][j-2][k+3] +
0.0152f*A[t%2][i-1][j-1][k-3] +
0.0162f*A[t%2][i-1][j-1][k-2] +
0.0172f*A[t%2][i-1][j-1][k-1] +
0.0182f*A[t%2][i-1][j-1][k] +
0.0192f*A[t%2][i-1][j-1][k+1] +
0.0202f*A[t%2][i-1][j-1][k+2] +
0.0212f*A[t%2][i-1][j-1][k+3] +
0.0222f*A[t%2][i-1][j][k-3] +
0.0232f*A[t%2][i-1][j][k-2] +
0.0242f*A[t%2][i-1][j][k-1] +
0.0252f*A[t%2][i-1][j][k+1] +
0.0262f*A[t%2][i-1][j][k+2] +
0.0272f*A[t%2][i-1][j][k+3] +
0.0282f*A[t%2][i-1][j+1][k-3] +
0.0292f*A[t%2][i-1][j+1][k-2] +
0.0302f*A[t%2][i-1][j+1][k-1] +
0.0312f*A[t%2][i-1][j+1][k] +
0.0322f*A[t%2][i-1][j+1][k+1] +
0.0332f*A[t%2][i-1][j+1][k+2] +
0.0342f*A[t%2][i-1][j+1][k+3] +
0.0352f*A[t%2][i-1][j+2][k-3] +
0.0362f*A[t%2][i-1][j+2][k-2] +
0.0372f*A[t%2][i-1][j+2][k-1] +
0.0382f*A[t%2][i-1][j+2][k] +
0.0392f*A[t%2][i-1][j+2][k+1] +
0.0402f*A[t%2][i-1][j+2][k+2] +
0.0412f*A[t%2][i-1][j+2][k+3] +
0.0422f*A[t%2][i-1][j+3][k-3] +
0.0432f*A[t%2][i-1][j+3][k-2] +
0.0442f*A[t%2][i-1][j+3][k-1] +
0.0452f*A[t%2][i-1][j+3][k] +
0.0462f*A[t%2][i-1][j+3][k+1] +
0.0472f*A[t%2][i-1][j+3][k+2] +
0.0482f*A[t%2][i-1][j+3][k+3] +
-0.1904f*A[t%2][i][j][k] +
0.0013f*A[t%2][i][j-3][k-3] +
0.0023f*A[t%2][i][j-3][k-2] +
0.0033f*A[t%2][i][j-3][k-1] +
0.0043f*A[t%2][i][j-3][k] +
0.0053f*A[t%2][i][j-3][k+1] +
0.0063f*A[t%2][i][j-3][k+2] +
0.0073f*A[t%2][i][j-3][k+3] +
0.0083f*A[t%2][i][j-2][k-3] +
0.0093f*A[t%2][i][j-2][k-2] +
0.0103f*A[t%2][i][j-2][k-1] +
0.0113f*A[t%2][i][j-2][k] +
0.0123f*A[t%2][i][j-2][k+1] +
0.0133f*A[t%2][i][j-2][k+2] +
0.0143f*A[t%2][i][j-2][k+3] +
0.0153f*A[t%2][i][j-1][k-3] +
0.0163f*A[t%2][i][j-1][k-2] +
0.0173f*A[t%2][i][j-1][k-1] +
0.0183f*A[t%2][i][j-1][k] +
0.0193f*A[t%2][i][j-1][k+1] +
0.0203f*A[t%2][i][j-1][k+2] +
0.0213f*A[t%2][i][j-1][k+3] +
0.0223f*A[t%2][i][j][k-3] +
0.0233f*A[t%2][i][j][k-2] +
0.0243f*A[t%2][i][j][k-1] +
0.0253f*A[t%2][i][j][k+1] +
0.0263f*A[t%2][i][j][k+2] +
0.0273f*A[t%2][i][j][k+3] +
0.0283f*A[t%2][i][j+1][k-3] +
0.0293f*A[t%2][i][j+1][k-2] +
0.0303f*A[t%2][i][j+1][k-1] +
0.0313f*A[t%2][i][j+1][k] +
0.0323f*A[t%2][i][j+1][k+1] +
0.0333f*A[t%2][i][j+1][k+2] +
0.0343f*A[t%2][i][j+1][k+3] +
0.0353f*A[t%2][i][j+2][k-3] +
0.0363f*A[t%2][i][j+2][k-2] +
0.0373f*A[t%2][i][j+2][k-1] +
0.0383f*A[t%2][i][j+2][k] +
0.0393f*A[t%2][i][j+2][k+1] +
0.0403f*A[t%2][i][j+2][k+2] +
0.0413f*A[t%2][i][j+2][k+3] +
0.0423f*A[t%2][i][j+3][k-3] +
0.0433f*A[t%2][i][j+3][k-2] +
0.0443f*A[t%2][i][j+3][k-1] +
0.0453f*A[t%2][i][j+3][k] +
0.0463f*A[t%2][i][j+3][k+1] +
0.0473f*A[t%2][i][j+3][k+2] +
0.0483f*A[t%2][i][j+3][k+3] +
0.1952f*A[t%2][i+1][j][k] -
0.0014f*A[t%2][i+1][j-3][k-3] -
0.0024f*A[t%2][i+1][j-3][k-2] -
0.0034f*A[t%2][i+1][j-3][k-1] -
0.0044f*A[t%2][i+1][j-3][k] -
0.0054f*A[t%2][i+1][j-3][k+1] -
0.0064f*A[t%2][i+1][j-3][k+2] -
0.0074f*A[t%2][i+1][j-3][k+3] -
0.0084f*A[t%2][i+1][j-2][k-3] -
0.0094f*A[t%2][i+1][j-2][k-2] -
0.0104f*A[t%2][i+1][j-2][k-1] -
0.0114f*A[t%2][i+1][j-2][k] -
0.0124f*A[t%2][i+1][j-2][k+1] -
0.0134f*A[t%2][i+1][j-2][k+2] -
0.0144f*A[t%2][i+1][j-2][k+3] -
0.0154f*A[t%2][i+1][j-1][k-3] -
0.0164f*A[t%2][i+1][j-1][k-2] -
0.0174f*A[t%2][i+1][j-1][k-1] -
0.0184f*A[t%2][i+1][j-1][k] -
0.0194f*A[t%2][i+1][j-1][k+1] -
0.0204f*A[t%2][i+1][j-1][k+2] -
0.0214f*A[t%2][i+1][j-1][k+3] -
0.0224f*A[t%2][i+1][j][k-3] -
0.0234f*A[t%2][i+1][j][k-2] -
0.0244f*A[t%2][i+1][j][k-1] -
0.0254f*A[t%2][i+1][j][k+1] -
0.0264f*A[t%2][i+1][j][k+2] -
0.0274f*A[t%2][i+1][j][k+3] -
0.0284f*A[t%2][i+1][j+1][k-3] -
0.0294f*A[t%2][i+1][j+1][k-2] -
0.0304f*A[t%2][i+1][j+1][k-1] -
0.0314f*A[t%2][i+1][j+1][k] -
0.0324f*A[t%2][i+1][j+1][k+1] -
0.0334f*A[t%2][i+1][j+1][k+2] -
0.0344f*A[t%2][i+1][j+1][k+3] -
0.0354f*A[t%2][i+1][j+2][k-3] -
0.0364f*A[t%2][i+1][j+2][k-2] -
0.0374f*A[t%2][i+1][j+2][k-1] -
0.0384f*A[t%2][i+1][j+2][k] -
0.0394f*A[t%2][i+1][j+2][k+1] -
0.0404f*A[t%2][i+1][j+2][k+2] -
0.0414f*A[t%2][i+1][j+2][k+3] -
0.0424f*A[t%2][i+1][j+3][k-3] -
0.0434f*A[t%2][i+1][j+3][k-2] -
0.0444f*A[t%2][i+1][j+3][k-1] -
0.0454f*A[t%2][i+1][j+3][k] -
0.0464f*A[t%2][i+1][j+3][k+1] -
0.0474f*A[t%2][i+1][j+3][k+2] -
0.0484f*A[t%2][i+1][j+3][k+3] -
-0.300f*A[t%2][i+2][j][k] +
0.0015f*A[t%2][i+2][j-3][k-3] +
0.0025f*A[t%2][i+2][j-3][k-2] +
0.0035f*A[t%2][i+2][j-3][k-1] +
0.0045f*A[t%2][i+2][j-3][k] +
0.0055f*A[t%2][i+2][j-3][k+1] +
0.0065f*A[t%2][i+2][j-3][k+2] +
0.0075f*A[t%2][i+2][j-3][k+3] +
0.0085f*A[t%2][i+2][j-2][k-3] +
0.0095f*A[t%2][i+2][j-2][k-2] +
0.0105f*A[t%2][i+2][j-2][k-1] +
0.0115f*A[t%2][i+2][j-2][k] +
0.0125f*A[t%2][i+2][j-2][k+1] +
0.0135f*A[t%2][i+2][j-2][k+2] +
0.0145f*A[t%2][i+2][j-2][k+3] +
0.0155f*A[t%2][i+2][j-1][k-3] +
0.0165f*A[t%2][i+2][j-1][k-2] +
0.0175f*A[t%2][i+2][j-1][k-1] +
0.0185f*A[t%2][i+2][j-1][k] +
0.0195f*A[t%2][i+2][j-1][k+1] +
0.0205f*A[t%2][i+2][j-1][k+2] +
0.0215f*A[t%2][i+2][j-1][k+3] +
0.0225f*A[t%2][i+2][j][k-3] +
0.0235f*A[t%2][i+2][j][k-2] +
0.0245f*A[t%2][i+2][j][k-1] +
0.0255f*A[t%2][i+2][j][k+1] +
0.0265f*A[t%2][i+2][j][k+2] +
0.0275f*A[t%2][i+2][j][k+3] +
0.0285f*A[t%2][i+2][j+1][k-3] +
0.0295f*A[t%2][i+2][j+1][k-2] +
0.0305f*A[t%2][i+2][j+1][k-1] +
0.0315f*A[t%2][i+2][j+1][k] +
0.0325f*A[t%2][i+2][j+1][k+1] +
0.0335f*A[t%2][i+2][j+1][k+2] +
0.0345f*A[t%2][i+2][j+1][k+3] +
0.0355f*A[t%2][i+2][j+2][k-3] +
0.0365f*A[t%2][i+2][j+2][k-2] +
0.0375f*A[t%2][i+2][j+2][k-1] +
0.0385f*A[t%2][i+2][j+2][k] +
0.0395f*A[t%2][i+2][j+2][k+1] +
0.0405f*A[t%2][i+2][j+2][k+2] +
0.0415f*A[t%2][i+2][j+2][k+3] +
0.0425f*A[t%2][i+2][j+3][k-3] +
0.0435f*A[t%2][i+2][j+3][k-2] +
0.0445f*A[t%2][i+2][j+3][k-1] +
0.0455f*A[t%2][i+2][j+3][k] +
0.0465f*A[t%2][i+2][j+3][k+1] +
0.0475f*A[t%2][i+2][j+3][k+2] +
0.1485f*A[t%2][i+2][j+3][k+3] +
0.2048f*A[t%2][i+3][j][k] -
0.0016f*A[t%2][i+3][j-3][k-3] -
0.0026f*A[t%2][i+3][j-3][k-2] -
0.0036f*A[t%2][i+3][j-3][k-1] -
0.0046f*A[t%2][i+3][j-3][k] -
0.0056f*A[t%2][i+3][j-3][k+1] -
0.0066f*A[t%2][i+3][j-3][k+2] -
0.0076f*A[t%2][i+3][j-3][k+3] -
0.0086f*A[t%2][i+3][j-2][k-3] -
0.0096f*A[t%2][i+3][j-2][k-2] -
0.0106f*A[t%2][i+3][j-2][k-1] -
0.0116f*A[t%2][i+3][j-2][k] -
0.0126f*A[t%2][i+3][j-2][k+1] -
0.0136f*A[t%2][i+3][j-2][k+2] -
0.0146f*A[t%2][i+3][j-2][k+3] -
0.0156f*A[t%2][i+3][j-1][k-3] -
0.0166f*A[t%2][i+3][j-1][k-2] -
0.0176f*A[t%2][i+3][j-1][k-1] -
0.0186f*A[t%2][i+3][j-1][k] -
0.0196f*A[t%2][i+3][j-1][k+1] -
0.0206f*A[t%2][i+3][j-1][k+2] -
0.0216f*A[t%2][i+3][j-1][k+3] -
0.0226f*A[t%2][i+3][j][k-3] -
0.0236f*A[t%2][i+3][j][k-2] -
0.0246f*A[t%2][i+3][j][k-1] -
0.0256f*A[t%2][i+3][j][k+1] -
0.0266f*A[t%2][i+3][j][k+2] -
0.0276f*A[t%2][i+3][j][k+3] -
0.0286f*A[t%2][i+3][j+1][k-3] -
0.0296f*A[t%2][i+3][j+1][k-2] -
0.0306f*A[t%2][i+3][j+1][k-1] -
0.0316f*A[t%2][i+3][j+1][k] -
0.0326f*A[t%2][i+3][j+1][k+1] -
0.0336f*A[t%2][i+3][j+1][k+2] -
0.0346f*A[t%2][i+3][j+1][k+3] -
0.0356f*A[t%2][i+3][j+2][k-3] -
0.0366f*A[t%2][i+3][j+2][k-2] -
0.0376f*A[t%2][i+3][j+2][k-1] -
0.0386f*A[t%2][i+3][j+2][k] -
0.0396f*A[t%2][i+3][j+2][k+1] -
0.0406f*A[t%2][i+3][j+2][k+2] -
0.0416f*A[t%2][i+3][j+2][k+3] -
0.0426f*A[t%2][i+3][j+3][k-3] -
0.0436f*A[t%2][i+3][j+3][k-2] -
0.0446f*A[t%2][i+3][j+3][k-1] -
0.0456f*A[t%2][i+3][j+3][k] -
0.0466f*A[t%2][i+3][j+3][k+1] -
0.0476f*A[t%2][i+3][j+3][k+2] -
0.0486f*A[t%2][i+3][j+3][k+3];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
| 756daa8e67b5be1ce8aba8f446bd09322848af1f.cu | #include <assert.h>
#include <stdio.h>
#include "box3d3r-32x32-1-256_kernel.hu"
#define BENCH_DIM 3
#define BENCH_FPP 685
#define BENCH_RAD 3
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize][dimsize]
= (SB_TYPE (*)[dimsize][dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 7 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
double *dev_A;
cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double)));
{
cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyHostToDevice));
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 3 - 3);
const AN5D_TYPE __c1Pad = (3);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 3 - 3);
const AN5D_TYPE __c2Pad = (3);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 3 - 3);
const AN5D_TYPE __c3Pad = (3);
#define __c3 c3
const AN5D_TYPE __halo1 = 3;
const AN5D_TYPE __halo2 = 3;
const AN5D_TYPE __halo3 = 3;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 256;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
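/* A quick check of the tiling arithmetic using only the constants above:
__OlLen2 = __OlLen3 = 3 * 1 = 3, so __side2LenOl = __side3LenOl = 26 + 2 * 3 = 32,
__side1LenOl = 256 + 2 * 3 = 262, and __blockSize = 1 * 32 * 32 = 1024 threads
per block -- matching the 32x32-1-256 in the included kernel header's name. */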
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len) * ((__c3Len + __side3Len - 1) / __side3Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
}
}
else if (__c0Len % __side0LenMax)
{
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_A));
}
}
else {
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
for (int k = BENCH_RAD; k < dimsize - BENCH_RAD; k++)
A[(t+1)%2][i][j][k] =
-0.176f*A[t%2][i-3][j][k] +
0.0010f*A[t%2][i-3][j-3][k-3] +
0.0020f*A[t%2][i-3][j-3][k-2] +
0.0030f*A[t%2][i-3][j-3][k-1] +
0.0040f*A[t%2][i-3][j-3][k] +
0.0050f*A[t%2][i-3][j-3][k+1] +
0.0060f*A[t%2][i-3][j-3][k+2] +
0.0070f*A[t%2][i-3][j-3][k+3] +
0.0080f*A[t%2][i-3][j-2][k-3] +
0.0090f*A[t%2][i-3][j-2][k-2] +
0.0100f*A[t%2][i-3][j-2][k-1] +
0.0110f*A[t%2][i-3][j-2][k] +
0.0120f*A[t%2][i-3][j-2][k+1] +
0.0130f*A[t%2][i-3][j-2][k+2] +
0.0140f*A[t%2][i-3][j-2][k+3] +
0.0150f*A[t%2][i-3][j-1][k-3] +
0.0160f*A[t%2][i-3][j-1][k-2] +
0.0170f*A[t%2][i-3][j-1][k-1] +
0.0180f*A[t%2][i-3][j-1][k] +
0.0190f*A[t%2][i-3][j-1][k+1] +
0.0200f*A[t%2][i-3][j-1][k+2] +
0.0210f*A[t%2][i-3][j-1][k+3] +
0.0220f*A[t%2][i-3][j][k-3] +
0.0230f*A[t%2][i-3][j][k-2] +
0.0240f*A[t%2][i-3][j][k-1] +
0.0250f*A[t%2][i-3][j][k+1] +
0.0260f*A[t%2][i-3][j][k+2] +
0.0270f*A[t%2][i-3][j][k+3] +
0.0280f*A[t%2][i-3][j+1][k-3] +
0.0290f*A[t%2][i-3][j+1][k-2] +
0.0300f*A[t%2][i-3][j+1][k-1] +
0.0310f*A[t%2][i-3][j+1][k] +
0.0320f*A[t%2][i-3][j+1][k+1] +
0.0330f*A[t%2][i-3][j+1][k+2] +
0.0340f*A[t%2][i-3][j+1][k+3] +
0.0350f*A[t%2][i-3][j+2][k-3] +
0.0360f*A[t%2][i-3][j+2][k-2] +
0.0370f*A[t%2][i-3][j+2][k-1] +
0.0380f*A[t%2][i-3][j+2][k] +
0.0390f*A[t%2][i-3][j+2][k+1] +
0.0400f*A[t%2][i-3][j+2][k+2] +
0.0410f*A[t%2][i-3][j+2][k+3] +
0.0420f*A[t%2][i-3][j+3][k-3] +
0.0430f*A[t%2][i-3][j+3][k-2] +
0.0440f*A[t%2][i-3][j+3][k-1] +
0.0450f*A[t%2][i-3][j+3][k] +
0.0460f*A[t%2][i-3][j+3][k+1] +
0.0470f*A[t%2][i-3][j+3][k+2] +
0.0480f*A[t%2][i-3][j+3][k+3] +
0.1808f*A[t%2][i-2][j][k] -
0.0011f*A[t%2][i-2][j-3][k-3] -
0.0021f*A[t%2][i-2][j-3][k-2] -
0.0031f*A[t%2][i-2][j-3][k-1] -
0.0041f*A[t%2][i-2][j-3][k] -
0.0051f*A[t%2][i-2][j-3][k+1] -
0.0061f*A[t%2][i-2][j-3][k+2] -
0.0071f*A[t%2][i-2][j-3][k+3] -
0.0081f*A[t%2][i-2][j-2][k-3] -
0.0091f*A[t%2][i-2][j-2][k-2] -
0.0101f*A[t%2][i-2][j-2][k-1] -
0.0111f*A[t%2][i-2][j-2][k] -
0.0121f*A[t%2][i-2][j-2][k+1] -
0.0131f*A[t%2][i-2][j-2][k+2] -
0.0141f*A[t%2][i-2][j-2][k+3] -
0.0151f*A[t%2][i-2][j-1][k-3] -
0.0161f*A[t%2][i-2][j-1][k-2] -
0.0171f*A[t%2][i-2][j-1][k-1] -
0.0181f*A[t%2][i-2][j-1][k] -
0.0191f*A[t%2][i-2][j-1][k+1] -
0.0201f*A[t%2][i-2][j-1][k+2] -
0.0211f*A[t%2][i-2][j-1][k+3] -
0.0221f*A[t%2][i-2][j][k-3] -
0.0231f*A[t%2][i-2][j][k-2] -
0.0241f*A[t%2][i-2][j][k-1] -
0.0251f*A[t%2][i-2][j][k+1] -
0.0261f*A[t%2][i-2][j][k+2] -
0.0271f*A[t%2][i-2][j][k+3] -
0.0281f*A[t%2][i-2][j+1][k-3] -
0.0291f*A[t%2][i-2][j+1][k-2] -
0.0301f*A[t%2][i-2][j+1][k-1] -
0.0311f*A[t%2][i-2][j+1][k] -
0.0321f*A[t%2][i-2][j+1][k+1] -
0.0331f*A[t%2][i-2][j+1][k+2] -
0.0341f*A[t%2][i-2][j+1][k+3] -
0.0351f*A[t%2][i-2][j+2][k-3] -
0.0361f*A[t%2][i-2][j+2][k-2] -
0.0371f*A[t%2][i-2][j+2][k-1] -
0.0381f*A[t%2][i-2][j+2][k] -
0.0391f*A[t%2][i-2][j+2][k+1] -
0.0401f*A[t%2][i-2][j+2][k+2] -
0.0411f*A[t%2][i-2][j+2][k+3] -
0.0421f*A[t%2][i-2][j+3][k-3] -
0.0431f*A[t%2][i-2][j+3][k-2] -
0.0441f*A[t%2][i-2][j+3][k-1] -
0.0451f*A[t%2][i-2][j+3][k] -
0.0461f*A[t%2][i-2][j+3][k+1] -
0.0471f*A[t%2][i-2][j+3][k+2] -
0.0481f*A[t%2][i-2][j+3][k+3] +
-0.1856f*A[t%2][i-1][j][k] +
0.0012f*A[t%2][i-1][j-3][k-3] +
0.0022f*A[t%2][i-1][j-3][k-2] +
0.0032f*A[t%2][i-1][j-3][k-1] +
0.0042f*A[t%2][i-1][j-3][k] +
0.0052f*A[t%2][i-1][j-3][k+1] +
0.0062f*A[t%2][i-1][j-3][k+2] +
0.0072f*A[t%2][i-1][j-3][k+3] +
0.0082f*A[t%2][i-1][j-2][k-3] +
0.0092f*A[t%2][i-1][j-2][k-2] +
0.0102f*A[t%2][i-1][j-2][k-1] +
0.0112f*A[t%2][i-1][j-2][k] +
0.0122f*A[t%2][i-1][j-2][k+1] +
0.0132f*A[t%2][i-1][j-2][k+2] +
0.0142f*A[t%2][i-1][j-2][k+3] +
0.0152f*A[t%2][i-1][j-1][k-3] +
0.0162f*A[t%2][i-1][j-1][k-2] +
0.0172f*A[t%2][i-1][j-1][k-1] +
0.0182f*A[t%2][i-1][j-1][k] +
0.0192f*A[t%2][i-1][j-1][k+1] +
0.0202f*A[t%2][i-1][j-1][k+2] +
0.0212f*A[t%2][i-1][j-1][k+3] +
0.0222f*A[t%2][i-1][j][k-3] +
0.0232f*A[t%2][i-1][j][k-2] +
0.0242f*A[t%2][i-1][j][k-1] +
0.0252f*A[t%2][i-1][j][k+1] +
0.0262f*A[t%2][i-1][j][k+2] +
0.0272f*A[t%2][i-1][j][k+3] +
0.0282f*A[t%2][i-1][j+1][k-3] +
0.0292f*A[t%2][i-1][j+1][k-2] +
0.0302f*A[t%2][i-1][j+1][k-1] +
0.0312f*A[t%2][i-1][j+1][k] +
0.0322f*A[t%2][i-1][j+1][k+1] +
0.0332f*A[t%2][i-1][j+1][k+2] +
0.0342f*A[t%2][i-1][j+1][k+3] +
0.0352f*A[t%2][i-1][j+2][k-3] +
0.0362f*A[t%2][i-1][j+2][k-2] +
0.0372f*A[t%2][i-1][j+2][k-1] +
0.0382f*A[t%2][i-1][j+2][k] +
0.0392f*A[t%2][i-1][j+2][k+1] +
0.0402f*A[t%2][i-1][j+2][k+2] +
0.0412f*A[t%2][i-1][j+2][k+3] +
0.0422f*A[t%2][i-1][j+3][k-3] +
0.0432f*A[t%2][i-1][j+3][k-2] +
0.0442f*A[t%2][i-1][j+3][k-1] +
0.0452f*A[t%2][i-1][j+3][k] +
0.0462f*A[t%2][i-1][j+3][k+1] +
0.0472f*A[t%2][i-1][j+3][k+2] +
0.0482f*A[t%2][i-1][j+3][k+3] +
-0.1904f*A[t%2][i][j][k] +
0.0013f*A[t%2][i][j-3][k-3] +
0.0023f*A[t%2][i][j-3][k-2] +
0.0033f*A[t%2][i][j-3][k-1] +
0.0043f*A[t%2][i][j-3][k] +
0.0053f*A[t%2][i][j-3][k+1] +
0.0063f*A[t%2][i][j-3][k+2] +
0.0073f*A[t%2][i][j-3][k+3] +
0.0083f*A[t%2][i][j-2][k-3] +
0.0093f*A[t%2][i][j-2][k-2] +
0.0103f*A[t%2][i][j-2][k-1] +
0.0113f*A[t%2][i][j-2][k] +
0.0123f*A[t%2][i][j-2][k+1] +
0.0133f*A[t%2][i][j-2][k+2] +
0.0143f*A[t%2][i][j-2][k+3] +
0.0153f*A[t%2][i][j-1][k-3] +
0.0163f*A[t%2][i][j-1][k-2] +
0.0173f*A[t%2][i][j-1][k-1] +
0.0183f*A[t%2][i][j-1][k] +
0.0193f*A[t%2][i][j-1][k+1] +
0.0203f*A[t%2][i][j-1][k+2] +
0.0213f*A[t%2][i][j-1][k+3] +
0.0223f*A[t%2][i][j][k-3] +
0.0233f*A[t%2][i][j][k-2] +
0.0243f*A[t%2][i][j][k-1] +
0.0253f*A[t%2][i][j][k+1] +
0.0263f*A[t%2][i][j][k+2] +
0.0273f*A[t%2][i][j][k+3] +
0.0283f*A[t%2][i][j+1][k-3] +
0.0293f*A[t%2][i][j+1][k-2] +
0.0303f*A[t%2][i][j+1][k-1] +
0.0313f*A[t%2][i][j+1][k] +
0.0323f*A[t%2][i][j+1][k+1] +
0.0333f*A[t%2][i][j+1][k+2] +
0.0343f*A[t%2][i][j+1][k+3] +
0.0353f*A[t%2][i][j+2][k-3] +
0.0363f*A[t%2][i][j+2][k-2] +
0.0373f*A[t%2][i][j+2][k-1] +
0.0383f*A[t%2][i][j+2][k] +
0.0393f*A[t%2][i][j+2][k+1] +
0.0403f*A[t%2][i][j+2][k+2] +
0.0413f*A[t%2][i][j+2][k+3] +
0.0423f*A[t%2][i][j+3][k-3] +
0.0433f*A[t%2][i][j+3][k-2] +
0.0443f*A[t%2][i][j+3][k-1] +
0.0453f*A[t%2][i][j+3][k] +
0.0463f*A[t%2][i][j+3][k+1] +
0.0473f*A[t%2][i][j+3][k+2] +
0.0483f*A[t%2][i][j+3][k+3] +
0.1952f*A[t%2][i+1][j][k] -
0.0014f*A[t%2][i+1][j-3][k-3] -
0.0024f*A[t%2][i+1][j-3][k-2] -
0.0034f*A[t%2][i+1][j-3][k-1] -
0.0044f*A[t%2][i+1][j-3][k] -
0.0054f*A[t%2][i+1][j-3][k+1] -
0.0064f*A[t%2][i+1][j-3][k+2] -
0.0074f*A[t%2][i+1][j-3][k+3] -
0.0084f*A[t%2][i+1][j-2][k-3] -
0.0094f*A[t%2][i+1][j-2][k-2] -
0.0104f*A[t%2][i+1][j-2][k-1] -
0.0114f*A[t%2][i+1][j-2][k] -
0.0124f*A[t%2][i+1][j-2][k+1] -
0.0134f*A[t%2][i+1][j-2][k+2] -
0.0144f*A[t%2][i+1][j-2][k+3] -
0.0154f*A[t%2][i+1][j-1][k-3] -
0.0164f*A[t%2][i+1][j-1][k-2] -
0.0174f*A[t%2][i+1][j-1][k-1] -
0.0184f*A[t%2][i+1][j-1][k] -
0.0194f*A[t%2][i+1][j-1][k+1] -
0.0204f*A[t%2][i+1][j-1][k+2] -
0.0214f*A[t%2][i+1][j-1][k+3] -
0.0224f*A[t%2][i+1][j][k-3] -
0.0234f*A[t%2][i+1][j][k-2] -
0.0244f*A[t%2][i+1][j][k-1] -
0.0254f*A[t%2][i+1][j][k+1] -
0.0264f*A[t%2][i+1][j][k+2] -
0.0274f*A[t%2][i+1][j][k+3] -
0.0284f*A[t%2][i+1][j+1][k-3] -
0.0294f*A[t%2][i+1][j+1][k-2] -
0.0304f*A[t%2][i+1][j+1][k-1] -
0.0314f*A[t%2][i+1][j+1][k] -
0.0324f*A[t%2][i+1][j+1][k+1] -
0.0334f*A[t%2][i+1][j+1][k+2] -
0.0344f*A[t%2][i+1][j+1][k+3] -
0.0354f*A[t%2][i+1][j+2][k-3] -
0.0364f*A[t%2][i+1][j+2][k-2] -
0.0374f*A[t%2][i+1][j+2][k-1] -
0.0384f*A[t%2][i+1][j+2][k] -
0.0394f*A[t%2][i+1][j+2][k+1] -
0.0404f*A[t%2][i+1][j+2][k+2] -
0.0414f*A[t%2][i+1][j+2][k+3] -
0.0424f*A[t%2][i+1][j+3][k-3] -
0.0434f*A[t%2][i+1][j+3][k-2] -
0.0444f*A[t%2][i+1][j+3][k-1] -
0.0454f*A[t%2][i+1][j+3][k] -
0.0464f*A[t%2][i+1][j+3][k+1] -
0.0474f*A[t%2][i+1][j+3][k+2] -
0.0484f*A[t%2][i+1][j+3][k+3] -
-0.300f*A[t%2][i+2][j][k] +
0.0015f*A[t%2][i+2][j-3][k-3] +
0.0025f*A[t%2][i+2][j-3][k-2] +
0.0035f*A[t%2][i+2][j-3][k-1] +
0.0045f*A[t%2][i+2][j-3][k] +
0.0055f*A[t%2][i+2][j-3][k+1] +
0.0065f*A[t%2][i+2][j-3][k+2] +
0.0075f*A[t%2][i+2][j-3][k+3] +
0.0085f*A[t%2][i+2][j-2][k-3] +
0.0095f*A[t%2][i+2][j-2][k-2] +
0.0105f*A[t%2][i+2][j-2][k-1] +
0.0115f*A[t%2][i+2][j-2][k] +
0.0125f*A[t%2][i+2][j-2][k+1] +
0.0135f*A[t%2][i+2][j-2][k+2] +
0.0145f*A[t%2][i+2][j-2][k+3] +
0.0155f*A[t%2][i+2][j-1][k-3] +
0.0165f*A[t%2][i+2][j-1][k-2] +
0.0175f*A[t%2][i+2][j-1][k-1] +
0.0185f*A[t%2][i+2][j-1][k] +
0.0195f*A[t%2][i+2][j-1][k+1] +
0.0205f*A[t%2][i+2][j-1][k+2] +
0.0215f*A[t%2][i+2][j-1][k+3] +
0.0225f*A[t%2][i+2][j][k-3] +
0.0235f*A[t%2][i+2][j][k-2] +
0.0245f*A[t%2][i+2][j][k-1] +
0.0255f*A[t%2][i+2][j][k+1] +
0.0265f*A[t%2][i+2][j][k+2] +
0.0275f*A[t%2][i+2][j][k+3] +
0.0285f*A[t%2][i+2][j+1][k-3] +
0.0295f*A[t%2][i+2][j+1][k-2] +
0.0305f*A[t%2][i+2][j+1][k-1] +
0.0315f*A[t%2][i+2][j+1][k] +
0.0325f*A[t%2][i+2][j+1][k+1] +
0.0335f*A[t%2][i+2][j+1][k+2] +
0.0345f*A[t%2][i+2][j+1][k+3] +
0.0355f*A[t%2][i+2][j+2][k-3] +
0.0365f*A[t%2][i+2][j+2][k-2] +
0.0375f*A[t%2][i+2][j+2][k-1] +
0.0385f*A[t%2][i+2][j+2][k] +
0.0395f*A[t%2][i+2][j+2][k+1] +
0.0405f*A[t%2][i+2][j+2][k+2] +
0.0415f*A[t%2][i+2][j+2][k+3] +
0.0425f*A[t%2][i+2][j+3][k-3] +
0.0435f*A[t%2][i+2][j+3][k-2] +
0.0445f*A[t%2][i+2][j+3][k-1] +
0.0455f*A[t%2][i+2][j+3][k] +
0.0465f*A[t%2][i+2][j+3][k+1] +
0.0475f*A[t%2][i+2][j+3][k+2] +
0.1485f*A[t%2][i+2][j+3][k+3] +
0.2048f*A[t%2][i+3][j][k] -
0.0016f*A[t%2][i+3][j-3][k-3] -
0.0026f*A[t%2][i+3][j-3][k-2] -
0.0036f*A[t%2][i+3][j-3][k-1] -
0.0046f*A[t%2][i+3][j-3][k] -
0.0056f*A[t%2][i+3][j-3][k+1] -
0.0066f*A[t%2][i+3][j-3][k+2] -
0.0076f*A[t%2][i+3][j-3][k+3] -
0.0086f*A[t%2][i+3][j-2][k-3] -
0.0096f*A[t%2][i+3][j-2][k-2] -
0.0106f*A[t%2][i+3][j-2][k-1] -
0.0116f*A[t%2][i+3][j-2][k] -
0.0126f*A[t%2][i+3][j-2][k+1] -
0.0136f*A[t%2][i+3][j-2][k+2] -
0.0146f*A[t%2][i+3][j-2][k+3] -
0.0156f*A[t%2][i+3][j-1][k-3] -
0.0166f*A[t%2][i+3][j-1][k-2] -
0.0176f*A[t%2][i+3][j-1][k-1] -
0.0186f*A[t%2][i+3][j-1][k] -
0.0196f*A[t%2][i+3][j-1][k+1] -
0.0206f*A[t%2][i+3][j-1][k+2] -
0.0216f*A[t%2][i+3][j-1][k+3] -
0.0226f*A[t%2][i+3][j][k-3] -
0.0236f*A[t%2][i+3][j][k-2] -
0.0246f*A[t%2][i+3][j][k-1] -
0.0256f*A[t%2][i+3][j][k+1] -
0.0266f*A[t%2][i+3][j][k+2] -
0.0276f*A[t%2][i+3][j][k+3] -
0.0286f*A[t%2][i+3][j+1][k-3] -
0.0296f*A[t%2][i+3][j+1][k-2] -
0.0306f*A[t%2][i+3][j+1][k-1] -
0.0316f*A[t%2][i+3][j+1][k] -
0.0326f*A[t%2][i+3][j+1][k+1] -
0.0336f*A[t%2][i+3][j+1][k+2] -
0.0346f*A[t%2][i+3][j+1][k+3] -
0.0356f*A[t%2][i+3][j+2][k-3] -
0.0366f*A[t%2][i+3][j+2][k-2] -
0.0376f*A[t%2][i+3][j+2][k-1] -
0.0386f*A[t%2][i+3][j+2][k] -
0.0396f*A[t%2][i+3][j+2][k+1] -
0.0406f*A[t%2][i+3][j+2][k+2] -
0.0416f*A[t%2][i+3][j+2][k+3] -
0.0426f*A[t%2][i+3][j+3][k-3] -
0.0436f*A[t%2][i+3][j+3][k-2] -
0.0446f*A[t%2][i+3][j+3][k-1] -
0.0456f*A[t%2][i+3][j+3][k] -
0.0466f*A[t%2][i+3][j+3][k+1] -
0.0476f*A[t%2][i+3][j+3][k+2] -
0.0486f*A[t%2][i+3][j+3][k+3];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
a7c2a4cc781fc1ae568a5ebd31f32d05942fa6a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "../swirl_user.h"
#include <fc2d_cudaclaw_check.cu>
__constant__ double s_tperiod;
__device__ double psi(double x, double y)
{
return (pow(sin(M_PI*x),2) * pow(sin(M_PI*y),2)) / M_PI;
}
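/* A short note on how this streamfunction is used: swirl_b4step2_test below
recovers cell-edge velocities from psi by one-sided differences, roughly
u ~ (psi(x, y+dy) - psi(x, y)) / dy and v ~ -(psi(x+dx, y) - psi(x, y)) / dx,
and scales both by cos(2*pi*(t + dt/2)/s_tperiod), so the flow reverses every
half period. */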
void swirl_setprob(double period)
{
CHECK(hipMemcpyToSymbol(s_tperiod, &period, sizeof(double)));
}
__device__ void swirl_b4step2_test(int mbc, int mx, int my, int meqn, double q[],
double xlower, double ylower, double dx, double dy,
double time, double dt, int maux,
double aux[], int i, int j)
{
double vt;
double xll, yll;
double p1,p2,p3;
xll = xlower + (i-1)*dx;
yll = ylower + (j-1)*dy;
vt = cos(2*M_PI*(time+dt/2.0)/s_tperiod);
p1 = psi(xll,yll+dy);
p2 = psi(xll,yll);
p3 = psi(xll+dx,yll);
aux[0] = (p1-p2) / dy;
aux[1] = - (p3-p2) / dx;
aux[0] *= vt;
aux[1] *= vt;
}
__device__ cudaclaw_cuda_b4step2_t swirl_b4step2 = swirl_b4step2_test;
void swirl_assign_b4step2(cudaclaw_cuda_b4step2_t *b4step2)
{
hipError_t ce = hipMemcpyFromSymbol(b4step2, swirl_b4step2, sizeof(cudaclaw_cuda_b4step2_t));
if(ce != hipSuccess)
{
fclaw_global_essentialf("ERROR (swirl_b4step2): %s\n",hipGetErrorString(ce));
exit(0);
}
}
| a7c2a4cc781fc1ae568a5ebd31f32d05942fa6a2.cu | #include "../swirl_user.h"
#include <fc2d_cudaclaw_check.cu>
__constant__ double s_tperiod;
__device__ double psi(double x, double y)
{
return (pow(sin(M_PI*x),2) * pow(sin(M_PI*y),2)) / M_PI;
}
void swirl_setprob(double period)
{
CHECK(cudaMemcpyToSymbol(s_tperiod, &period, sizeof(double)));
}
__device__ void swirl_b4step2_test(int mbc, int mx, int my, int meqn, double q[],
double xlower, double ylower, double dx, double dy,
double time, double dt, int maux,
double aux[], int i, int j)
{
double vt;
double xll, yll;
double p1,p2,p3;
xll = xlower + (i-1)*dx;
yll = ylower + (j-1)*dy;
vt = cos(2*M_PI*(time+dt/2.0)/s_tperiod);
p1 = psi(xll,yll+dy);
p2 = psi(xll,yll);
p3 = psi(xll+dx,yll);
aux[0] = (p1-p2) / dy;
aux[1] = - (p3-p2) / dx;
aux[0] *= vt;
aux[1] *= vt;
}
__device__ cudaclaw_cuda_b4step2_t swirl_b4step2 = swirl_b4step2_test;
void swirl_assign_b4step2(cudaclaw_cuda_b4step2_t *b4step2)
{
cudaError_t ce = cudaMemcpyFromSymbol(b4step2, swirl_b4step2, sizeof(cudaclaw_cuda_b4step2_t));
if(ce != cudaSuccess)
{
fclaw_global_essentialf("ERROR (swirl_b4step2): %s\n",cudaGetErrorString(ce));
exit(0);
}
}
|
ddafc3abfbe0f19e2563daf83528eb46d9d5e69d.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <cstddef>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <thrust/host_vector.h>
#include "types.hpp"
#include "problem.hpp"
#include "show.hpp"
#include "Matrix.hpp"
using paplp::Matrix;
using std::cout;
using std::endl;
using std::string;
texture<float, hipTextureType2D, hipReadModeElementType> textureRef;
#define BLOCK_SIZE 32
__global__ void interweavedReduction(char* blocks, Data* c, size_t N, size_t M)
{
size_t blockId = blockIdx.y * blockDim.x + blockIdx.x;
blocks[blockId] = 5;
}
__host__ int main()
{
Data epsilon = 0.1;
HostVector objective(3);
HostVector constraintMatrix(9);
HostVector constraintBounds(3);
Problem initialProblem(
epsilon,
objective,
constraintMatrix,
constraintBounds);
SpecialProblem specialProblem(initialProblem);
ShowHostVector(specialProblem.objective);
Matrix coefficients(100,100);
/*
size_t N = 200;
size_t BLOCK = 10;
size_t NoOfBlocks = N / BLOCKS;
size_t SIZE = sizeof(int) * N;
int* hostValues = (data*)malloc(SIZE);
int* deviceValues;
hipMalloc(&deviceValues, SIZE);
char* blocks;
hipMalloc(&blocks, sizeof(char) * NoOfBlocks);
hipMemcpy(deviceValues, hostValues, SIZE, hipMemcpyHostToDevice);
exponent<<<N/BLOCK, BLOCK>>>(deviceValues);
hipMemcpy(hostValues, deviceValues, SIZE, hipMemcpyDeviceToHost);
return 0;
*/
}
| ddafc3abfbe0f19e2563daf83528eb46d9d5e69d.cu | #include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <cstddef>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <thrust/host_vector.h>
#include "types.hpp"
#include "problem.hpp"
#include "show.hpp"
#include "Matrix.hpp"
using paplp::Matrix;
using std::cout;
using std::endl;
using std::string;
texture<float, cudaTextureType2D, cudaReadModeElementType> textureRef;
#define BLOCK_SIZE 32
__global__ void interweavedReduction(char* blocks, Data* c, size_t N, size_t M)
{
size_t blockId = blockIdx.y * blockDim.x + blockIdx.x;
blocks[blockId] = 5;
}
__host__ int main()
{
Data epsilon = 0.1;
HostVector objective(3);
HostVector constraintMatrix(9);
HostVector constraintBounds(3);
Problem initialProblem(
epsilon,
objective,
constraintMatrix,
constraintBounds);
SpecialProblem specialProblem(initialProblem);
ShowHostVector(specialProblem.objective);
Matrix coefficients(100,100);
/*
size_t N = 200;
size_t BLOCK = 10;
size_t NoOfBlocks = N / BLOCKS;
size_t SIZE = sizeof(int) * N;
int* hostValues = (data*)malloc(SIZE);
int* deviceValues;
cudaMalloc(&deviceValues, SIZE);
char* blocks;
cudaMalloc(&blocks, sizeof(char) * NoOfBlocks);
cudaMemcpy(deviceValues, hostValues, SIZE, cudaMemcpyHostToDevice);
exponent<<<N/BLOCK, BLOCK>>>(deviceValues);
cudaMemcpy(hostValues, deviceValues, SIZE, cudaMemcpyDeviceToHost);
return 0;
*/
}
|
d9739e021babf2b3c2fdd3f460e0a7430718c97d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by Alex on 3/7/2017.
//
#include <stdio.h>
#include "mat.h"
#include "misc.h"
/* Private define ------------------------------------------------------------*/
/* Private typedef -----------------------------------------------------------*/
/* Private macro -------------------------------------------------------------*/
/* Private function prototypes -----------------------------------------------*/
/* Private variables ---------------------------------------------------------*/
static int Mat_Block_Size = 16;
/* ---------------------------------------------------------------------------*/
/**
*@brief Get a matrix element
*@param
*@retval None
*/
__host__ __device__ float MAT_GetElement(const Tp_fMat_TypeDef Mat, size_t row, size_t col)
{
return Mat.pElements[row * Mat.Width + col];
}
/**
*@brief Set a matrix element
*@param
*@retval None
*/
__host__ __device__ void MAT_SetElement(Tp_fMat_TypeDef Mat, size_t row, size_t col, float value)
{
Mat.pElements[row * Mat.Width + col] = value;
}
/**
*@brief Set all matrix elements with value
*@param
*@retval None
*/
__host__ __device__ void MAT_SetElementAll(Tp_fMat_TypeDef Mat, float value)
{
for (size_t i = 0; i < Mat.Height; i++)
{
for (size_t j = 0; j < Mat.Width; j++)
{
MAT_SetElement(Mat, i, j, value);
}
}
}
/**
*@brief Get a matrix element
*@param
*@retval None
*/
__host__ __device__ float *MAT_GetElementRef(const Tp_fMat_TypeDef Mat, size_t row, size_t col)
{
return &Mat.pElements[row * Mat.Width + col];
}
/**
*@brief Get a row vector
*@param
*@retval None
*/
__host__ __device__ float *MAT_GetRow_Vec(const Tp_fMat_TypeDef Mat, size_t row)
{
return &Mat.pElements[row * Mat.Width + 0];
}
/**
*@brief Print matrix value
*@param
*@retval None
*/
__host__ __device__ void MAT_PrintMat(Tp_fMat_TypeDef Mat)
{
for (size_t i = 0; i < Mat.Height; i++)
{
for (size_t j = 0; j < Mat.Width; j++)
{
printf("%.1f ", MAT_GetElement(Mat, i, j));
}
printf("\n");
}
printf("\n");
}
/**
*@brief Print vector values
*@param
*@retval None
*/
__host__ __device__ void MAT_PrintVecInt(Tp_intVec_TypeDef Vec)
{
for (size_t i = 0; i < Vec.Size; i++)
{
printf("%d ", Vec.pElements[i]);
}
printf("\n");
}
/**
*@brief Print vector values
*@param
*@retval None
*/
__host__ __device__ void MAT_PrintVecFloat(Tp_fVec_TypeDef Vec)
{
for (size_t i = 0; i < Vec.Size; i++)
{
printf("%.2f ", Vec.pElements[i]);
}
printf("\n");
}
/**
*@brief GPU - kernel
*@param
*@retval None
*/
__global__ void MAT_MulKernel(Tp_fMat_TypeDef A, Tp_fMat_TypeDef B, Tp_fMat_TypeDef C)
{
// Each thread computes one element of C
// by accumulating results into Cvalue
float Cvalue = 0.0;
size_t row = blockIdx.y * blockDim.y + threadIdx.y;
size_t col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < A.Height && col < B.Width)
{
for (int e = 0; e < A.Width; ++e)
{
// accumulate the dot product of row 'row' of A with column 'col' of B
Cvalue += MAT_GetElement(A, row, e) * MAT_GetElement(B, e, col);
}
MAT_SetElement(C, row, col, Cvalue);
}
}
/**
*@brief GPU - kernel
*@param
*@retval None
*/
__global__ void MAT_SumKernel(Tp_fMat_TypeDef A, Tp_fMat_TypeDef B, Tp_fMat_TypeDef C)
{
size_t row = blockIdx.y * blockDim.y + threadIdx.y;
size_t col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < C.Height && col < C.Width)
{
MAT_SetElement(C, row, col, (MAT_GetElement(A, row, col) + MAT_GetElement(B, row, col)));
}
}
/**
*@brief Matrix multiplication
*@param
*@retval None
*/
void MAT_Mult(const Tp_fMat_TypeDef A, const Tp_fMat_TypeDef B, Tp_fMat_TypeDef C)
{
Tp_fMat_TypeDef d_A;
Tp_fMat_TypeDef d_B;
Tp_fMat_TypeDef d_C;
size_t Size;
d_A = A;
Size = A.Width * A.Height * sizeof(float);
checkCudaErrors(hipMalloc(&d_A.pElements, Size));
checkCudaErrors(hipMemcpy(d_A.pElements, A.pElements, Size, hipMemcpyHostToDevice));
d_B = B;
Size = B.Width * B.Height * sizeof(float);
checkCudaErrors(hipMalloc(&d_B.pElements, Size));
checkCudaErrors(hipMemcpy(d_B.pElements, B.pElements, Size, hipMemcpyHostToDevice));
d_C = C;
Size = C.Width * C.Height * sizeof(float);
checkCudaErrors(hipMalloc(&d_C.pElements, Size));
// Invoke kernel
dim3 dimBlock(Mat_Block_Size, Mat_Block_Size);
dim3 dimGrid((B.Width + dimBlock.x - 1) / dimBlock.x, (A.Height + dimBlock.y - 1) / dimBlock.y);
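// The (size + block - 1) / block expressions round up, e.g. for B.Width = 100
// and a block edge of 16 this gives (100 + 15) / 16 = 7 blocks covering
// 7 * 16 = 112 >= 100 columns; out-of-range threads are masked by the
// row/col bounds check inside the kernel.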
MAT_MulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
hipDeviceSynchronize();
checkCudaErrors(hipMemcpy(C.pElements, d_C.pElements, Size, hipMemcpyDeviceToHost));
// Free device memory
checkCudaErrors(hipFree(d_A.pElements));
checkCudaErrors(hipFree(d_B.pElements));
checkCudaErrors(hipFree(d_C.pElements));
}
/**
*@brief Matrix addition
*@param
*@retval None
*/
void MAT_Sum(const Tp_fMat_TypeDef A, const Tp_fMat_TypeDef B, Tp_fMat_TypeDef C)
{
StopWatchInterface *timer = NULL;
Tp_fMat_TypeDef d_A;
Tp_fMat_TypeDef d_B;
Tp_fMat_TypeDef d_C;
size_t Size;
printf("\nGPU kernel MAT_Sum - Start\n");
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
sdkStartTimer(&timer);
d_A = A;
Size = A.Width * A.Height * sizeof(float);
checkCudaErrors(hipMalloc(&d_A.pElements, Size));
checkCudaErrors(hipMemcpy(d_A.pElements, A.pElements, Size, hipMemcpyHostToDevice));
d_B = B;
Size = B.Width * B.Height * sizeof(float);
checkCudaErrors(hipMalloc(&d_B.pElements, Size));
checkCudaErrors(hipMemcpy(d_B.pElements, B.pElements, Size, hipMemcpyHostToDevice));
d_C = C;
Size = C.Width * C.Height * sizeof(float);
checkCudaErrors(hipMalloc(&d_C.pElements, Size));
// Invoke kernel
dim3 dimBlock(Mat_Block_Size, Mat_Block_Size);
dim3 dimGrid((C.Width + dimBlock.x - 1) / dimBlock.x, (C.Height + dimBlock.y - 1) / dimBlock.y);
MAT_SumKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
hipDeviceSynchronize();
checkCudaErrors(hipMemcpy(C.pElements, d_C.pElements, Size, hipMemcpyDeviceToHost));
// Free device memory
checkCudaErrors(hipFree(d_A.pElements));
checkCudaErrors(hipFree(d_B.pElements));
checkCudaErrors(hipFree(d_C.pElements));
sdkStopTimer(&timer);
printf("GPU kernel - Complete, time:%fms\n", sdkGetTimerValue(&timer));
sdkDeleteTimer(&timer);
}
#define MAT_TEST_WIDTH 4
#define MAT_TEST_HEIGHT 4
#define MAT_TEST_SIZE (MAT_TEST_WIDTH * MAT_TEST_HEIGHT)
/**
*@brief Test
*@param
*@retval None
*/
void MAT_Mult_Test(void)
{
MISC_Bl_Size_TypeDef tBl_Size;
float A_Arr[MAT_TEST_SIZE] = {0.0};
float B_Arr[MAT_TEST_SIZE] = {0.0};
float C_Arr[MAT_TEST_SIZE] = {0.0};
Tp_fMat_TypeDef h_A;
Tp_fMat_TypeDef h_B;
Tp_fMat_TypeDef h_C;
tBl_Size = MISC_Get_Block_Size();
Mat_Block_Size = tBl_Size.Bl_2d;
h_A.Width = MAT_TEST_WIDTH;
h_A.Height = MAT_TEST_HEIGHT;
h_A.pElements = A_Arr;
h_B.Width = MAT_TEST_WIDTH;
h_B.Height = MAT_TEST_HEIGHT;
h_B.pElements = B_Arr;
h_C.Width = MAT_TEST_WIDTH;
h_C.Height = MAT_TEST_HEIGHT;
h_C.pElements = C_Arr;
MAT_SetElement(h_A, 0, 0, 1);
MAT_SetElement(h_A, 1, 1, 2);
MAT_SetElement(h_B, 0, 0, 3);
MAT_SetElement(h_B, 1, 1, 4);
MAT_PrintMat(h_A);
MAT_PrintMat(h_B);
MAT_Sum(h_A, h_B, h_C);
MAT_PrintMat(h_C);
} | d9739e021babf2b3c2fdd3f460e0a7430718c97d.cu | //
// Created by Alex on 3/7/2017.
//
#include <stdio.h>
#include "mat.h"
#include "misc.h"
/* Private define ------------------------------------------------------------*/
/* Private typedef -----------------------------------------------------------*/
/* Private macro -------------------------------------------------------------*/
/* Private function prototypes -----------------------------------------------*/
/* Private variables ---------------------------------------------------------*/
static int Mat_Block_Size = 16;
/* ---------------------------------------------------------------------------*/
/**
*@brief Get a matrix element
*@param
*@retval None
*/
__host__ __device__ float MAT_GetElement(const Tp_fMat_TypeDef Mat, size_t row, size_t col)
{
return Mat.pElements[row * Mat.Width + col];
}
/**
*@brief Set a matrix element
*@param
*@retval None
*/
__host__ __device__ void MAT_SetElement(Tp_fMat_TypeDef Mat, size_t row, size_t col, float value)
{
Mat.pElements[row * Mat.Width + col] = value;
}
/**
*@brief Set all matrix elements with value
*@param
*@retval None
*/
__host__ __device__ void MAT_SetElementAll(Tp_fMat_TypeDef Mat, float value)
{
for (size_t i = 0; i < Mat.Height; i++)
{
for (size_t j = 0; j < Mat.Width; j++)
{
MAT_SetElement(Mat, i, j, value);
}
}
}
/**
*@brief Get a matrix element
*@param
*@retval None
*/
__host__ __device__ float *MAT_GetElementRef(const Tp_fMat_TypeDef Mat, size_t row, size_t col)
{
return &Mat.pElements[row * Mat.Width + col];
}
/**
*@brief Get a row vector
*@param
*@retval None
*/
__host__ __device__ float *MAT_GetRow_Vec(const Tp_fMat_TypeDef Mat, size_t row)
{
return &Mat.pElements[row * Mat.Width + 0];
}
/**
*@brief Print matrix value
*@param
*@retval None
*/
__host__ __device__ void MAT_PrintMat(Tp_fMat_TypeDef Mat)
{
for (size_t i = 0; i < Mat.Height; i++)
{
for (size_t j = 0; j < Mat.Width; j++)
{
printf("%.1f ", MAT_GetElement(Mat, i, j));
}
printf("\n");
}
printf("\n");
}
/**
*@brief Print vector values
*@param
*@retval None
*/
__host__ __device__ void MAT_PrintVecInt(Tp_intVec_TypeDef Vec)
{
for (size_t i = 0; i < Vec.Size; i++)
{
printf("%d ", Vec.pElements[i]);
}
printf("\n");
}
/**
*@brief Print vector values
*@param
*@retval None
*/
__host__ __device__ void MAT_PrintVecFloat(Tp_fVec_TypeDef Vec)
{
for (size_t i = 0; i < Vec.Size; i++)
{
printf("%.2f ", Vec.pElements[i]);
}
printf("\n");
}
/**
*@brief GPU - kernel
*@param
*@retval None
*/
__global__ void MAT_MulKernel(Tp_fMat_TypeDef A, Tp_fMat_TypeDef B, Tp_fMat_TypeDef C)
{
// Each thread computes one element of C
// by accumulating results into Cvalue
float Cvalue = 0.0;
size_t row = blockIdx.y * blockDim.y + threadIdx.y;
size_t col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < A.Height && col < B.Width)
{
for (int e = 0; e < A.Width; ++e)
{
// accumulate the dot product of row 'row' of A with column 'col' of B
Cvalue += MAT_GetElement(A, row, e) * MAT_GetElement(B, e, col);
}
MAT_SetElement(C, row, col, Cvalue);
}
}
/**
*@brief GPU - kernel
*@param
*@retval None
*/
__global__ void MAT_SumKernel(Tp_fMat_TypeDef A, Tp_fMat_TypeDef B, Tp_fMat_TypeDef C)
{
size_t row = blockIdx.y * blockDim.y + threadIdx.y;
size_t col = blockIdx.x * blockDim.x + threadIdx.x;
if (row < C.Height && col < C.Width)
{
MAT_SetElement(C, row, col, (MAT_GetElement(A, row, col) + MAT_GetElement(B, row, col)));
}
}
/**
*@brief Matrix multiplication
*@param
*@retval None
*/
void MAT_Mult(const Tp_fMat_TypeDef A, const Tp_fMat_TypeDef B, Tp_fMat_TypeDef C)
{
Tp_fMat_TypeDef d_A;
Tp_fMat_TypeDef d_B;
Tp_fMat_TypeDef d_C;
size_t Size;
d_A = A;
Size = A.Width * A.Height * sizeof(float);
checkCudaErrors(cudaMalloc(&d_A.pElements, Size));
checkCudaErrors(cudaMemcpy(d_A.pElements, A.pElements, Size, cudaMemcpyHostToDevice));
d_B = B;
Size = B.Width * B.Height * sizeof(float);
checkCudaErrors(cudaMalloc(&d_B.pElements, Size));
checkCudaErrors(cudaMemcpy(d_B.pElements, B.pElements, Size, cudaMemcpyHostToDevice));
d_C = C;
Size = C.Width * C.Height * sizeof(float);
checkCudaErrors(cudaMalloc(&d_C.pElements, Size));
// Invoke kernel
dim3 dimBlock(Mat_Block_Size, Mat_Block_Size);
dim3 dimGrid((B.Width + dimBlock.x - 1) / dimBlock.x, (A.Height + dimBlock.y - 1) / dimBlock.y);
MAT_MulKernel << < dimGrid, dimBlock >> > (d_A, d_B, d_C);
cudaDeviceSynchronize();
checkCudaErrors(cudaMemcpy(C.pElements, d_C.pElements, Size, cudaMemcpyDeviceToHost));
// Free device memory
checkCudaErrors(cudaFree(d_A.pElements));
checkCudaErrors(cudaFree(d_B.pElements));
checkCudaErrors(cudaFree(d_C.pElements));
}
/**
*@brief Matrix addition
*@param
*@retval None
*/
void MAT_Sum(const Tp_fMat_TypeDef A, const Tp_fMat_TypeDef B, Tp_fMat_TypeDef C)
{
StopWatchInterface *timer = NULL;
Tp_fMat_TypeDef d_A;
Tp_fMat_TypeDef d_B;
Tp_fMat_TypeDef d_C;
size_t Size;
printf("\nGPU kernel MAT_Sum - Start\n");
sdkCreateTimer(&timer);
sdkResetTimer(&timer);
sdkStartTimer(&timer);
d_A = A;
Size = A.Width * A.Height * sizeof(float);
checkCudaErrors(cudaMalloc(&d_A.pElements, Size));
checkCudaErrors(cudaMemcpy(d_A.pElements, A.pElements, Size, cudaMemcpyHostToDevice));
d_B = B;
Size = B.Width * B.Height * sizeof(float);
checkCudaErrors(cudaMalloc(&d_B.pElements, Size));
checkCudaErrors(cudaMemcpy(d_B.pElements, B.pElements, Size, cudaMemcpyHostToDevice));
d_C = C;
Size = C.Width * C.Height * sizeof(float);
checkCudaErrors(cudaMalloc(&d_C.pElements, Size));
// Invoke kernel
dim3 dimBlock(Mat_Block_Size, Mat_Block_Size);
dim3 dimGrid((C.Width + dimBlock.x - 1) / dimBlock.x, (C.Height + dimBlock.y - 1) / dimBlock.y);
MAT_SumKernel << < dimGrid, dimBlock >> > (d_A, d_B, d_C);
cudaDeviceSynchronize();
checkCudaErrors(cudaMemcpy(C.pElements, d_C.pElements, Size, cudaMemcpyDeviceToHost));
// Free device memory
checkCudaErrors(cudaFree(d_A.pElements));
checkCudaErrors(cudaFree(d_B.pElements));
checkCudaErrors(cudaFree(d_C.pElements));
sdkStopTimer(&timer);
printf("GPU kernel - Complete, time:%fms\n", sdkGetTimerValue(&timer));
sdkDeleteTimer(&timer);
}
#define MAT_TEST_WIDTH 4
#define MAT_TEST_HEIGHT 4
#define MAT_TEST_SIZE (MAT_TEST_WIDTH * MAT_TEST_HEIGHT)
/**
*@brief Test
*@param
*@retval None
*/
void MAT_Mult_Test(void)
{
MISC_Bl_Size_TypeDef tBl_Size;
float A_Arr[MAT_TEST_SIZE] = {0.0};
float B_Arr[MAT_TEST_SIZE] = {0.0};
float C_Arr[MAT_TEST_SIZE] = {0.0};
Tp_fMat_TypeDef h_A;
Tp_fMat_TypeDef h_B;
Tp_fMat_TypeDef h_C;
tBl_Size = MISC_Get_Block_Size();
Mat_Block_Size = tBl_Size.Bl_2d;
h_A.Width = MAT_TEST_WIDTH;
h_A.Height = MAT_TEST_HEIGHT;
h_A.pElements = A_Arr;
h_B.Width = MAT_TEST_WIDTH;
h_B.Height = MAT_TEST_HEIGHT;
h_B.pElements = B_Arr;
h_C.Width = MAT_TEST_WIDTH;
h_C.Height = MAT_TEST_HEIGHT;
h_C.pElements = C_Arr;
MAT_SetElement(h_A, 0, 0, 1);
MAT_SetElement(h_A, 1, 1, 2);
MAT_SetElement(h_B, 0, 0, 3);
MAT_SetElement(h_B, 1, 1, 4);
MAT_PrintMat(h_A);
MAT_PrintMat(h_B);
MAT_Sum(h_A, h_B, h_C);
MAT_PrintMat(h_C);
} |
7be81c6a7c54beb78f1f5d57c19ce3fcc4b143c9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <unistd.h>
#define ALLOC_SIZE 1024
__global__ void access_offset_kernel(int offset) {
int devMem[ALLOC_SIZE];
devMem[0] = 0; devMem[1] = devMem[0]; // for init/unused warnings
#ifdef R
if (offset >= 0)
volatile int i = devMem[(ALLOC_SIZE-1) + offset];
else
volatile int i = devMem[offset];
#elif W
if (offset >= 0)
devMem[(ALLOC_SIZE-1) + offset] = 42;
else
devMem[offset] = 42;
#endif
}
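// Example behavior (a sketch; one of R or W must be defined at build time,
// e.g. -DR): "-o 0" touches devMem[ALLOC_SIZE - 1], the last valid element;
// any positive offset such as "-o 1" accesses devMem[ALLOC_SIZE], one element
// past the end; a negative offset such as "-o -4" indexes before the start of
// the array.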
int main(int argc, char** argv) {
if (argc != 3) {
fprintf(stderr, "Usage: %s -o <offset>\n", argv[0]);
abort();
}
int offset = 0;
int c;
while ((c = getopt(argc, argv, "o:")) != -1) {
switch(c) {
case 'o':
offset = atoi(optarg);
break;
default:
fprintf(stderr, "Usage: %s -o <offset>\n", argv[0]);
abort();
}
}
hipLaunchKernelGGL(( access_offset_kernel), dim3(1),dim3(1), 0, 0, offset);
hipDeviceReset();
return 0;
}
| 7be81c6a7c54beb78f1f5d57c19ce3fcc4b143c9.cu | #include <stdio.h>
#include <unistd.h>
#define ALLOC_SIZE 1024
__global__ void access_offset_kernel(int offset) {
int devMem[ALLOC_SIZE];
devMem[0] = 0; devMem[1] = devMem[0]; // for init/unused warnings
#ifdef R
if (offset >= 0)
volatile int i = devMem[(ALLOC_SIZE-1) + offset];
else
volatile int i = devMem[offset];
#elif W
if (offset >= 0)
devMem[(ALLOC_SIZE-1) + offset] = 42;
else
devMem[offset] = 42;
#endif
}
int main(int argc, char** argv) {
if (argc != 3) {
fprintf(stderr, "Usage: %s -o <offset>\n", argv[0]);
abort();
}
int offset = 0;
int c;
while ((c = getopt(argc, argv, "o:")) != -1) {
switch(c) {
case 'o':
offset = atoi(optarg);
break;
default:
fprintf(stderr, "Usage: %s -o <offset>\n", argv[0]);
abort();
}
}
access_offset_kernel<<<1,1>>>(offset);
cudaDeviceReset();
return 0;
}
|
2068ca3caf7883870656e908978aa107d91e9f5d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a,
const Dtype* bottom_data_b, const int blob_idx, Dtype* top_data,
int* mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
Dtype maxval = -FLT_MAX;
int maxidx = -1;
if (bottom_data_a[index] > bottom_data_b[index]) {
// only update for very first bottom_data blob (blob_idx == 0)
if (blob_idx == 0) {
maxval = bottom_data_a[index];
top_data[index] = maxval;
maxidx = blob_idx;
mask[index] = maxidx;
}
} else {
maxval = bottom_data_b[index];
top_data[index] = maxval;
maxidx = blob_idx + 1;
mask[index] = maxidx;
}
}
}
template <typename Dtype>
void EltwiseLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int* mask = NULL;
const int count = top[0]->count();
Dtype* top_data = top[0]->mutable_gpu_data();
switch (op_) {
case EltwiseParameter_EltwiseOp_PROD:
caffe_gpu_mul(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(),
top_data);
for (int i = 2; i < bottom.size(); ++i) {
caffe_gpu_mul(count, top_data, bottom[i]->gpu_data(), top_data);
}
break;
case EltwiseParameter_EltwiseOp_SUM:
caffe_gpu_set(count, Dtype(0.), top_data);
// TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1?
for (int i = 0; i < bottom.size(); ++i) {
caffe_gpu_axpy(count, coeffs_[i], bottom[i]->gpu_data(), top_data);
}
break;
case EltwiseParameter_EltwiseOp_MAX:
mask = max_idx_.mutable_gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxForward<Dtype>) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 0, top_data, mask);
for (int i = 2; i < bottom.size(); ++i) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_data, bottom[i]->gpu_data(), i-1, top_data, mask);
}
break;
default:
LOG(FATAL) << "Unknown elementwise operation.";
}
}
template <typename Dtype>
__global__ void MaxBackward(const int nthreads, const Dtype* top_diff,
const int blob_idx, const int* mask, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
Dtype gradient = 0;
if (mask[index] == blob_idx) {
gradient += top_diff[index];
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void EltwiseLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const int* mask = NULL;
const int count = top[0]->count();
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
for (int i = 0; i < bottom.size(); ++i) {
if (propagate_down[i]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
switch (op_) {
case EltwiseParameter_EltwiseOp_PROD:
if (stable_prod_grad_) {
bool initialized = false;
for (int j = 0; j < bottom.size(); ++j) {
if (i == j) { continue; }
if (!initialized) {
caffe_copy(count, bottom[j]->gpu_data(), bottom_diff);
initialized = true;
} else {
caffe_gpu_mul(count, bottom[j]->gpu_data(), bottom_diff,
bottom_diff);
}
}
} else {
caffe_gpu_div(count, top_data, bottom_data, bottom_diff);
}
caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff);
break;
case EltwiseParameter_EltwiseOp_SUM:
if (coeffs_[i] == Dtype(1.)) {
caffe_copy(count, top_diff, bottom_diff);
} else {
caffe_gpu_scale(count, coeffs_[i], top_diff, bottom_diff);
}
break;
case EltwiseParameter_EltwiseOp_MAX:
mask = max_idx_.gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, i, mask, bottom_diff);
break;
default:
LOG(FATAL) << "Unknown elementwise operation.";
}
}
}
}
INSTANTIATE_CLASS(EltwiseLayer);
} // namespace caffe
| 2068ca3caf7883870656e908978aa107d91e9f5d.cu | #include <cfloat>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a,
const Dtype* bottom_data_b, const int blob_idx, Dtype* top_data,
int* mask) {
CUDA_KERNEL_LOOP(index, nthreads) {
Dtype maxval = -FLT_MAX;
int maxidx = -1;
if (bottom_data_a[index] > bottom_data_b[index]) {
// only update for very first bottom_data blob (blob_idx == 0)
if (blob_idx == 0) {
maxval = bottom_data_a[index];
top_data[index] = maxval;
maxidx = blob_idx;
mask[index] = maxidx;
}
} else {
maxval = bottom_data_b[index];
top_data[index] = maxval;
maxidx = blob_idx + 1;
mask[index] = maxidx;
}
}
}
template <typename Dtype>
void EltwiseLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int* mask = NULL;
const int count = top[0]->count();
Dtype* top_data = top[0]->mutable_gpu_data();
switch (op_) {
case EltwiseParameter_EltwiseOp_PROD:
caffe_gpu_mul(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(),
top_data);
for (int i = 2; i < bottom.size(); ++i) {
caffe_gpu_mul(count, top_data, bottom[i]->gpu_data(), top_data);
}
break;
case EltwiseParameter_EltwiseOp_SUM:
caffe_gpu_set(count, Dtype(0.), top_data);
// TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1?
for (int i = 0; i < bottom.size(); ++i) {
caffe_gpu_axpy(count, coeffs_[i], bottom[i]->gpu_data(), top_data);
}
break;
case EltwiseParameter_EltwiseOp_MAX:
mask = max_idx_.mutable_gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
MaxForward<Dtype> <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 0, top_data, mask);
for (int i = 2; i < bottom.size(); ++i) {
// NOLINT_NEXT_LINE(whitespace/operators)
MaxForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_data, bottom[i]->gpu_data(), i-1, top_data, mask);
}
break;
default:
LOG(FATAL) << "Unknown elementwise operation.";
}
}
template <typename Dtype>
__global__ void MaxBackward(const int nthreads, const Dtype* top_diff,
const int blob_idx, const int* mask, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
Dtype gradient = 0;
if (mask[index] == blob_idx) {
gradient += top_diff[index];
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void EltwiseLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const int* mask = NULL;
const int count = top[0]->count();
const Dtype* top_data = top[0]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
for (int i = 0; i < bottom.size(); ++i) {
if (propagate_down[i]) {
const Dtype* bottom_data = bottom[i]->gpu_data();
Dtype* bottom_diff = bottom[i]->mutable_gpu_diff();
switch (op_) {
case EltwiseParameter_EltwiseOp_PROD:
if (stable_prod_grad_) {
bool initialized = false;
for (int j = 0; j < bottom.size(); ++j) {
if (i == j) { continue; }
if (!initialized) {
caffe_copy(count, bottom[j]->gpu_data(), bottom_diff);
initialized = true;
} else {
caffe_gpu_mul(count, bottom[j]->gpu_data(), bottom_diff,
bottom_diff);
}
}
} else {
caffe_gpu_div(count, top_data, bottom_data, bottom_diff);
}
caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff);
break;
case EltwiseParameter_EltwiseOp_SUM:
if (coeffs_[i] == Dtype(1.)) {
caffe_copy(count, top_diff, bottom_diff);
} else {
caffe_gpu_scale(count, coeffs_[i], top_diff, bottom_diff);
}
break;
case EltwiseParameter_EltwiseOp_MAX:
mask = max_idx_.gpu_data();
MaxBackward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, i, mask, bottom_diff);
break;
default:
LOG(FATAL) << "Unknown elementwise operation.";
}
}
}
}
INSTANTIATE_CLASS(EltwiseLayer);
} // namespace caffe
|
d254680a9c5914c1d587455e7ecc381b4fff61ce.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
MPC code [double] (LnVs BIT LVs ZE): A GPU-based compressor for arrays of
double-precision floating-point values. See the following publication for
more information: http://cs.txstate.edu/~mb92/papers/cluster15.pdf.
Copyright (c) 2015-2020, Texas State University. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Texas State University nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL TEXAS STATE UNIVERSITY BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors: Annie Yang and Martin Burtscher
URL: The latest version of this code is available at
https://userweb.cs.txstate.edu/~burtscher/research/MPC/.
Publication: This work is described in detail in the following paper.
Annie Yang, Hari Mukka, Farbod Hesaaraki, and Martin Burtscher. MPC: A
Massively Parallel Compression Algorithm for Scientific Data. Proceedings
of the IEEE International Conference on Cluster Computing, pp. 381-389.
September 2015.
*/
#include <cstdio>
#include <cassert>
#include <string>
#include <sys/time.h>
#include "utils.h"
using std::string;
#define TPB 1024 /* do not change */
#if (CUDART_VERSION >= 9000)
#define __shfl_up(v, d) __shfl_up_sync(0xffffffff, v, d)
#endif
static inline __device__
void prefixsum(int &val, int sbuf[TPB])
{
const int warp = threadIdx.x >> 5;
const int lane = threadIdx.x & 31;
for (int d = 1; d < 32; d *= 2) {
int tmp = __shfl_up(val, d);
if (lane >= d) val += tmp;
}
if (lane == 31) sbuf[warp] = val;
__syncthreads();
if (warp == 0) {
int v = sbuf[lane];
for (int d = 1; d < 32; d *= 2) {
int tmp = __shfl_up(v, d);
if (lane >= d) v += tmp;
}
sbuf[lane] = v;
}
__syncthreads();
if (warp > 0) {
val += sbuf[warp - 1];
}
}
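/* Note on the routine above: each warp runs a Kogge-Stone style inclusive
scan -- after the d = 1, 2, 4, 8, 16 rounds, lane i holds the sum of lanes
0..i (e.g. with every lane contributing 1, lane 0 ends with 1 and lane 31
with 32). Lane 31 of each warp deposits its warp total in sbuf, warp 0 scans
those totals, and every later warp adds the running total of the warps before
it. prefixsumlong and prefixsumdimlong below apply the same idea to 64-bit
values. */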
static inline __device__
void prefixsumlong(long &val, long sbuf[TPB])
{
const int warp = threadIdx.x >> 5;
const int lane = threadIdx.x & 31;
for (int d = 1; d < 32; d *= 2) {
unsigned int tmpl = __shfl_up((int)val, d);
long tmph = __shfl_up((int)(val >> 32), d);
if (lane >= d) val += (tmph << 32) + tmpl;
}
if (lane == 31) sbuf[warp] = val;
__syncthreads();
if (warp == 0) {
long v = sbuf[lane];
for (int d = 1; d < 32; d *= 2) {
unsigned int tmpl = __shfl_up((int)v, d);
long tmph = __shfl_up((int)(v >> 32), d);
if (lane >= d) v += (tmph << 32) + tmpl;
}
sbuf[lane] = v;
}
__syncthreads();
if (warp > 0) {
val += sbuf[warp - 1];
}
}
static inline __device__
void prefixsumdimlong(long &val, long sbuf[TPB], const unsigned char dim)
{
const int tid = threadIdx.x;
const int warp = tid >> 5;
const int lane = tid & 31;
const int tix = (warp * dim) + (tid % dim);
for (int d = dim; d < 32; d *= 2) {
unsigned int tmpl = __shfl_up((int)val, d);
long tmph = __shfl_up((int)(val >> 32), d);
if (lane >= d) val += (tmph << 32) + tmpl;
}
if ((lane + dim) > 31) sbuf[tix] = val;
__syncthreads();
if (warp < dim) {
const int idx = (lane * dim) + warp;
long v = sbuf[idx];
for (int d = 1; d < 32; d *= 2) {
unsigned int tmpl = __shfl_up((int)v, d);
long tmph = __shfl_up((int)(v >> 32), d);
if (lane >= d) v += (tmph << 32) + tmpl;
}
sbuf[idx] = v;
}
__syncthreads();
if (warp > 0) {
val += sbuf[tix - dim];
}
}
/*****************************************************************************
This is the GPU compression kernel, which requires 1024 threads per block and
should be launched with as many blocks as the GPU can run simultaneously.
Inputs
------
n: the number of double values to be compressed
original: the input array holding the n doubles (has to be cast to a long array)
goffset: a temporary array with m elements where m = number of thread blocks
dim: the dimensionality of the input data (dim must be between 1 and 32)
Output
------
compressed: the output array that holds the compressed data in integer format
The output array needs to provide space for up to 2 + n + (n + 31) / 32 elements.
The upper half of the first element specifies how many elements are actually
used. It should be replaced by the value n before the data is further processed.
*****************************************************************************/
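/* For example (a sizing sketch only, mirroring what main() below allocates):
for n = 1,000,000 input doubles, main() reserves
n + 1 + (n + 63) / 64 = 1,000,000 + 1 + 15,625 = 1,015,626 longs (about 8.1 MB)
for the compressed output, then launches
hipMemset(d_offs, -1, blocks * sizeof(int));
hipLaunchKernelGGL(( MPCcompress), dim3(blocks), dim3(TPB), 0, 0, n, d_in, d_out, d_offs, dim);
*/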
static __global__ __launch_bounds__(1024, 2)
void MPCcompress(
const int n,
long* __restrict__ const original,
long* __restrict__ const compressed,
volatile int* __restrict__ const goffset,
unsigned char dim)
{
const int tid = threadIdx.x;
const int tidm1 = tid - 1;
const int tidmdim = tid - dim;
const int lanex = tid & 63;
const int warpx = tid & 0x3c0;
const int bid = blockIdx.x;
const int gdim = gridDim.x;
const int bid1 = ((bid + 1) == gdim) ? 0 : (bid + 1);
const int init = 1 + (n + 63) / 64;
const int chunksm1 = ((n + (TPB - 1)) / TPB) - 1;
__shared__ int start, top;
__shared__ long sbuf1[TPB], sbuf2[TPB];
for (int chunk = bid; chunk <= chunksm1; chunk += gdim) {
const int idx = tid + chunk * TPB;
long v1 = 0;
if (idx < n) {
v1 = original[idx];
sbuf1[tid] = v1;
}
__syncthreads();
if (tid >= dim) {
if (idx < n) {
v1 -= sbuf1[tidmdim];
}
}
sbuf2[tid] = v1;
__syncthreads();
long v2 = 0;
for (int i = 63; i >= 0; i--) {
v2 = (v2 << 1) + ((sbuf2[warpx + i] >> lanex) & 1);
}
sbuf1[tid] = v2;
__syncthreads();
if (tid > 0) {
v2 -= sbuf1[tidm1];
}
int loc = 0;
if (v2 != 0) loc = 1;
#if (CUDART_VERSION < 9000)
unsigned int bitmap = __ballot(loc);
#else
unsigned int bitmap = __ballot_sync(0xffffffff, loc);
#endif
if (lanex == 32) {
sbuf2[tid] = bitmap;
}
__syncthreads();
if (lanex == 0) {
if (idx < n) compressed[1 + idx / 64] = (sbuf2[tid + 32] << 32) + bitmap;
}
prefixsum(loc, (int*)sbuf1);
if (v2 != 0) {
sbuf2[loc - 1] = v2;
}
if (tid == (TPB - 1)) {
int st = init;
if (chunk > 0) {
do {
st = goffset[bid];
} while (st < 0); // busy waiting
}
goffset[bid1] = st + loc;
goffset[bid] = -1;
if (chunk == chunksm1) {
compressed[0] = (((long)(st + loc)) << 32) + (0x43504d00 - 1) + dim;
}
top = loc;
start = st;
}
__syncthreads();
if (tid < top) {
compressed[start + tid] = sbuf2[tid];
}
}
}
/*****************************************************************************
This is the GPU decompression kernel, which requires 1024 threads per block
and should be launched with as many blocks as the GPU can run simultaneously.
Inputs
------
compressed: the input array holding the compressed data in integer format
goffset: a temporary array with m elements where m = number of thread blocks
The upper half of the first element must hold the value n, i.e., the number
of doubles that the data will generate upon decompression.
Output
------
decompressed: the output array holding the decompressed data in integer format
The output array needs to provide space for n elements and has to be cast to an
array of doubles before it can be used.
*****************************************************************************/
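/* For example (a sketch of the header layout only): main() below recovers the
metadata the same way this kernel does --
int dim = (compressed[0] & 31) + 1; // low 5 bits hold dim - 1
int n = compressed[0] >> 32; // upper half holds the element count n
-- before sizing the decompressed output. */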
static __global__ __launch_bounds__(1024, 2)
void MPCdecompress(
long* __restrict__ const compressed,
long* __restrict__ const decompressed,
volatile int* __restrict__ const goffset)
{
const int dim = (compressed[0] & 31) + 1;
const int n = compressed[0] >> 32;
const int tid = threadIdx.x;
const int lanex = tid & 63;
const int warpx = tid & 0x3c0;
const int bid = blockIdx.x;
const int gdim = gridDim.x;
const int bid1 = ((bid + 1) == gdim) ? 0 : (bid + 1);
const int init = 1 + (n + 63) / 64;
const int nru = (n - 1) | 63;
const int chunksm1 = ((n + (TPB - 1)) / TPB) - 1;
__shared__ int start, top;
__shared__ long sbuf1[TPB], sbuf2[TPB];
for (int chunk = bid; chunk <= chunksm1; chunk += gdim) {
const int idx = tid + chunk * TPB;
int flag = 0;
if (idx <= nru) {
flag = (compressed[1 + idx / 64] >> lanex) & 1;
}
int loc = flag;
__syncthreads();
prefixsum(loc, (int*)sbuf1);
if (tid == (TPB - 1)) {
int st = init;
if (chunk > 0) {
do {
st = goffset[bid];
} while (st < 0); // busy waiting
}
goffset[bid1] = st + loc;
goffset[bid] = -1;
top = loc;
start = st;
}
__syncthreads();
if (tid < top) {
sbuf2[tid] = compressed[start + tid];
}
__syncthreads();
long v2 = 0;
if (flag != 0) {
v2 = sbuf2[loc - 1];
}
prefixsumlong(v2, sbuf1);
sbuf2[tid] = v2;
__syncthreads();
long v1 = 0;
for (int i = 63; i >= 0; i--) {
v1 = (v1 << 1) + ((sbuf2[warpx + i] >> lanex) & 1);
}
prefixsumdimlong(v1, sbuf1, dim);
if (idx < n) {
decompressed[idx] = v1;
}
}
}
int main(int argc, char *argv[])
{
printf("MPC - Massively Parallel Compression [double] (%s)\n", __FILE__);
printf("Copyright 2015-2020 Texas State University\n\n");
assert(sizeof(long) == sizeof(double));
if ((argc != 2) && (argc != 3)) {
printf("usage: %s file [dimension]\n", argv[0]);
exit(-1);
}
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, 0);
const int blocks = deviceProp.multiProcessorCount * 2;
hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte);
int dim, insize, outsize;
string name = argv[1];
long* const input = readFile(name.c_str(), insize);
if (argc == 3) {
dim = atoi(argv[2]);
outsize = insize + 1 + (insize + 63) / 64;
} else {
assert(((input[0] >> 8) & 0xffffff) == 0x43504d);
dim = (input[0] & 31) + 1;
outsize = input[0] >> 32;
}
assert(0 < dim); assert(dim <= 32);
long* const output = new long[outsize];
long *d_in, *d_out;
int *d_offs;
hipMalloc(&d_in, insize * sizeof(long));
hipMalloc(&d_out, outsize * sizeof(long));
hipMalloc(&d_offs, blocks * sizeof(int));
hipMemcpy(d_in, input, insize * sizeof(long), hipMemcpyHostToDevice);
struct timeval start, end;
if (argc == 3) {
gettimeofday(&start, NULL);
hipMemset(d_offs, -1, blocks * sizeof(int));
hipLaunchKernelGGL(( MPCcompress), dim3(blocks), dim3(TPB), 0, 0, insize, d_in, d_out, d_offs, dim);
hipDeviceSynchronize();
gettimeofday(&end, NULL);
hipMemcpy(output, d_out, sizeof(long), hipMemcpyDeviceToHost);
outsize = output[0] >> 32;
double ctime = end.tv_sec + end.tv_usec / 1000000.0 - start.tv_sec - start.tv_usec / 1000000.0;
printf("compression time: %.2f ms\n", 1000.0 * ctime);
printf("compression throughput: %.3f GB/s\n", 0.000000001 * sizeof(long) * insize / ctime);
printf("compression ratio: %.3f\n\n", 1.0 * insize / outsize);
hipMemcpy(output, d_out, outsize * sizeof(long), hipMemcpyDeviceToHost);
output[0] = (((long)insize) << 32) + (0x43504d00 - 1) + dim;
name += ".mpc";
} else {
gettimeofday(&start, NULL);
hipMemset(d_offs, -1, blocks * sizeof(int));
hipLaunchKernelGGL(( MPCdecompress), dim3(blocks), dim3(TPB), 0, 0, d_in, d_out, d_offs);
hipDeviceSynchronize();
gettimeofday(&end, NULL);
hipMemcpy(output, d_out, outsize * sizeof(long), hipMemcpyDeviceToHost);
double dtime = end.tv_sec + end.tv_usec / 1000000.0 - start.tv_sec - start.tv_usec / 1000000.0;
printf("decompression time: %.2f ms\n", 1000.0 * dtime);
printf("decompression throughput: %.3f GB/s\n\n", 0.000000001 * sizeof(long) * outsize / dtime);
name += ".org";
}
writeFile(name.c_str(), output, outsize);
delete [] output;
delete [] input;
hipFree(d_offs);
hipFree(d_out);
hipFree(d_in);
return 0;
}
| d254680a9c5914c1d587455e7ecc381b4fff61ce.cu | /*
MPC code [double] (LnVs BIT LVs ZE): A GPU-based compressor for arrays of
double-precision floating-point values. See the following publication for
more information: http://cs.txstate.edu/~mb92/papers/cluster15.pdf.
Copyright (c) 2015-2020, Texas State University. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Texas State University nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL TEXAS STATE UNIVERSITY BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors: Annie Yang and Martin Burtscher
URL: The latest version of this code is available at
https://userweb.cs.txstate.edu/~burtscher/research/MPC/.
Publication: This work is described in detail in the following paper.
Annie Yang, Hari Mukka, Farbod Hesaaraki, and Martin Burtscher. MPC: A
Massively Parallel Compression Algorithm for Scientific Data. Proceedings
of the IEEE International Conference on Cluster Computing, pp. 381-389.
September 2015.
*/
#include <cstdio>
#include <cassert>
#include <string>
#include <sys/time.h>
#include "utils.h"
using std::string;
#define TPB 1024 /* do not change */
#if (CUDART_VERSION >= 9000)
#define __shfl_up(v, d) __shfl_up_sync(0xffffffff, v, d)
#endif
static inline __device__
void prefixsum(int &val, int sbuf[TPB])
{
const int warp = threadIdx.x >> 5;
const int lane = threadIdx.x & 31;
for (int d = 1; d < 32; d *= 2) {
int tmp = __shfl_up(val, d);
if (lane >= d) val += tmp;
}
if (lane == 31) sbuf[warp] = val;
__syncthreads();
if (warp == 0) {
int v = sbuf[lane];
for (int d = 1; d < 32; d *= 2) {
int tmp = __shfl_up(v, d);
if (lane >= d) v += tmp;
}
sbuf[lane] = v;
}
__syncthreads();
if (warp > 0) {
val += sbuf[warp - 1];
}
}
static inline __device__
void prefixsumlong(long &val, long sbuf[TPB])
{
const int warp = threadIdx.x >> 5;
const int lane = threadIdx.x & 31;
for (int d = 1; d < 32; d *= 2) {
unsigned int tmpl = __shfl_up((int)val, d);
long tmph = __shfl_up((int)(val >> 32), d);
if (lane >= d) val += (tmph << 32) + tmpl;
}
if (lane == 31) sbuf[warp] = val;
__syncthreads();
if (warp == 0) {
long v = sbuf[lane];
for (int d = 1; d < 32; d *= 2) {
unsigned int tmpl = __shfl_up((int)v, d);
long tmph = __shfl_up((int)(v >> 32), d);
if (lane >= d) v += (tmph << 32) + tmpl;
}
sbuf[lane] = v;
}
__syncthreads();
if (warp > 0) {
val += sbuf[warp - 1];
}
}
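// [Editorial note] prefixsumdimlong below computes a block-wide inclusive prefix sum
// independently over each of the `dim` interleaved subsequences (a stride-dim scan);
// the decompression kernel uses it to invert the dimension-wise delta encoding that
// MPCcompress applies before the bit transposition.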
static inline __device__
void prefixsumdimlong(long &val, long sbuf[TPB], const unsigned char dim)
{
const int tid = threadIdx.x;
const int warp = tid >> 5;
const int lane = tid & 31;
const int tix = (warp * dim) + (tid % dim);
for (int d = dim; d < 32; d *= 2) {
unsigned int tmpl = __shfl_up((int)val, d);
long tmph = __shfl_up((int)(val >> 32), d);
if (lane >= d) val += (tmph << 32) + tmpl;
}
if ((lane + dim) > 31) sbuf[tix] = val;
__syncthreads();
if (warp < dim) {
const int idx = (lane * dim) + warp;
long v = sbuf[idx];
for (int d = 1; d < 32; d *= 2) {
unsigned int tmpl = __shfl_up((int)v, d);
long tmph = __shfl_up((int)(v >> 32), d);
if (lane >= d) v += (tmph << 32) + tmpl;
}
sbuf[idx] = v;
}
__syncthreads();
if (warp > 0) {
val += sbuf[tix - dim];
}
}
/*****************************************************************************
This is the GPU compression kernel, which requires 1024 threads per block and
should be launched with as many blocks as the GPU can run simultaneously.
Inputs
------
n: the number of double values to be compressed
original: the input array holding the n doubles (has to be cast to a long array)
goffset: a temporary array with m elements where m = number of thread blocks
dim: the dimensionality of the input data (dim must be between 1 and 32)
Output
------
compressed: the output array that holds the compressed data in integer format
The output array needs to provide space for up to 2 + n + (n + 31) / 32 elements.
The upper half of the first element specifies how many elements are actually
used. It should be replaced by the value n before the data is further processed.
*****************************************************************************/
static __global__ __launch_bounds__(1024, 2)
void MPCcompress(
const int n,
long* __restrict__ const original,
long* __restrict__ const compressed,
volatile int* __restrict__ const goffset,
unsigned char dim)
{
const int tid = threadIdx.x;
const int tidm1 = tid - 1;
const int tidmdim = tid - dim;
const int lanex = tid & 63;
const int warpx = tid & 0x3c0;
const int bid = blockIdx.x;
const int gdim = gridDim.x;
const int bid1 = ((bid + 1) == gdim) ? 0 : (bid + 1);
const int init = 1 + (n + 63) / 64;
const int chunksm1 = ((n + (TPB - 1)) / TPB) - 1;
__shared__ int start, top;
__shared__ long sbuf1[TPB], sbuf2[TPB];
for (int chunk = bid; chunk <= chunksm1; chunk += gdim) {
const int idx = tid + chunk * TPB;
long v1 = 0;
if (idx < n) {
v1 = original[idx];
sbuf1[tid] = v1;
}
__syncthreads();
if (tid >= dim) {
if (idx < n) {
v1 -= sbuf1[tidmdim];
}
}
sbuf2[tid] = v1;
__syncthreads();
long v2 = 0;
for (int i = 63; i >= 0; i--) {
v2 = (v2 << 1) + ((sbuf2[warpx + i] >> lanex) & 1);
}
sbuf1[tid] = v2;
__syncthreads();
if (tid > 0) {
v2 -= sbuf1[tidm1];
}
int loc = 0;
if (v2 != 0) loc = 1;
#if (CUDART_VERSION < 9000)
unsigned int bitmap = __ballot(loc);
#else
unsigned int bitmap = __ballot_sync(0xffffffff, loc);
#endif
if (lanex == 32) {
sbuf2[tid] = bitmap;
}
__syncthreads();
if (lanex == 0) {
if (idx < n) compressed[1 + idx / 64] = (sbuf2[tid + 32] << 32) + bitmap;
}
prefixsum(loc, (int*)sbuf1);
if (v2 != 0) {
sbuf2[loc - 1] = v2;
}
if (tid == (TPB - 1)) {
int st = init;
if (chunk > 0) {
do {
st = goffset[bid];
} while (st < 0); // busy waiting
}
goffset[bid1] = st + loc;
goffset[bid] = -1;
if (chunk == chunksm1) {
compressed[0] = (((long)(st + loc)) << 32) + (0x43504d00 - 1) + dim;
}
top = loc;
start = st;
}
__syncthreads();
if (tid < top) {
compressed[start + tid] = sbuf2[tid];
}
}
}
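/* [Editor's sketch, not part of the original MPC distribution] Minimal host-side use
   of MPCcompress, illustrating the sizing rule documented above: the output buffer
   must hold up to 2 + n + (n + 31) / 32 long words, the per-block offset array is
   initialized to -1 (the sentinel the kernel busy-waits on), and the launch uses
   TPB = 1024 threads per block. The helper name and the assumption that the input
   already resides on the device are illustrative only. */
static void exampleCompressOnDevice(int n, long* d_input, int dim, int blocks)
{
  const int max_out_words = 2 + n + (n + 31) / 32;
  long* d_compressed;
  int* d_offsets;
  cudaMalloc(&d_compressed, max_out_words * sizeof(long));
  cudaMalloc(&d_offsets, blocks * sizeof(int));
  cudaMemset(d_offsets, -1, blocks * sizeof(int));
  MPCcompress<<<blocks, TPB>>>(n, d_input, d_compressed, d_offsets, dim);
  cudaDeviceSynchronize();
  // The number of words actually written is stored in the upper half of word 0 and
  // should be copied back before sizing the device-to-host transfer (see main below).
  cudaFree(d_offsets);
  cudaFree(d_compressed);
}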
/*****************************************************************************
This is the GPU decompression kernel, which requires 1024 threads per block
and should be launched with as many blocks as the GPU can run simultaneously.
Inputs
------
compressed: the input array holding the compressed data in integer format
goffset: a temporary array with m elements where m = number of thread blocks
The upper half of the first element must hold the value n, i.e., the number
of doubles that the data will generate upon decompression.
Output
------
decompressed: the output array holding the decompressed data in integer format
The output array needs to provide space for n elements and has to be cast to an
array of doubles before it can be used.
*****************************************************************************/
static __global__ __launch_bounds__(1024, 2)
void MPCdecompress(
long* __restrict__ const compressed,
long* __restrict__ const decompressed,
volatile int* __restrict__ const goffset)
{
const int dim = (compressed[0] & 31) + 1;
const int n = compressed[0] >> 32;
const int tid = threadIdx.x;
const int lanex = tid & 63;
const int warpx = tid & 0x3c0;
const int bid = blockIdx.x;
const int gdim = gridDim.x;
const int bid1 = ((bid + 1) == gdim) ? 0 : (bid + 1);
const int init = 1 + (n + 63) / 64;
const int nru = (n - 1) | 63;
const int chunksm1 = ((n + (TPB - 1)) / TPB) - 1;
__shared__ int start, top;
__shared__ long sbuf1[TPB], sbuf2[TPB];
for (int chunk = bid; chunk <= chunksm1; chunk += gdim) {
const int idx = tid + chunk * TPB;
int flag = 0;
if (idx <= nru) {
flag = (compressed[1 + idx / 64] >> lanex) & 1;
}
int loc = flag;
__syncthreads();
prefixsum(loc, (int*)sbuf1);
if (tid == (TPB - 1)) {
int st = init;
if (chunk > 0) {
do {
st = goffset[bid];
} while (st < 0); // busy waiting
}
goffset[bid1] = st + loc;
goffset[bid] = -1;
top = loc;
start = st;
}
__syncthreads();
if (tid < top) {
sbuf2[tid] = compressed[start + tid];
}
__syncthreads();
long v2 = 0;
if (flag != 0) {
v2 = sbuf2[loc - 1];
}
prefixsumlong(v2, sbuf1);
sbuf2[tid] = v2;
__syncthreads();
long v1 = 0;
for (int i = 63; i >= 0; i--) {
v1 = (v1 << 1) + ((sbuf2[warpx + i] >> lanex) & 1);
}
prefixsumdimlong(v1, sbuf1, dim);
if (idx < n) {
decompressed[idx] = v1;
}
}
}
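/* [Editor's sketch, not part of the original MPC distribution] Matching host-side use
   of MPCdecompress: the first compressed word carries the decompressed length n in its
   upper 32 bits (as documented above), which sizes the output allocation. The helper
   name and the assumption that the compressed stream and its header word are already
   available are illustrative only. */
static void exampleDecompressOnDevice(long* d_compressed, long header_word, int blocks)
{
  const int n = (int)(header_word >> 32); // number of doubles to reconstruct
  long* d_decompressed;
  int* d_offsets;
  cudaMalloc(&d_decompressed, n * sizeof(long));
  cudaMalloc(&d_offsets, blocks * sizeof(int));
  cudaMemset(d_offsets, -1, blocks * sizeof(int));
  MPCdecompress<<<blocks, TPB>>>(d_compressed, d_decompressed, d_offsets);
  cudaDeviceSynchronize();
  // d_decompressed now holds n long words that can be reinterpreted as doubles.
  cudaFree(d_offsets);
  cudaFree(d_decompressed);
}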
int main(int argc, char *argv[])
{
printf("MPC - Massively Parallel Compression [double] (%s)\n", __FILE__);
printf("Copyright 2015-2020 Texas State University\n\n");
assert(sizeof(long) == sizeof(double));
if ((argc != 2) && (argc != 3)) {
printf("usage: %s file [dimension]\n", argv[0]);
exit(-1);
}
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0);
const int blocks = deviceProp.multiProcessorCount * 2;
cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
int dim, insize, outsize;
string name = argv[1];
long* const input = readFile(name.c_str(), insize);
if (argc == 3) {
dim = atoi(argv[2]);
outsize = insize + 1 + (insize + 63) / 64;
} else {
assert(((input[0] >> 8) & 0xffffff) == 0x43504d);
dim = (input[0] & 31) + 1;
outsize = input[0] >> 32;
}
assert(0 < dim); assert(dim <= 32);
long* const output = new long[outsize];
long *d_in, *d_out;
int *d_offs;
cudaMalloc(&d_in, insize * sizeof(long));
cudaMalloc(&d_out, outsize * sizeof(long));
cudaMalloc(&d_offs, blocks * sizeof(int));
cudaMemcpy(d_in, input, insize * sizeof(long), cudaMemcpyHostToDevice);
struct timeval start, end;
if (argc == 3) {
gettimeofday(&start, NULL);
cudaMemset(d_offs, -1, blocks * sizeof(int));
MPCcompress<<<blocks, TPB>>>(insize, d_in, d_out, d_offs, dim);
cudaDeviceSynchronize();
gettimeofday(&end, NULL);
cudaMemcpy(output, d_out, sizeof(long), cudaMemcpyDeviceToHost);
outsize = output[0] >> 32;
double ctime = end.tv_sec + end.tv_usec / 1000000.0 - start.tv_sec - start.tv_usec / 1000000.0;
printf("compression time: %.2f ms\n", 1000.0 * ctime);
printf("compression throughput: %.3f GB/s\n", 0.000000001 * sizeof(long) * insize / ctime);
printf("compression ratio: %.3f\n\n", 1.0 * insize / outsize);
cudaMemcpy(output, d_out, outsize * sizeof(long), cudaMemcpyDeviceToHost);
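// [Editorial note, not in the original MPC sources] Header word layout: the upper
// 32 bits carry the uncompressed element count (insize), bits 8..31 carry the
// 0x43504D ("MPC") magic checked by the decompressor, and the low 5 bits carry
// dim - 1, which is why the expression below adds (0x43504d00 - 1) + dim.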
output[0] = (((long)insize) << 32) + (0x43504d00 - 1) + dim;
name += ".mpc";
} else {
gettimeofday(&start, NULL);
cudaMemset(d_offs, -1, blocks * sizeof(int));
MPCdecompress<<<blocks, TPB>>>(d_in, d_out, d_offs);
cudaDeviceSynchronize();
gettimeofday(&end, NULL);
cudaMemcpy(output, d_out, outsize * sizeof(long), cudaMemcpyDeviceToHost);
double dtime = end.tv_sec + end.tv_usec / 1000000.0 - start.tv_sec - start.tv_usec / 1000000.0;
printf("decompression time: %.2f ms\n", 1000.0 * dtime);
printf("decompression throughput: %.3f GB/s\n\n", 0.000000001 * sizeof(long) * outsize / dtime);
name += ".org";
}
writeFile(name.c_str(), output, outsize);
delete [] output;
delete [] input;
cudaFree(d_offs);
cudaFree(d_out);
cudaFree(d_in);
return 0;
}
|
b49375b8489e513f462e0289bacd6cebd5662f93.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
static unsigned int GRID_SIZE_N;
static unsigned int GRID_SIZE_4N;
static unsigned int MAX_STATE_VALUE;
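// [Editorial comment] One thread per (i, j) pair, where i = n / 4 indexes tipX1/tipX2 and
// j = n % 4 selects one of four 4-element output slots: each thread writes the element-wise
// product of the two 4-entry tipVector blocks addressed by tipX1[i] and tipX2[i] into
// sumtable[i * 16 + j * 4 .. + 3].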
__global__ static void cudaSumTTGammaKernel(unsigned char *tipX1, unsigned char *tipX2, double *tipVector, double *sumtable, int limit) {
const int n = blockIdx.x * blockDim.x + threadIdx.x;
if (n >= limit) {
return;
}
const int i = n / 4, j = n % 4;
double *left = &(tipVector[4 * tipX1[i]]);
double *right = &(tipVector[4 * tipX2[i]]);
double *sum = &sumtable[i * 16 + j * 4];
#pragma unroll
for (int k = 0; k < 4; k++) {
sum[k] = left[k] * right[k];
}
} | b49375b8489e513f462e0289bacd6cebd5662f93.cu | #include "includes.h"
static unsigned int GRID_SIZE_N;
static unsigned int GRID_SIZE_4N;
static unsigned int MAX_STATE_VALUE;
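// [Editorial comment] One thread per (i, j) pair, where i = n / 4 indexes tipX1/tipX2 and
// j = n % 4 selects one of four 4-element output slots: each thread writes the element-wise
// product of the two 4-entry tipVector blocks addressed by tipX1[i] and tipX2[i] into
// sumtable[i * 16 + j * 4 .. + 3].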
__global__ static void cudaSumTTGammaKernel(unsigned char *tipX1, unsigned char *tipX2, double *tipVector, double *sumtable, int limit) {
const int n = blockIdx.x * blockDim.x + threadIdx.x;
if (n >= limit) {
return;
}
const int i = n / 4, j = n % 4;
double *left = &(tipVector[4 * tipX1[i]]);
double *right = &(tipVector[4 * tipX2[i]]);
double *sum = &sumtable[i * 16 + j * 4];
#pragma unroll
for (int k = 0; k < 4; k++) {
sum[k] = left[k] * right[k];
}
} |
79c36790938a1b25af34d9aea929d2c1abe62be2.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file cc_app.cu
*
* @brief connected component (CC) application
*/
#include <gunrock/gunrock.h>
// graph construction utilities
#include <gunrock/graphio/market.cuh>
// connected component includes
#include <gunrock/app/cc/cc_enactor.cuh>
#include <gunrock/app/cc/cc_problem.cuh>
#include <gunrock/app/cc/cc_functor.cuh>
using namespace gunrock;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::cc;
/**
* @brief CC_Parameter structure
*/
struct CC_Parameter : gunrock::app::TestParameter_Base
{
public:
CC_Parameter() { }
~CC_Parameter() { }
};
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT,
bool DEBUG,
bool SIZE_CHECK >
void runCC(GRGraph* output, CC_Parameter *parameter);
/**
* @brief Run test
*
* @tparam VertexId Vertex identifier type
* @tparam Value Attribute type
* @tparam SizeT Graph size type
* @tparam INSTRUMENT Keep kernels statics
* @tparam DEBUG Keep debug statics
*
* @param[out] output Pointer to output graph structure of the problem
* @param[in] parameter primitive-specific test parameters
*/
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT,
bool DEBUG >
void sizeCheckCC(GRGraph* output, CC_Parameter *parameter)
{
if (parameter->size_check)
runCC<VertexId, Value, SizeT, INSTRUMENT, DEBUG,
true > (output, parameter);
else
runCC<VertexId, Value, SizeT, INSTRUMENT, DEBUG,
false> (output, parameter);
}
/**
* @brief Run test
*
* @tparam VertexId Vertex identifier type
* @tparam Value Attribute type
* @tparam SizeT Graph size type
* @tparam INSTRUMENT Keep kernels statics
*
* @param[out] output Pointer to output graph structure of the problem
* @param[in] parameter primitive-specific test parameters
*/
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT >
void debugCC(GRGraph* output, CC_Parameter *parameter)
{
if (parameter->debug)
sizeCheckCC<VertexId, Value, SizeT, INSTRUMENT,
true > (output, parameter);
else
sizeCheckCC<VertexId, Value, SizeT, INSTRUMENT,
false> (output, parameter);
}
/**
* @brief Run test
*
* @tparam VertexId Vertex identifier type
* @tparam Value Attribute type
* @tparam SizeT Graph size type
*
* @param[out] output Pointer to output graph structure of the problem
* @param[in] parameter primitive-specific test parameters
*/
template <
typename VertexId,
typename Value,
typename SizeT >
void instrumentedCC(GRGraph* output, CC_Parameter *parameter)
{
if (parameter->instrumented)
debugCC<VertexId, Value, SizeT, true>(output, parameter);
else
debugCC<VertexId, Value, SizeT, false>(output, parameter);
}
/**
* @brief Run test
*
* @tparam VertexId Vertex identifier type
* @tparam Value Attribute type
* @tparam SizeT Graph size type
* @tparam INSTRUMENT Keep kernels statics
* @tparam DEBUG Keep debug statics
* @tparam SIZE_CHECK Enable size check
*
* @param[out] output Pointer to output graph structure of the problem
* @param[in] parameter primitive-specific test parameters
*/
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT,
bool DEBUG,
bool SIZE_CHECK >
void runCC(GRGraph* output, CC_Parameter *parameter)
{
typedef CCProblem < VertexId,
SizeT,
Value,
false > CcProblem; // use double buffer
typedef CCEnactor < CcProblem,
INSTRUMENT,
DEBUG,
SIZE_CHECK > CcEnactor;
Csr<VertexId, Value, SizeT> *graph =
(Csr<VertexId, Value, SizeT>*)parameter->graph;
bool quiet = parameter -> g_quiet;
int max_grid_size = parameter -> max_grid_size;
int num_gpus = parameter -> num_gpus;
double max_queue_sizing = parameter -> max_queue_sizing;
double max_in_sizing = parameter -> max_in_sizing;
ContextPtr *context = (ContextPtr*)parameter -> context;
std::string partition_method = parameter -> partition_method;
int *gpu_idx = parameter -> gpu_idx;
hipStream_t *streams = parameter -> streams;
float partition_factor = parameter -> partition_factor;
int partition_seed = parameter -> partition_seed;
bool g_stream_from_host = parameter -> g_stream_from_host;
size_t *org_size = new size_t [num_gpus];
// Allocate host-side label array
VertexId *h_component_ids = new VertexId[graph->nodes];
for (int gpu = 0; gpu < num_gpus; gpu++)
{
size_t dummy;
hipSetDevice(gpu_idx[gpu]);
hipMemGetInfo(&(org_size[gpu]), &dummy);
}
CcEnactor* enactor = new CcEnactor(num_gpus, gpu_idx); // CC enactor map
CcProblem* problem = new CcProblem; // Allocate problem on GPU
util::GRError(
problem->Init(
g_stream_from_host,
graph,
NULL,
num_gpus,
gpu_idx,
partition_method,
streams,
max_queue_sizing,
max_in_sizing,
partition_factor,
partition_seed),
"CC Problem Initialization Failed", __FILE__, __LINE__);
util::GRError(
enactor->Init(context, problem, max_grid_size),
"CC Enactor Init failed", __FILE__, __LINE__);
// Perform CC
CpuTimer cpu_timer;
util::GRError(
problem->Reset(enactor->GetFrontierType(), max_queue_sizing),
"CC Problem Data Reset Failed", __FILE__, __LINE__);
util::GRError(
enactor->Reset(), "CC Enactor Reset failed", __FILE__, __LINE__);
cpu_timer.Start();
util::GRError(
enactor->Enact(), "CC Problem Enact Failed", __FILE__, __LINE__);
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
// Copy out results
util::GRError(
problem->Extract(h_component_ids),
"CC Problem Data Extraction Failed", __FILE__, __LINE__);
unsigned int num_components = problem->num_components;
output->aggregation = (unsigned int*)&num_components;
output->node_value1 = (VertexId*)&h_component_ids[0];
if (!quiet)
{
printf(" GPU Connected Component finished in %lf msec.\n", elapsed);
}
// Clean up
if (org_size) { delete[] org_size; org_size = NULL; }
if (problem ) { delete problem ; problem = NULL; }
if (enactor ) { delete enactor ; enactor = NULL; }
}
/**
* @brief Dispatch function to handle configurations
*
* @param[out] grapho Pointer to output graph structure of the problem
* @param[in] graphi Pointer to input graph we need to process on
* @param[in] config Primitive-specific configurations
* @param[in] data_t Data type configurations
* @param[in] context ModernGPU context
* @param[in] streams CUDA stream
*/
void dispatch_cc(
GRGraph* grapho,
const GRGraph* graphi,
const GRSetup config,
const GRTypes data_t,
ContextPtr* context,
hipStream_t* streams)
{
CC_Parameter *parameter = new CC_Parameter;
parameter->context = context;
parameter->streams = streams;
parameter->g_quiet = config.quiet;
parameter->num_gpus = config.num_devices;
parameter->gpu_idx = config.device_list;
switch (data_t.VTXID_TYPE)
{
case VTXID_INT:
{
switch (data_t.SIZET_TYPE)
{
case SIZET_INT:
{
switch (data_t.VALUE_TYPE)
{
case VALUE_INT: // template type = <int, int, int>
{
// build input CSR format graph
Csr<int, int, int> csr(false);
csr.nodes = graphi->num_nodes;
csr.edges = graphi->num_edges;
csr.row_offsets = (int*)graphi->row_offsets;
csr.column_indices = (int*)graphi->col_indices;
parameter->graph = &csr;
instrumentedCC<int, int, int>(grapho, parameter);
// reset for free memory
csr.row_offsets = NULL;
csr.column_indices = NULL;
break;
}
case VALUE_UINT: // template type = <int, uint, int>
{
printf("Not Yet Support This DataType Combination.\n");
break;
}
case VALUE_FLOAT: // template type = <int, float, int>
{
printf("Not Yet Support This DataType Combination.\n");
break;
}
}
break;
}
}
break;
}
}
}
/*
* @brief Entry of gunrock_cc function
*
* @param[out] grapho Pointer to output graph structure of the problem
* @param[in] graphi Pointer to input graph we need to process on
* @param[in] config Gunrock primitive specific configurations
* @param[in] data_t Gunrock data type structure
*/
void gunrock_cc(
GRGraph *grapho,
const GRGraph *graphi,
const GRSetup config,
const GRTypes data_t)
{
// GPU-related configurations
int num_gpus = 0;
int *gpu_idx = NULL;
ContextPtr *context = NULL;
hipStream_t *streams = NULL;
num_gpus = config.num_devices;
gpu_idx = new int [num_gpus];
for (int i = 0; i < num_gpus; ++i)
{
gpu_idx[i] = config.device_list[i];
}
// Create streams and ModernGPU context for each GPU
streams = new hipStream_t[num_gpus * num_gpus * 2];
context = new ContextPtr[num_gpus * num_gpus];
if (!config.quiet) { printf(" using %d GPUs:", num_gpus); }
for (int gpu = 0; gpu < num_gpus; ++gpu)
{
if (!config.quiet) { printf(" %d ", gpu_idx[gpu]); }
util::SetDevice(gpu_idx[gpu]);
for (int i = 0; i < num_gpus * 2; ++i)
{
int _i = gpu * num_gpus * 2 + i;
util::GRError(hipStreamCreate(&streams[_i]),
"hipStreamCreate fialed.", __FILE__, __LINE__);
if (i < num_gpus)
{
context[gpu * num_gpus + i] =
mgpu::CreateCudaDeviceAttachStream(gpu_idx[gpu],
streams[_i]);
}
}
}
if (!config.quiet) { printf("\n"); }
dispatch_cc(grapho, graphi, config, data_t, context, streams);
}
/*
 * @brief Simple interface taking in CSR arrays as input
 * @param[out] component   Return component ID for each node
 * @return                 Number of components calculated
* @param[out] num_comps Return number of components calculated
* @param[in] num_nodes Number of nodes of the input graph
* @param[in] num_edges Number of edges of the input graph
* @param[in] row_offsets CSR-formatted graph input row offsets
* @param[in] col_indices CSR-formatted graph input column indices
*/
int cc(
int* component,
const int num_nodes,
const int num_edges,
const int* row_offsets,
const int* col_indices)
{
struct GRTypes data_t; // primitive-specific data types
data_t.VTXID_TYPE = VTXID_INT; // integer vertex identifier
data_t.SIZET_TYPE = SIZET_INT; // integer graph size type
data_t.VALUE_TYPE = VALUE_INT; // integer attributes type
struct GRSetup config = InitSetup(); // primitive-specific configures
struct GRGraph *grapho = (struct GRGraph*)malloc(sizeof(struct GRGraph));
struct GRGraph *graphi = (struct GRGraph*)malloc(sizeof(struct GRGraph));
graphi->num_nodes = num_nodes; // setting graph nodes
graphi->num_edges = num_edges; // setting graph edges
graphi->row_offsets = (void*)&row_offsets[0]; // setting row_offsets
graphi->col_indices = (void*)&col_indices[0]; // setting col_indices
gunrock_cc(grapho, graphi, config, data_t);
int* num_components = (int*)grapho->aggregation;
memcpy(component, (int*)grapho->node_value1, num_nodes * sizeof(int));
if (graphi) free(graphi);
if (grapho) free(grapho);
return *num_components;
}
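/*
 * [Editor's sketch] Hypothetical caller of the simple cc() interface above, added for
 * illustration only and not part of the Gunrock sources. The toy graph consists of two
 * disconnected undirected edges (0-1 and 2-3) in CSR form, so cc() is expected to fill
 * `component` with two distinct IDs and return 2.
 */
static int cc_usage_example()
{
    const int row_offsets[5] = { 0, 1, 2, 3, 4 };
    const int col_indices[4] = { 1, 0, 3, 2 };
    int component[4];
    return cc(component, 4, 4, row_offsets, col_indices);
}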
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
| 79c36790938a1b25af34d9aea929d2c1abe62be2.cu | // ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file cc_app.cu
*
* @brief connected component (CC) application
*/
#include <gunrock/gunrock.h>
// graph construction utilities
#include <gunrock/graphio/market.cuh>
// connected component includes
#include <gunrock/app/cc/cc_enactor.cuh>
#include <gunrock/app/cc/cc_problem.cuh>
#include <gunrock/app/cc/cc_functor.cuh>
using namespace gunrock;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::cc;
/**
* @brief CC_Parameter structure
*/
struct CC_Parameter : gunrock::app::TestParameter_Base
{
public:
CC_Parameter() { }
~CC_Parameter() { }
};
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT,
bool DEBUG,
bool SIZE_CHECK >
void runCC(GRGraph* output, CC_Parameter *parameter);
/**
* @brief Run test
*
* @tparam VertexId Vertex identifier type
* @tparam Value Attribute type
* @tparam SizeT Graph size type
* @tparam INSTRUMENT Keep kernels statics
* @tparam DEBUG Keep debug statics
*
* @param[out] output Pointer to output graph structure of the problem
* @param[in] parameter primitive-specific test parameters
*/
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT,
bool DEBUG >
void sizeCheckCC(GRGraph* output, CC_Parameter *parameter)
{
if (parameter->size_check)
runCC<VertexId, Value, SizeT, INSTRUMENT, DEBUG,
true > (output, parameter);
else
runCC<VertexId, Value, SizeT, INSTRUMENT, DEBUG,
false> (output, parameter);
}
/**
* @brief Run test
*
* @tparam VertexId Vertex identifier type
* @tparam Value Attribute type
* @tparam SizeT Graph size type
* @tparam INSTRUMENT Keep kernels statics
*
* @param[out] output Pointer to output graph structure of the problem
* @param[in] parameter primitive-specific test parameters
*/
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT >
void debugCC(GRGraph* output, CC_Parameter *parameter)
{
if (parameter->debug)
sizeCheckCC<VertexId, Value, SizeT, INSTRUMENT,
true > (output, parameter);
else
sizeCheckCC<VertexId, Value, SizeT, INSTRUMENT,
false> (output, parameter);
}
/**
* @brief Run test
*
* @tparam VertexId Vertex identifier type
* @tparam Value Attribute type
* @tparam SizeT Graph size type
*
* @param[out] output Pointer to output graph structure of the problem
* @param[in] parameter primitive-specific test parameters
*/
template <
typename VertexId,
typename Value,
typename SizeT >
void instrumentedCC(GRGraph* output, CC_Parameter *parameter)
{
if (parameter->instrumented)
debugCC<VertexId, Value, SizeT, true>(output, parameter);
else
debugCC<VertexId, Value, SizeT, false>(output, parameter);
}
/**
* @brief Run test
*
* @tparam VertexId Vertex identifier type
* @tparam Value Attribute type
* @tparam SizeT Graph size type
* @tparam INSTRUMENT Keep kernels statics
* @tparam DEBUG Keep debug statics
* @tparam SIZE_CHECK Enable size check
*
* @param[out] output Pointer to output graph structure of the problem
* @param[in] parameter primitive-specific test parameters
*/
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT,
bool DEBUG,
bool SIZE_CHECK >
void runCC(GRGraph* output, CC_Parameter *parameter)
{
typedef CCProblem < VertexId,
SizeT,
Value,
false > CcProblem; // use double buffer
typedef CCEnactor < CcProblem,
INSTRUMENT,
DEBUG,
SIZE_CHECK > CcEnactor;
Csr<VertexId, Value, SizeT> *graph =
(Csr<VertexId, Value, SizeT>*)parameter->graph;
bool quiet = parameter -> g_quiet;
int max_grid_size = parameter -> max_grid_size;
int num_gpus = parameter -> num_gpus;
double max_queue_sizing = parameter -> max_queue_sizing;
double max_in_sizing = parameter -> max_in_sizing;
ContextPtr *context = (ContextPtr*)parameter -> context;
std::string partition_method = parameter -> partition_method;
int *gpu_idx = parameter -> gpu_idx;
cudaStream_t *streams = parameter -> streams;
float partition_factor = parameter -> partition_factor;
int partition_seed = parameter -> partition_seed;
bool g_stream_from_host = parameter -> g_stream_from_host;
size_t *org_size = new size_t [num_gpus];
// Allocate host-side label array
VertexId *h_component_ids = new VertexId[graph->nodes];
for (int gpu = 0; gpu < num_gpus; gpu++)
{
size_t dummy;
cudaSetDevice(gpu_idx[gpu]);
cudaMemGetInfo(&(org_size[gpu]), &dummy);
}
CcEnactor* enactor = new CcEnactor(num_gpus, gpu_idx); // CC enactor map
CcProblem* problem = new CcProblem; // Allocate problem on GPU
util::GRError(
problem->Init(
g_stream_from_host,
graph,
NULL,
num_gpus,
gpu_idx,
partition_method,
streams,
max_queue_sizing,
max_in_sizing,
partition_factor,
partition_seed),
"CC Problem Initialization Failed", __FILE__, __LINE__);
util::GRError(
enactor->Init(context, problem, max_grid_size),
"CC Enactor Init failed", __FILE__, __LINE__);
// Perform CC
CpuTimer cpu_timer;
util::GRError(
problem->Reset(enactor->GetFrontierType(), max_queue_sizing),
"CC Problem Data Reset Failed", __FILE__, __LINE__);
util::GRError(
enactor->Reset(), "CC Enactor Reset failed", __FILE__, __LINE__);
cpu_timer.Start();
util::GRError(
enactor->Enact(), "CC Problem Enact Failed", __FILE__, __LINE__);
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
// Copy out results
util::GRError(
problem->Extract(h_component_ids),
"CC Problem Data Extraction Failed", __FILE__, __LINE__);
unsigned int num_components = problem->num_components;
output->aggregation = (unsigned int*)&num_components;
output->node_value1 = (VertexId*)&h_component_ids[0];
if (!quiet)
{
printf(" GPU Connected Component finished in %lf msec.\n", elapsed);
}
// Clean up
if (org_size) { delete[] org_size; org_size = NULL; }
if (problem ) { delete problem ; problem = NULL; }
if (enactor ) { delete enactor ; enactor = NULL; }
}
/**
* @brief Dispatch function to handle configurations
*
* @param[out] grapho Pointer to output graph structure of the problem
* @param[in] graphi Pointer to input graph we need to process on
* @param[in] config Primitive-specific configurations
* @param[in] data_t Data type configurations
* @param[in] context ModernGPU context
* @param[in] streams CUDA stream
*/
void dispatch_cc(
GRGraph* grapho,
const GRGraph* graphi,
const GRSetup config,
const GRTypes data_t,
ContextPtr* context,
cudaStream_t* streams)
{
CC_Parameter *parameter = new CC_Parameter;
parameter->context = context;
parameter->streams = streams;
parameter->g_quiet = config.quiet;
parameter->num_gpus = config.num_devices;
parameter->gpu_idx = config.device_list;
switch (data_t.VTXID_TYPE)
{
case VTXID_INT:
{
switch (data_t.SIZET_TYPE)
{
case SIZET_INT:
{
switch (data_t.VALUE_TYPE)
{
case VALUE_INT: // template type = <int, int, int>
{
// build input CSR format graph
Csr<int, int, int> csr(false);
csr.nodes = graphi->num_nodes;
csr.edges = graphi->num_edges;
csr.row_offsets = (int*)graphi->row_offsets;
csr.column_indices = (int*)graphi->col_indices;
parameter->graph = &csr;
instrumentedCC<int, int, int>(grapho, parameter);
// reset for free memory
csr.row_offsets = NULL;
csr.column_indices = NULL;
break;
}
case VALUE_UINT: // template type = <int, uint, int>
{
printf("Not Yet Support This DataType Combination.\n");
break;
}
case VALUE_FLOAT: // template type = <int, float, int>
{
printf("Not Yet Support This DataType Combination.\n");
break;
}
}
break;
}
}
break;
}
}
}
/*
* @brief Entry of gunrock_cc function
*
* @param[out] grapho Pointer to output graph structure of the problem
* @param[in] graphi Pointer to input graph we need to process on
* @param[in] config Gunrock primitive specific configurations
* @param[in] data_t Gunrock data type structure
*/
void gunrock_cc(
GRGraph *grapho,
const GRGraph *graphi,
const GRSetup config,
const GRTypes data_t)
{
// GPU-related configurations
int num_gpus = 0;
int *gpu_idx = NULL;
ContextPtr *context = NULL;
cudaStream_t *streams = NULL;
num_gpus = config.num_devices;
gpu_idx = new int [num_gpus];
for (int i = 0; i < num_gpus; ++i)
{
gpu_idx[i] = config.device_list[i];
}
// Create streams and ModernGPU context for each GPU
streams = new cudaStream_t[num_gpus * num_gpus * 2];
context = new ContextPtr[num_gpus * num_gpus];
if (!config.quiet) { printf(" using %d GPUs:", num_gpus); }
for (int gpu = 0; gpu < num_gpus; ++gpu)
{
if (!config.quiet) { printf(" %d ", gpu_idx[gpu]); }
util::SetDevice(gpu_idx[gpu]);
for (int i = 0; i < num_gpus * 2; ++i)
{
int _i = gpu * num_gpus * 2 + i;
util::GRError(cudaStreamCreate(&streams[_i]),
"cudaStreamCreate fialed.", __FILE__, __LINE__);
if (i < num_gpus)
{
context[gpu * num_gpus + i] =
mgpu::CreateCudaDeviceAttachStream(gpu_idx[gpu],
streams[_i]);
}
}
}
if (!config.quiet) { printf("\n"); }
dispatch_cc(grapho, graphi, config, data_t, context, streams);
}
/*
 * @brief Simple interface taking in CSR arrays as input
 * @param[out] component   Return component ID for each node
 * @return                 Number of components calculated
* @param[out] num_comps Return number of components calculated
* @param[in] num_nodes Number of nodes of the input graph
* @param[in] num_edges Number of edges of the input graph
* @param[in] row_offsets CSR-formatted graph input row offsets
* @param[in] col_indices CSR-formatted graph input column indices
*/
int cc(
int* component,
const int num_nodes,
const int num_edges,
const int* row_offsets,
const int* col_indices)
{
struct GRTypes data_t; // primitive-specific data types
data_t.VTXID_TYPE = VTXID_INT; // integer vertex identifier
data_t.SIZET_TYPE = SIZET_INT; // integer graph size type
data_t.VALUE_TYPE = VALUE_INT; // integer attributes type
struct GRSetup config = InitSetup(); // primitive-specific configures
struct GRGraph *grapho = (struct GRGraph*)malloc(sizeof(struct GRGraph));
struct GRGraph *graphi = (struct GRGraph*)malloc(sizeof(struct GRGraph));
graphi->num_nodes = num_nodes; // setting graph nodes
graphi->num_edges = num_edges; // setting graph edges
graphi->row_offsets = (void*)&row_offsets[0]; // setting row_offsets
graphi->col_indices = (void*)&col_indices[0]; // setting col_indices
gunrock_cc(grapho, graphi, config, data_t);
int* num_components = (int*)grapho->aggregation;
memcpy(component, (int*)grapho->node_value1, num_nodes * sizeof(int));
if (graphi) free(graphi);
if (grapho) free(grapho);
return *num_components;
}
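/*
 * [Editor's sketch] Hypothetical caller of the simple cc() interface above, added for
 * illustration only and not part of the Gunrock sources. The toy graph consists of two
 * disconnected undirected edges (0-1 and 2-3) in CSR form, so cc() is expected to fill
 * `component` with two distinct IDs and return 2.
 */
static int cc_usage_example()
{
    const int row_offsets[5] = { 0, 1, 2, 3, 4 };
    const int col_indices[4] = { 1, 0, 3, 2 };
    int component[4];
    return cc(component, 4, 4, row_offsets, col_indices);
}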
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
b8f04efca492ad9a1eba7ede94b471104c1d14ba.hip | // !!! This is a file automatically generated by hipify!!!
/**
* (C) Copyright 2020, 2021 IBM. All Rights Reserved.
*
* This code is licensed under the Apache License, Version 2.0. You may
* obtain a copy of this license in the LICENSE.txt file in the root directory
* of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
*
* Any modifications or derivative works of this code must retain this
* copyright notice, and modified files need to carry a notice indicating
* that they have been altered from the originals.
*/
#include "forward_backward_pass.h"
#include "rpu_pulsed_meta_parameter.h"
#include "rpucuda_transfer_device.h"
#include <memory>
namespace RPU {
/******************************************************************************************/
/* TransferRPUDeviceCuda
CUDA implementation of TransferRPUDevice
*/
template <typename T> void TransferRPUDeviceCuda<T>::initialize() {
transfer_pwu_ =
RPU::make_unique<PulsedWeightUpdater<T>>(this->context_, this->x_size_, this->d_size_);
transfer_iom_ =
RPU::make_unique<InputOutputManager<T>>(this->context_, this->x_size_, this->d_size_);
}
template <typename T>
TransferRPUDeviceCuda<T>::TransferRPUDeviceCuda(
CudaContext *c, const TransferRPUDevice<T> &rpu_device) {
this->context_ = c;
populateFrom(rpu_device); // use populate to call parent
};
// copy constructor
template <typename T>
TransferRPUDeviceCuda<T>::TransferRPUDeviceCuda(const TransferRPUDeviceCuda<T> &other)
: VectorRPUDeviceCuda<T>(other) {
if (other.transfer_vecs_ != nullptr) {
transfer_vecs_ = RPU::make_unique<CudaArray<T>>(*other.transfer_vecs_);
}
initialize();
current_col_indices_ = other.current_col_indices_;
fully_hidden_ = other.fully_hidden_;
this->context_->synchronizeDevice();
};
// copy assignment
template <typename T>
TransferRPUDeviceCuda<T> &
TransferRPUDeviceCuda<T>::operator=(const TransferRPUDeviceCuda<T> &other) {
TransferRPUDeviceCuda<T> tmp(other);
swap(*this, tmp);
return *this;
};
// move constructor
template <typename T>
TransferRPUDeviceCuda<T>::TransferRPUDeviceCuda(TransferRPUDeviceCuda<T> &&other) {
*this = std::move(other);
};
// move assignment
template <typename T>
TransferRPUDeviceCuda<T> &TransferRPUDeviceCuda<T>::operator=(TransferRPUDeviceCuda<T> &&other) {
VectorRPUDeviceCuda<T>::operator=(std::move(other));
transfer_vecs_ = std::move(other.transfer_vecs_);
current_col_indices_ = other.current_col_indices_;
other.current_col_indices_.clear();
fully_hidden_ = other.fully_hidden_;
transfer_pwu_ = std::move(other.transfer_pwu_);
transfer_iom_ = std::move(other.transfer_iom_);
// ignore transfer_tmp_ or RNG
return *this;
};
template <typename T>
void TransferRPUDeviceCuda<T>::populateFrom(const AbstractRPUDevice<T> &rpu_device_in) {
const auto &rpu_device = dynamic_cast<const TransferRPUDevice<T> &>(rpu_device_in);
if (&rpu_device == nullptr) {
RPU_FATAL("populateFrom expects TransferRPUDevice.");
}
VectorRPUDeviceCuda<T>::populateFrom(rpu_device_in);
const auto &par = getPar();
if (!par.singleDeviceUpdate()) {
RPU_FATAL("Multiple device update not supported for Transfer Device");
}
if (!par.same_context) {
RPU_FATAL("Only same context supported");
}
if (this->n_devices_ < 2) {
RPU_FATAL("Expect at least two devices.");
}
for (int j = 1; j < this->n_devices_ - 1; j++) {
if (par.transfer_every_vec[0] > par.transfer_every_vec[j]) {
RPU_FATAL("Later transfer periods need to be larger than first for CUDA.");
}
}
transfer_vecs_ = RPU::make_unique<CudaArray<T>>(
this->context_, this->x_size_ * this->x_size_, rpu_device.getTransferVecs());
initialize(); // pwu/iom
current_col_indices_.resize(this->n_devices_ - 1);
std::fill(current_col_indices_.begin(), current_col_indices_.end(), (int)0);
this->current_update_idx_ = 0;
fully_hidden_ = par.fullyHidden();
}
/* partially transfer using the given "readout" transfer vectors
(with io-managed forward) and the usual device update */
template <typename T>
void TransferRPUDeviceCuda<T>::forwardUpdate(
int to_device_idx,
int from_device_idx,
int i_col_start,
const T lr,
const T *x_input,
const int n_vec,
const bool trans,
const PulsedUpdateMetaParameter<T> &up) {
if (!lr) {
return;
}
if ((transfer_tmp_ == nullptr) || transfer_tmp_->getSize() < n_vec * this->d_size_) {
transfer_tmp_ = RPU::make_unique<CudaArray<T>>(this->context_, this->d_size_ * n_vec);
this->context_->synchronize();
}
// forward with transfer vectors
RPU::detail::forwardMatrixIteratorIOManaged(
this->context_, this->dev_weights_ptrs_[from_device_idx], x_input, this->x_size_, trans,
transfer_tmp_->getData(), this->d_size_, trans, n_vec,
(T)1.0, // additional output scaling. Obey?
*transfer_iom_, getPar().transfer_io, false);
// update according to device
T *W = this->dev_weights_ptrs_[to_device_idx]; /// note that the ptrs might point to the current
/// weight
// since we need *positive* update, LR needs to be
// negative. However, this is not supported in the PWU
// really. Thus we scale the temp-vector by -1
RPU::math::scal(this->context_, this->d_size_ * n_vec, (T)-1.0, transfer_tmp_->getData(), 1);
transfer_pwu_->update(
x_input, // this is the transfer vector (x_size)
transfer_tmp_->getDataConst(), // this should be d_size
W, &*this->rpucuda_device_vec_[to_device_idx], up, fabs(lr), n_vec, trans, trans);
}
template <typename T>
void TransferRPUDeviceCuda<T>::transfer(
int to_device_idx,
int from_device_idx,
const PulsedUpdateMetaParameter<T> ¤t_up,
const T current_lr) {
int i_col = current_col_indices_[from_device_idx];
const auto &par = getPar();
if (par.random_column) {
i_col = MAX(MIN(floor(this->rw_rng_.sampleUniform() * this->x_size_), this->x_size_ - 1), 0);
}
// transfer_vecs_ is always x_size-major (that is trans==false)
T *tvec = transfer_vecs_->getData() + i_col * this->x_size_;
int n_rest = this->x_size_ - i_col;
T lr = par.getTransferLR(to_device_idx, from_device_idx, current_lr);
const PulsedUpdateMetaParameter<T> *up;
up = &par.transfer_up;
int n_transfers = MIN(par.n_cols_per_transfer, this->x_size_);
if (n_rest < n_transfers) {
// rest
forwardUpdate(to_device_idx, from_device_idx, i_col, lr, tvec, n_rest, false, *up);
// from beginning
forwardUpdate(
to_device_idx, from_device_idx, 0, lr, transfer_vecs_->getData(), n_transfers - n_rest,
false, *up);
} else {
forwardUpdate(to_device_idx, from_device_idx, i_col, lr, tvec, n_transfers, false, *up);
}
if (this->rw_rng_.sampleUniform() <
par.with_reset_prob) { // COL-wise prob!! device-wise reset_prob=1
this->rpucuda_device_vec_[from_device_idx]->resetCols(
this->dev_weights_ptrs_[from_device_idx], i_col, n_transfers, 1);
}
current_col_indices_[from_device_idx] = (i_col + n_transfers) % this->x_size_;
}
template <typename T>
inline int TransferRPUDeviceCuda<T>::getTransferEvery(int didx, int m_batch) const {
if (getPar().units_in_mbatch) {
return MAX(ceil(getPar().transfer_every_vec[didx] * m_batch), 0);
} else {
return MAX(round(getPar().transfer_every_vec[didx]), 0);
}
}
template <typename T> inline int getNChunks(int m_batch, T every) {
if (every <= 0) {
return 1;
} else {
return MAX((int)(round((T)m_batch / every)), 1); // take next integer for period
}
}
inline int getChunkSize(int m_batch, int nchunks) {
return (m_batch + nchunks - 1) / nchunks; // to ensure not to have residual
}
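// [Editorial comment] Worked example of the chunking arithmetic above: for m_batch = 100
// and a transfer period of every = 30 updates, getNChunks rounds 100 / 30 to 3 chunks and
// getChunkSize returns (100 + 2) / 3 = 34, so the batch is processed as 34 + 34 + 32
// updates with a transfer opportunity after each chunk.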
template <typename T>
pwukpvec_t<T> TransferRPUDeviceCuda<T>::getUpdateKernels(
int m_batch, int nK32, int use_bo64, bool out_trans, const PulsedUpdateMetaParameter<T> &up) {
pwukpvec_t<T> v;
int nchunks = getNChunks(m_batch, getTransferEvery(0, m_batch));
int chunk_size = getChunkSize(m_batch, nchunks);
// use the first device as the "FAST" device that gets updates with the true gradients.
v = this->rpucuda_device_vec_[0]->getUpdateKernels(chunk_size, nK32, use_bo64, out_trans, up);
if (nchunks > 1) {
for (auto &kpars : v) {
kpars->ensureChunk();
}
}
return v;
}
template <typename T>
void TransferRPUDeviceCuda<T>::runUpdateKernel(
pwukp_t<T> kpars,
CudaContext *up_context,
T *dev_weights,
int m_batch,
const BitLineMaker<T> *blm,
const PulsedUpdateMetaParameter<T> &up,
hiprandState_t *dev_states,
int one_sided,
uint32_t *x_counts_chunk,
uint32_t *d_counts_chunk) {
// calling kpars->run(..,this,..) directly should cause error because derived from abstract
// device..
DEBUG_OUT("start run update kernel.");
DEBUG_CALL(kpars->print(););
if (fully_hidden_) {
this->dev_weights_ptrs_[this->n_devices_ - 1] = dev_weights;
}
// always same (up) context.
CudaContext *c = up_context;
if (x_counts_chunk != nullptr || d_counts_chunk != nullptr) {
RPU_FATAL("Chunking not allowed here.");
}
int nchunks = getNChunks(m_batch, getTransferEvery(0, m_batch));
T lr = blm->getCurrentLR();
if (nchunks <= 1) {
// just update the whole batch we do not call kpars directly to
// also make possible to have non-pulsed devices. Note that only
// one device is directly updated with the gradients, thus
// tuning kpars are always unique (and valid to that rpu_device
// only). However, the other RPU device kernels will be tuned
// during transfer, since we use a separate PWU object
this->rpucuda_device_vec_[0]->runUpdateKernel(
kpars, c, this->dev_weights_ptrs_[0], m_batch, blm, up, dev_states, one_sided);
if (up._currently_tuning) {
return;
}
this->current_update_idx_ += m_batch; // first update idx
for (int j = 0; j < this->n_devices_ - 1; j++) {
// all transfer periods will be rounded up to batches.
int period = (getTransferEvery(j, m_batch) + m_batch - 1) / m_batch; // in m_batch
if (period > 0 && this->current_update_idx_ / m_batch % period == 0) {
transfer(j + 1, j, up, lr);
}
}
} else {
// need to do it chunkwise
int chunk_size = getChunkSize(m_batch, nchunks);
int batch_start = 0;
int nK32 = blm->getNK32Current();
auto x_counts = blm->getXCountsData();
auto d_counts = blm->getDCountsData();
for (int i_chunk = 0; i_chunk < nchunks; i_chunk++) {
// note that last chunk might be smaller.
int current_m_batch = chunk_size - MAX(batch_start + chunk_size - m_batch, 0);
this->rpucuda_device_vec_[0]->runUpdateKernel(
kpars,
c, // same context since sequence important
this->dev_weights_ptrs_[0], current_m_batch, blm, up, dev_states, one_sided,
x_counts + batch_start * this->x_size_ * nK32, // always non-trans
d_counts + batch_start * this->d_size_ * nK32);
if (up._currently_tuning) {
return;
}
this->current_update_idx_ += current_m_batch; // first update idx
batch_start += current_m_batch;
// transfer
for (int j = 0; j < this->n_devices_ - 1; j++) {
// all transfer periods will be rounded up to chunk_sizes
int period = (getTransferEvery(j, m_batch) + chunk_size - 1) / chunk_size;
if (period > 0 && this->current_update_idx_ / chunk_size % period == 0) {
transfer(j + 1, j, up, lr);
}
}
}
}
// only reduce at end
this->reduceToWeights(up_context, dev_weights);
}
template <typename T>
void TransferRPUDeviceCuda<T>::reduceToWeights(CudaContext *context, T *dev_weights) {
if (!fully_hidden_) {
VectorRPUDeviceCuda<T>::reduceToWeights(context, dev_weights);
}
}
template <typename T>
void TransferRPUDeviceCuda<T>::decayWeights(T *dev_weights, T alpha, bool bias_no_decay) {
if (fully_hidden_) {
this->dev_weights_ptrs_[this->n_devices_ - 1] = dev_weights;
}
VectorRPUDeviceCuda<T>::decayWeights(dev_weights, alpha, bias_no_decay);
}
template <typename T>
void TransferRPUDeviceCuda<T>::decayWeights(T *dev_weights, bool bias_no_decay) {
if (fully_hidden_) {
this->dev_weights_ptrs_[this->n_devices_ - 1] = dev_weights;
}
VectorRPUDeviceCuda<T>::decayWeights(dev_weights, bias_no_decay);
}
template <typename T> void TransferRPUDeviceCuda<T>::diffuseWeights(T *dev_weights) {
if (fully_hidden_) {
this->dev_weights_ptrs_[this->n_devices_ - 1] = dev_weights;
}
VectorRPUDeviceCuda<T>::diffuseWeights(dev_weights);
}
template <typename T> void TransferRPUDeviceCuda<T>::clipWeights(T *dev_weights, T clip) {
if (fully_hidden_) {
this->dev_weights_ptrs_[this->n_devices_ - 1] = dev_weights;
}
VectorRPUDeviceCuda<T>::clipWeights(dev_weights, clip);
}
template class TransferRPUDeviceCuda<float>;
#ifdef RPU_USE_DOUBLE
template class TransferRPUDeviceCuda<double>;
#endif
} // namespace RPU
| b8f04efca492ad9a1eba7ede94b471104c1d14ba.cu | /**
* (C) Copyright 2020, 2021 IBM. All Rights Reserved.
*
* This code is licensed under the Apache License, Version 2.0. You may
* obtain a copy of this license in the LICENSE.txt file in the root directory
* of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
*
* Any modifications or derivative works of this code must retain this
* copyright notice, and modified files need to carry a notice indicating
* that they have been altered from the originals.
*/
#include "forward_backward_pass.h"
#include "rpu_pulsed_meta_parameter.h"
#include "rpucuda_transfer_device.h"
#include <memory>
namespace RPU {
/******************************************************************************************/
/* TransferRPUDeviceCuda
CUDA implementation of TransferRPUDevice
*/
template <typename T> void TransferRPUDeviceCuda<T>::initialize() {
transfer_pwu_ =
RPU::make_unique<PulsedWeightUpdater<T>>(this->context_, this->x_size_, this->d_size_);
transfer_iom_ =
RPU::make_unique<InputOutputManager<T>>(this->context_, this->x_size_, this->d_size_);
}
template <typename T>
TransferRPUDeviceCuda<T>::TransferRPUDeviceCuda(
CudaContext *c, const TransferRPUDevice<T> &rpu_device) {
this->context_ = c;
populateFrom(rpu_device); // use populate to call parent
};
// copy constructor
template <typename T>
TransferRPUDeviceCuda<T>::TransferRPUDeviceCuda(const TransferRPUDeviceCuda<T> &other)
: VectorRPUDeviceCuda<T>(other) {
if (other.transfer_vecs_ != nullptr) {
transfer_vecs_ = RPU::make_unique<CudaArray<T>>(*other.transfer_vecs_);
}
initialize();
current_col_indices_ = other.current_col_indices_;
fully_hidden_ = other.fully_hidden_;
this->context_->synchronizeDevice();
};
// copy assignment
template <typename T>
TransferRPUDeviceCuda<T> &
TransferRPUDeviceCuda<T>::operator=(const TransferRPUDeviceCuda<T> &other) {
TransferRPUDeviceCuda<T> tmp(other);
swap(*this, tmp);
return *this;
};
// move constructor
template <typename T>
TransferRPUDeviceCuda<T>::TransferRPUDeviceCuda(TransferRPUDeviceCuda<T> &&other) {
*this = std::move(other);
};
// move assignment
template <typename T>
TransferRPUDeviceCuda<T> &TransferRPUDeviceCuda<T>::operator=(TransferRPUDeviceCuda<T> &&other) {
VectorRPUDeviceCuda<T>::operator=(std::move(other));
transfer_vecs_ = std::move(other.transfer_vecs_);
current_col_indices_ = other.current_col_indices_;
other.current_col_indices_.clear();
fully_hidden_ = other.fully_hidden_;
transfer_pwu_ = std::move(other.transfer_pwu_);
transfer_iom_ = std::move(other.transfer_iom_);
// ignore transfer_tmp_ or RNG
return *this;
};
template <typename T>
void TransferRPUDeviceCuda<T>::populateFrom(const AbstractRPUDevice<T> &rpu_device_in) {
const auto &rpu_device = dynamic_cast<const TransferRPUDevice<T> &>(rpu_device_in);
if (&rpu_device == nullptr) {
RPU_FATAL("populateFrom expects TransferRPUDevice.");
}
VectorRPUDeviceCuda<T>::populateFrom(rpu_device_in);
const auto &par = getPar();
if (!par.singleDeviceUpdate()) {
RPU_FATAL("Multiple device update not supported for Transfer Device");
}
if (!par.same_context) {
RPU_FATAL("Only same context supported");
}
if (this->n_devices_ < 2) {
RPU_FATAL("Expect at least two devices.");
}
for (int j = 1; j < this->n_devices_ - 1; j++) {
if (par.transfer_every_vec[0] > par.transfer_every_vec[j]) {
RPU_FATAL("Later transfer periods need to be larger than first for CUDA.");
}
}
transfer_vecs_ = RPU::make_unique<CudaArray<T>>(
this->context_, this->x_size_ * this->x_size_, rpu_device.getTransferVecs());
initialize(); // pwu/iom
current_col_indices_.resize(this->n_devices_ - 1);
std::fill(current_col_indices_.begin(), current_col_indices_.end(), (int)0);
this->current_update_idx_ = 0;
fully_hidden_ = par.fullyHidden();
}
/* partially transfer using the given "readout" transfer vectors
(with io-managed forward) and the usual device update */
template <typename T>
void TransferRPUDeviceCuda<T>::forwardUpdate(
int to_device_idx,
int from_device_idx,
int i_col_start,
const T lr,
const T *x_input,
const int n_vec,
const bool trans,
const PulsedUpdateMetaParameter<T> &up) {
if (!lr) {
return;
}
if ((transfer_tmp_ == nullptr) || transfer_tmp_->getSize() < n_vec * this->d_size_) {
transfer_tmp_ = RPU::make_unique<CudaArray<T>>(this->context_, this->d_size_ * n_vec);
this->context_->synchronize();
}
// forward with transfer vectors
RPU::detail::forwardMatrixIteratorIOManaged(
this->context_, this->dev_weights_ptrs_[from_device_idx], x_input, this->x_size_, trans,
transfer_tmp_->getData(), this->d_size_, trans, n_vec,
(T)1.0, // additional output scaling. Obey?
*transfer_iom_, getPar().transfer_io, false);
// update according to device
T *W = this->dev_weights_ptrs_[to_device_idx]; /// note that the ptrs might point to the current
/// weight
// since we need *positive* update, LR needs to be
// negative. However, this is not supported in the PWU
// really. Thus we scale the temp-vector by -1
RPU::math::scal(this->context_, this->d_size_ * n_vec, (T)-1.0, transfer_tmp_->getData(), 1);
transfer_pwu_->update(
x_input, // this is the transfer vector (x_size)
transfer_tmp_->getDataConst(), // this should be d_size
W, &*this->rpucuda_device_vec_[to_device_idx], up, fabs(lr), n_vec, trans, trans);
}
template <typename T>
void TransferRPUDeviceCuda<T>::transfer(
int to_device_idx,
int from_device_idx,
const PulsedUpdateMetaParameter<T> ¤t_up,
const T current_lr) {
int i_col = current_col_indices_[from_device_idx];
const auto &par = getPar();
if (par.random_column) {
i_col = MAX(MIN(floor(this->rw_rng_.sampleUniform() * this->x_size_), this->x_size_ - 1), 0);
}
// transfer_vecs_ is always x_size-major (that is trans==false)
T *tvec = transfer_vecs_->getData() + i_col * this->x_size_;
int n_rest = this->x_size_ - i_col;
T lr = par.getTransferLR(to_device_idx, from_device_idx, current_lr);
const PulsedUpdateMetaParameter<T> *up;
up = &par.transfer_up;
int n_transfers = MIN(par.n_cols_per_transfer, this->x_size_);
if (n_rest < n_transfers) {
// rest
forwardUpdate(to_device_idx, from_device_idx, i_col, lr, tvec, n_rest, false, *up);
// from beginning
forwardUpdate(
to_device_idx, from_device_idx, 0, lr, transfer_vecs_->getData(), n_transfers - n_rest,
false, *up);
} else {
forwardUpdate(to_device_idx, from_device_idx, i_col, lr, tvec, n_transfers, false, *up);
}
if (this->rw_rng_.sampleUniform() <
par.with_reset_prob) { // COL-wise prob!! device-wise reset_prob=1
this->rpucuda_device_vec_[from_device_idx]->resetCols(
this->dev_weights_ptrs_[from_device_idx], i_col, n_transfers, 1);
}
current_col_indices_[from_device_idx] = (i_col + n_transfers) % this->x_size_;
}
template <typename T>
inline int TransferRPUDeviceCuda<T>::getTransferEvery(int didx, int m_batch) const {
if (getPar().units_in_mbatch) {
return MAX(ceil(getPar().transfer_every_vec[didx] * m_batch), 0);
} else {
return MAX(round(getPar().transfer_every_vec[didx]), 0);
}
}
template <typename T> inline int getNChunks(int m_batch, T every) {
if (every <= 0) {
return 1;
} else {
return MAX((int)(round((T)m_batch / every)), 1); // take next integer for period
}
}
inline int getChunkSize(int m_batch, int nchunks) {
return (m_batch + nchunks - 1) / nchunks; // to ensure not to have residual
}
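// [Editorial comment] Worked example of the chunking arithmetic above: for m_batch = 100
// and a transfer period of every = 30 updates, getNChunks rounds 100 / 30 to 3 chunks and
// getChunkSize returns (100 + 2) / 3 = 34, so the batch is processed as 34 + 34 + 32
// updates with a transfer opportunity after each chunk.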
template <typename T>
pwukpvec_t<T> TransferRPUDeviceCuda<T>::getUpdateKernels(
int m_batch, int nK32, int use_bo64, bool out_trans, const PulsedUpdateMetaParameter<T> &up) {
pwukpvec_t<T> v;
int nchunks = getNChunks(m_batch, getTransferEvery(0, m_batch));
int chunk_size = getChunkSize(m_batch, nchunks);
// use the first device as the "FAST" device that gets updates with the true gradients.
v = this->rpucuda_device_vec_[0]->getUpdateKernels(chunk_size, nK32, use_bo64, out_trans, up);
if (nchunks > 1) {
for (auto &kpars : v) {
kpars->ensureChunk();
}
}
return v;
}
template <typename T>
void TransferRPUDeviceCuda<T>::runUpdateKernel(
pwukp_t<T> kpars,
CudaContext *up_context,
T *dev_weights,
int m_batch,
const BitLineMaker<T> *blm,
const PulsedUpdateMetaParameter<T> &up,
curandState_t *dev_states,
int one_sided,
uint32_t *x_counts_chunk,
uint32_t *d_counts_chunk) {
// calling kpars->run(..,this,..) directly should cause error because derived from abstract
// device..
DEBUG_OUT("start run update kernel.");
DEBUG_CALL(kpars->print(););
if (fully_hidden_) {
this->dev_weights_ptrs_[this->n_devices_ - 1] = dev_weights;
}
// always same (up) context.
CudaContext *c = up_context;
if (x_counts_chunk != nullptr || d_counts_chunk != nullptr) {
RPU_FATAL("Chunking not allowed here.");
}
int nchunks = getNChunks(m_batch, getTransferEvery(0, m_batch));
T lr = blm->getCurrentLR();
if (nchunks <= 1) {
// just update the whole batch we do not call kpars directly to
// also make possible to have non-pulsed devices. Note that only
// one device is directly updated with the gradients, thus
// tuning kpars are always unique (and valid to that rpu_device
// only). However, the other RPU device kernels will be tuned
// during transfer, since we use a separate PWU object
this->rpucuda_device_vec_[0]->runUpdateKernel(
kpars, c, this->dev_weights_ptrs_[0], m_batch, blm, up, dev_states, one_sided);
if (up._currently_tuning) {
return;
}
this->current_update_idx_ += m_batch; // first update idx
for (int j = 0; j < this->n_devices_ - 1; j++) {
// all transfer periods will be rounded up to batches.
int period = (getTransferEvery(j, m_batch) + m_batch - 1) / m_batch; // in m_batch
if (period > 0 && this->current_update_idx_ / m_batch % period == 0) {
transfer(j + 1, j, up, lr);
}
}
} else {
// need to do it chunkwise
int chunk_size = getChunkSize(m_batch, nchunks);
int batch_start = 0;
int nK32 = blm->getNK32Current();
auto x_counts = blm->getXCountsData();
auto d_counts = blm->getDCountsData();
for (int i_chunk = 0; i_chunk < nchunks; i_chunk++) {
// note that last chunk might be smaller.
int current_m_batch = chunk_size - MAX(batch_start + chunk_size - m_batch, 0);
this->rpucuda_device_vec_[0]->runUpdateKernel(
kpars,
c, // same context since sequence important
this->dev_weights_ptrs_[0], current_m_batch, blm, up, dev_states, one_sided,
x_counts + batch_start * this->x_size_ * nK32, // always non-trans
d_counts + batch_start * this->d_size_ * nK32);
if (up._currently_tuning) {
return;
}
this->current_update_idx_ += current_m_batch; // first update idx
batch_start += current_m_batch;
// transfer
for (int j = 0; j < this->n_devices_ - 1; j++) {
// all transfer periods will be rounded up to chunk_sizes
int period = (getTransferEvery(j, m_batch) + chunk_size - 1) / chunk_size;
if (period > 0 && this->current_update_idx_ / chunk_size % period == 0) {
transfer(j + 1, j, up, lr);
}
}
}
}
// only reduce at end
this->reduceToWeights(up_context, dev_weights);
}
template <typename T>
void TransferRPUDeviceCuda<T>::reduceToWeights(CudaContext *context, T *dev_weights) {
if (!fully_hidden_) {
VectorRPUDeviceCuda<T>::reduceToWeights(context, dev_weights);
}
}
template <typename T>
void TransferRPUDeviceCuda<T>::decayWeights(T *dev_weights, T alpha, bool bias_no_decay) {
if (fully_hidden_) {
this->dev_weights_ptrs_[this->n_devices_ - 1] = dev_weights;
}
VectorRPUDeviceCuda<T>::decayWeights(dev_weights, alpha, bias_no_decay);
}
template <typename T>
void TransferRPUDeviceCuda<T>::decayWeights(T *dev_weights, bool bias_no_decay) {
if (fully_hidden_) {
this->dev_weights_ptrs_[this->n_devices_ - 1] = dev_weights;
}
VectorRPUDeviceCuda<T>::decayWeights(dev_weights, bias_no_decay);
}
template <typename T> void TransferRPUDeviceCuda<T>::diffuseWeights(T *dev_weights) {
if (fully_hidden_) {
this->dev_weights_ptrs_[this->n_devices_ - 1] = dev_weights;
}
VectorRPUDeviceCuda<T>::diffuseWeights(dev_weights);
}
template <typename T> void TransferRPUDeviceCuda<T>::clipWeights(T *dev_weights, T clip) {
if (fully_hidden_) {
this->dev_weights_ptrs_[this->n_devices_ - 1] = dev_weights;
}
VectorRPUDeviceCuda<T>::clipWeights(dev_weights, clip);
}
template class TransferRPUDeviceCuda<float>;
#ifdef RPU_USE_DOUBLE
template class TransferRPUDeviceCuda<double>;
#endif
} // namespace RPU
|
7a165ac734ece3a45b6f5ea7e43eed4c187f67f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _VECTOR_ADDITION_KERNEL_H_
#define _VECTOR_ADDITION_KERNEL_H_
__global__ void vector_addition_kernel(float *A, float *B, float *C, int num_elements)
{
/* Obtain index of thread within the overall execution grid */
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
/* Compute the stride length = total number of threads */
int stride = blockDim.x * gridDim.x;
while (thread_id < num_elements) {
C[thread_id] = A[thread_id] + B[thread_id];
thread_id += stride;
}
return;
}
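/* Illustrative sketch (not part of the original header): one possible host-side
   launcher for the grid-stride kernel above. The helper name and the fixed
   256-thread block size are assumptions made for this example only. */
static inline void launch_vector_addition(float *d_A, float *d_B, float *d_C,
                                          int num_elements)
{
    int threads_per_block = 256;
    int num_blocks = (num_elements + threads_per_block - 1) / threads_per_block;
    /* Cap the grid; the grid-stride loop covers any leftover elements. */
    if (num_blocks > 1024)
        num_blocks = 1024;
    hipLaunchKernelGGL(vector_addition_kernel, dim3(num_blocks),
                       dim3(threads_per_block), 0, 0, d_A, d_B, d_C,
                       num_elements);
}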
#endif /* #ifndef _VECTOR_ADDITION_KERNEL_H_ */
| 7a165ac734ece3a45b6f5ea7e43eed4c187f67f6.cu | #ifndef _VECTOR_ADDITION_KERNEL_H_
#define _VECTOR_ADDITION_KERNEL_H_
__global__ void vector_addition_kernel(float *A, float *B, float *C, int num_elements)
{
/* Obtain index of thread within the overall execution grid */
int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
/* Compute the stride length = total number of threads */
int stride = blockDim.x * gridDim.x;
while (thread_id < num_elements) {
C[thread_id] = A[thread_id] + B[thread_id];
thread_id += stride;
}
return;
}
#endif /* #ifndef _VECTOR_ADDITION_KERNEL_H_ */
|
dd70eac729b060d97ba0c7ad6e2da1263a5f58fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "nodes\pass_through.h"
PassThrough::PassThrough(deepflow::NodeParam *param) : Node(param)
{
LOG_IF(FATAL, param->has_pass_through_param() == false) << "param.has_pass_through_param() == false";
_stop_gradients = param->pass_through_param().stop_gradients();
}
void PassThrough::init()
{
_outputs[0]->initValue(_inputs[0]->value()->dims(), _inputs[0]->value());
if (!_stop_gradients)
_outputs[0]->initDiff(_inputs[0]->value()->dims(),_inputs[0]->diff());
}
void PassThrough::forward()
{
}
void PassThrough::backward()
{
if (_stop_gradients && _inputs[0]->diff())
hipMemset(_inputs[0]->diff()->gpu_data(), 0, _inputs[0]->diff()->bytes());
}
std::string PassThrough::to_cpp() const
{
std::string cpp = "auto " + _name + " = df.pass_through(" + _input_name_for_cpp(0) + ", ";
cpp += "\"" + _name + "\");";
return cpp;
}
| dd70eac729b060d97ba0c7ad6e2da1263a5f58fc.cu | #include "nodes\pass_through.h"
PassThrough::PassThrough(deepflow::NodeParam *param) : Node(param)
{
LOG_IF(FATAL, param->has_pass_through_param() == false) << "param.has_pass_through_param() == false";
_stop_gradients = param->pass_through_param().stop_gradients();
}
void PassThrough::init()
{
_outputs[0]->initValue(_inputs[0]->value()->dims(), _inputs[0]->value());
if (!_stop_gradients)
_outputs[0]->initDiff(_inputs[0]->value()->dims(),_inputs[0]->diff());
}
void PassThrough::forward()
{
}
void PassThrough::backward()
{
if (_stop_gradients && _inputs[0]->diff())
cudaMemset(_inputs[0]->diff()->gpu_data(), 0, _inputs[0]->diff()->bytes());
}
std::string PassThrough::to_cpp() const
{
std::string cpp = "auto " + _name + " = df.pass_through(" + _input_name_for_cpp(0) + ", ";
cpp += "\"" + _name + "\");";
return cpp;
}
|
3a399af64bbf9f491ad4b5f34f9df0aeb4910177.hip | // !!! This is a file automatically generated by hipify!!!
#pragma once
//CUDA
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
//User defined
#include <iostream>
#include "../inc/utils.cuh"
#include "../inc/contrast.cuh"
//OpenCV
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#define LOW_CONTRAST_LENA "../input/500x330.png"
using namespace cv;
int main() {
cv::Mat image = imread(LOW_CONTRAST_LENA, cv::IMREAD_GRAYSCALE);
cv::Mat enhancedImage(image.size(), CV_8U);
auto start = std::chrono::high_resolution_clock::now();
//for (int i = 0; i < 100; ++i) {
enhanceContrast(enhancedImage, image, GRAYSCALE_RANGE);
// }
auto finish = std::chrono::high_resolution_clock::now();
std::cout << "Histogram equalization run time GPU optimized : "
<< std::chrono::duration_cast<std::chrono::microseconds>(finish - start).count() << "microsec\n";
//PLOTTING
imshow("Lena", image);
imshow("Lena enhanced", enhancedImage);
cv::waitKey(0);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
hipError_t cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
| 3a399af64bbf9f491ad4b5f34f9df0aeb4910177.cu | #pragma once
//CUDA
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
//User defined
#include <iostream>
#include "../inc/utils.cuh"
#include "../inc/contrast.cuh"
//OpenCV
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#define LOW_CONTRAST_LENA "../input/500x330.png"
using namespace cv;
int main() {
cv::Mat image = imread(LOW_CONTRAST_LENA, cv::IMREAD_GRAYSCALE);
cv::Mat enhancedImage(image.size(), CV_8U);
auto start = std::chrono::high_resolution_clock::now();
//for (int i = 0; i < 100; ++i) {
enhanceContrast(enhancedImage, image, GRAYSCALE_RANGE);
// }
auto finish = std::chrono::high_resolution_clock::now();
std::cout << "Histogram equalization run time GPU optimized : "
<< std::chrono::duration_cast<std::chrono::microseconds>(finish - start).count() << "microsec\n";
//PLOTTING
imshow("Lena", image);
imshow("Lena enhanced", enhancedImage);
cv::waitKey(0);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaError_t cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
|
34f7772538ff781d083ac4eea68075edbb03a3ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "DeviceStorage.hpp"
#include <stdio.h>
#include <stdlib.h>
#include <complex>
#include "hip/hip_runtime.h"
#include "rocblas.h"
#ifdef _OPENMP
#include <omp.h>
#else
#ifndef LSMS_DUMMY_OPENMP
#define LSMS_DUMMY_OPENMP
inline int omp_get_max_threads() {return 1;}
inline int omp_get_num_threads() {return 1;}
inline int omp_get_thread_num() {return 0;}
#endif
#endif
#include "cudaCheckError.hpp"
#include "cudaDoubleComplex.hpp"
#include <assert.h>
#include <LAPACK.hpp>
//#include <lapack.h>
//#include <mpi.h>
extern "C" int zmatinv_prep1_ (void **a, void **b, int *n, int *lda, hipStream_t thisstream);
extern "C" int zmatinv_batch_ (hipDoubleComplex **A, hipDoubleComplex **Ainv, int *n, int *batch, hipStream_t thisstream);
extern "C" int ilaenv_(int*,const char*,const char*,int*,int*,int*,int*);
void handle_cuda_error ( hipError_t cerr, const char *errmsg )
{
if ( cerr ) {
printf ("CUDA ERROR (%d) %s \n", cerr, errmsg);
abort();
//MPI_Abort(MPI_COMM_WORLD, 100);
}
else {
//printf ("SUCCESS !!! %s \n", errmsg);
}
}
void handle_cublas_error ( hipblasStatus_t cs, const char *errmsg )
{
if ( cs ) {
printf ("cuBLAS ERROR (%d) %s \n", cs, errmsg);
abort();
//MPI_Abort(MPI_COMM_WORLD, 101);
}
else {
//printf ("SUCCESS !!! %s \n", errmsg);
}
}
//TODO call directly from calculateTauMatrix (don't route through fortran)
extern "C"
void zblock_lu_cuda_c_ ( std::complex<double> *a, int *lda, int *blk_sz, int *nblk, int *ipvt, int *mp, int *idcol, int *k)
//===================================================================================================================
/*
Performs a partial inversion of the a matrix to return the inverse of the upper diagonal
subblock.
a : input matrix - double complex
blk_sz : integer array giving the size of each subblock
nblk : the number of subblocks
ipvt : integer work array (not tested in c version)
idcol : integer array specifying symmetry (not tested in c version)
k : returns the actual number of columns in the calculated inverse
*/
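/*
   Background sketch added for clarity (an illustration, not from the original
   source): for a two-block partition
       M = [ A  B ]
           [ C  D ]
   the (1,1) block of M^{-1} equals (A - B D^{-1} C)^{-1}, the inverse of the
   Schur complement of D. The loop below applies this identity from the last
   diagonal block upward: invert the current diagonal block, form D^{-1} C with
   one zgemm, then fold B (D^{-1} C) into the remaining leading blocks, so only
   the leading blk_sz[0] columns need to be produced.
*/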
{
//TODO:
// adjust allocation sizes
// dynamically choose hybrid or not
// validate flop count
unsigned long long flops=0;
/********************parameters for zgemm rank maximization*******************/
int zgemm_rank = 600;
int gpu_only_blks = 0;
int remaining=0;
for(int i=0;i<gpu_only_blks;i++) {
(*nblk)--;
remaining+=blk_sz[*nblk];
}
while(remaining>0) {
blk_sz[*nblk]=min(55,remaining);
remaining-=55;
(*nblk)++;
}
int currentRank=0;
int m, n;
int ioff, joff;
int info;
hipError_t ce;
hipblasStatus_t cublasStat;
// set constants
const hipDoubleComplex cone = make_cuDoubleComplex( 1.0, 0.0);
const hipDoubleComplex cmone = make_cuDoubleComplex(-1.0, 0.0);
const hipDoubleComplex czero = make_cuDoubleComplex( 0.0, 0.0);
// get the thread number
int threadId = omp_get_thread_num();
/***************************One time initialization, should be moved outside******************************************/
int max_blk_sz=blk_sz[0];
if(*nblk>1)
{
for(int i=1; i<*nblk; i++)
max_blk_sz=max(max_blk_sz,blk_sz[i]);
}
const int MAX_THREADS=16;
//TODO dynamically size
static bool initialized[MAX_THREADS] = {false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false};
static hipDoubleComplex *vdevWork[MAX_THREADS];
static hipDoubleComplex *vdevInv[MAX_THREADS];
static hipDoubleComplex *vdevA2[MAX_THREADS];
static std::complex<double> *vdevHostDiag[MAX_THREADS];
static std::complex<double> *vwork[MAX_THREADS];
static int *vhostIPVT[MAX_THREADS];
static int lwork;
if ( ! initialized[threadId] ) {
//calculate optimal work size for zgetri
int one=1; int mone=-1;
int NB = ilaenv_(&one,"ZGETRI","",&max_blk_sz,&mone,&mone,&mone);
lwork= max_blk_sz * NB;
// allocate space on device
ce = hipMalloc ( &vdevWork[threadId], max_blk_sz*max_blk_sz*sizeof(hipDoubleComplex));
handle_cuda_error (ce, "hipMalloc devWork");
ce = hipMalloc ( &vdevInv[threadId], max_blk_sz*max_blk_sz*sizeof(hipDoubleComplex));
handle_cuda_error (ce, "hipMalloc devInv");
int LDA= *lda;
ce = hipHostMalloc ( &vwork[threadId], lwork *sizeof(std::complex<double>));
handle_cuda_error (ce, "hipHostMalloc vwork");
ce = hipMalloc ( &vdevA2[threadId], max_blk_sz * LDA *sizeof(hipDoubleComplex));
handle_cuda_error (ce, "hipMalloc devA2");
ce = hipHostMalloc ( &vdevHostDiag[threadId], max_blk_sz * max_blk_sz *sizeof(std::complex<double>));
handle_cuda_error (ce, "hipHostMalloc vdevHostDiag");
ce = hipHostMalloc((void**)&vhostIPVT[threadId], max_blk_sz*sizeof(int));
handle_cuda_error (ce, "hipHostMalloc vhostIPVT");
//this speeds up the small block inverse
hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte);
initialized[threadId] = true;
}
/**********************************************************************************************************************/
/********************assign thread private variables********************************/
hipStream_t stream1=get_stream_(0);
hipStream_t stream2=get_stream_(1);
hipEvent_t done_event=get_cuda_event_();
hipDoubleComplex *devWork = vdevWork[threadId];
hipDoubleComplex *devInv = vdevInv[threadId];
hipDoubleComplex *devA=(hipDoubleComplex*)get_dev_m_();
hipDoubleComplex *devA2 = vdevA2[threadId];
std::complex<double> *work = vwork[threadId];
hipblasHandle_t cublasHandle = get_cublas_handle_();
int *hostIPVT=vhostIPVT[threadId];
Complex *hostAdiag = (Complex*)vdevHostDiag[threadId];
/***********************************************************************************/
// add up the sizes of the subblocks to get the size of the entire matrix
int na;
na = 0;
for ( int i=0; i<abs(*nblk); i++ ) {
na += blk_sz[i];
}
// eliminate columns that are equivalent due to symmetry
if ( idcol[0] == 0 ) {
*k = 1;
}
else {
*k = blk_sz[0]+1;
for ( int i=blk_sz[0]-1; i>=0; i-- ) {
if ( idcol[0] == 0 || idcol[i] == i ) {
*k -= 1;
if ( *k != i ) {
printf ("Eliminate columns that are equivalent due to symmetry section in zblock_lu_cuda_c not tested\n");
abort();
// zcopy ( na-blk_sz[0], a[i*lda+blk_sz[0]], 1, a[*k*lda+blk_sz[0]], 1 );
}
}
}
}
#ifndef BUILDKKRMATRIX_GPU
// copy matrix to device
cublasStat = hipblasSetMatrix ( na, na, sizeof(hipDoubleComplex), a, *lda, devA, *lda);
handle_cublas_error ( cublasStat, "hipblasSetMatrix devA ");
#endif
if ( *nblk > 0 ) {
n = blk_sz[*nblk-1];
joff = na - n;
// loop over sub-blocks
for ( int iblk=*nblk-1; iblk>0; iblk-- ) {
m = n;
ioff = joff;
n = blk_sz[iblk-1];
joff = joff-n;
//TODO update condition to choose branch, should do this branch when remaining size is small...
// HPL factorization and left propagation
if ( m<56 ) { //CUDA only version
//A^-1 // invert the clique
// re-package the diagonal block into a dense matrix suitable for sending to zmatinv
hipDoubleComplex *devAdiag;
devAdiag = &devA[ioff* *lda + ioff];
info = zmatinv_prep1_ ( (void**)&devAdiag, (void**)&devWork, &m, lda, stream1 );
if ( info ) { printf (" zmatinv_prep1 returned error code %d \n", info); abort();}
int one = 1;
info = zmatinv_batch_ ( &devWork, &devInv, &m, &one, stream1 );
if ( info ) { printf (" zmatinv_batch returned error code %d \n", info); printf (" m = %d, one = %d \n", m, one ); abort(); }
flops += m * m * m;
}
else { //HYBRID version, do small inverse on the host. This works well.
hipDoubleComplex *devAdiag = (hipDoubleComplex*)&devA[ioff* *lda + ioff];
hipblasSetStream ( cublasHandle, stream1 );
cublasStat = hipblasGetMatrixAsync ( m, m, sizeof(hipDoubleComplex), devAdiag, *lda, hostAdiag, m, stream1 );
hipEventRecord(done_event,stream1);
//wait for transfers to the host to finish
hipEventSynchronize(done_event);
int info;
//zgetrf on host
LAPACK::zgetrf_(&m, &m, hostAdiag, &m, hostIPVT, &info);
//zgetri on host
LAPACK::zgetri_(&m, hostAdiag, &m, hostIPVT, (Complex*)work, &lwork, &info);
flops += m * m * m;
//copy_async down to device
cublasStat = hipblasSetMatrixAsync ( m, m, sizeof(hipDoubleComplex), hostAdiag, m, devInv, m, stream1 );
hipEventRecord(done_event,stream1);
//wait for the transfer to the device to finish
hipEventSynchronize(done_event);
}
//CA^-1
#ifdef PRINT_ZGEMM
fprintf(stderr, "m: %d n: %d k: %d lda: %d ldb: %d ldc: %d\n", m, ioff, m, m, *lda, max_blk_sz);
#endif
cublasStat = hipblasZgemm ( cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, m, ioff, m, &cone, devInv, m, &devA[ioff], *lda, &czero, devA2, max_blk_sz );
handle_cublas_error ( cublasStat, "Error in hipblasZgemm #1\n" );
flops+= m * ioff * m;
//Mark end of small zgemm in stream1
hipEventRecord(done_event,stream1);
//stream 2 must wait for the small zgemm to finish
hipStreamWaitEvent(stream2,done_event,0);
// Trailing matrix update
currentRank+=m;
if ( currentRank<zgemm_rank && iblk>1) {
// only update the next block row
// little chance for hybrid acceleration here - so ignore for now.
hipblasSetStream ( cublasHandle, stream1 );
// need to place A2 back into A
ce = hipMemcpy2DAsync ( &devA[ioff], *lda*sizeof(hipDoubleComplex), devA2, max_blk_sz*sizeof(hipDoubleComplex), m*sizeof(hipDoubleComplex), ioff, hipMemcpyDeviceToDevice, stream1 );
#ifdef PRINT_ZGEMM
fprintf(stderr, "m: %d n: %d k: %d lda: %d ldb: %d ldc: %d\n", n, ioff, currentRank, *lda, *lda, *lda);
#endif
cublasStat = hipblasZgemm ( cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, ioff, currentRank, &cmone,
&devA[ioff* *lda +ioff-n], *lda,
//devA2, max_blk_sz, &cone,
&devA[ioff], *lda, &cone,
&devA[ioff-n], *lda );
flops += n * ioff * currentRank;
#ifdef PRINT_ZGEMM
fprintf(stderr, "m: %d n: %d k: %d lda: %d ldb: %d ldc: %d\n", ioff-n, n, currentRank, *lda, *lda, *lda);
#endif
cublasStat = hipblasZgemm ( cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, ioff-n, n, currentRank, &cmone,
&devA[ioff * *lda], *lda,
&devA[(ioff-n) * *lda + ioff], *lda, &cone,
&devA[(ioff-n) * *lda], *lda );
flops += (ioff-n)*n*currentRank;
}
else {
// update the full trailing matrix
hipblasSetStream ( cublasHandle, stream1 );
// perform a portion of the zgemm on the gpu
// first need to place A2 back into A
ce = hipMemcpy2DAsync ( &devA[ioff], *lda*sizeof(hipDoubleComplex), devA2, max_blk_sz*sizeof(hipDoubleComplex), m*sizeof(hipDoubleComplex), ioff, hipMemcpyDeviceToDevice, stream1 );
//D=CA^-1B
#ifdef PRINT_ZGEMM
fprintf(stderr, "m: %d n: %d k: %d lda: %d ldb: %d ldc: %d\n", ioff, ioff, currentRank, *lda, *lda, *lda);
#endif
cublasStat = hipblasZgemm ( cublasHandle, HIPBLAS_OP_N, HIPBLAS_OP_N, ioff, ioff, currentRank, &cmone,
&devA[ioff* *lda], *lda,
&devA[ioff], *lda , &cone,
devA, *lda);
flops += ioff * ioff * currentRank;
// just did a full trailing submatrix update, so reset block row delay counter
currentRank=0;
}
} // end for
cublasStat = hipblasGetMatrixAsync ( blk_sz[0], blk_sz[0], sizeof(hipDoubleComplex), devA, *lda, a, *lda, stream1 );
} // end if ( *nblk > 0 )
*k = blk_sz[0];
hipEventRecord(done_event,stream1);
//wait for last transfer to finish
hipEventSynchronize(done_event);
// clean up
//hipFree (devWork);
//hipFree (devInv);
//hipFree (devA);
//hipFree (devA2);
//hipHostFree (ap);
#ifdef PRINT_FLOPS
printf("BLOCK_INV ZGEMM FLOPS: %llu\n", flops*4*2);
#endif
}
| 34f7772538ff781d083ac4eea68075edbb03a3ad.cu | #include "DeviceStorage.hpp"
#include <stdio.h>
#include <stdlib.h>
#include <complex>
#include "cuda_runtime.h"
#include "cublas_v2.h"
#ifdef _OPENMP
#include <omp.h>
#else
#ifndef LSMS_DUMMY_OPENMP
#define LSMS_DUMMY_OPENMP
inline int omp_get_max_threads() {return 1;}
inline int omp_get_num_threads() {return 1;}
inline int omp_get_thread_num() {return 0;}
#endif
#endif
#include "cudaCheckError.hpp"
#include "cudaDoubleComplex.hpp"
#include <assert.h>
#include <LAPACK.hpp>
//#include <lapack.h>
//#include <mpi.h>
extern "C" int zmatinv_prep1_ (void **a, void **b, int *n, int *lda, cudaStream_t thisstream);
extern "C" int zmatinv_batch_ (cuDoubleComplex **A, cuDoubleComplex **Ainv, int *n, int *batch, cudaStream_t thisstream);
extern "C" int ilaenv_(int*,const char*,const char*,int*,int*,int*,int*);
void handle_cuda_error ( cudaError_t cerr, const char *errmsg )
{
if ( cerr ) {
printf ("CUDA ERROR (%d) %s \n", cerr, errmsg);
abort();
//MPI_Abort(MPI_COMM_WORLD, 100);
}
else {
//printf ("SUCCESS !!! %s \n", errmsg);
}
}
void handle_cublas_error ( cublasStatus_t cs, const char *errmsg )
{
if ( cs ) {
printf ("cuBLAS ERROR (%d) %s \n", cs, errmsg);
abort();
//MPI_Abort(MPI_COMM_WORLD, 101);
}
else {
//printf ("SUCCESS !!! %s \n", errmsg);
}
}
//TODO call directly from calculateTauMatrix (don't route through fortran)
extern "C"
void zblock_lu_cuda_c_ ( std::complex<double> *a, int *lda, int *blk_sz, int *nblk, int *ipvt, int *mp, int *idcol, int *k)
//===================================================================================================================
/*
Performs a partial inversion of the a matrix to return the inverse of the upper diagonal
subblock.
a : input matrix - double complex
blk_sz : integer array giving the size of each subblock
nblk : the number of subblocks
ipvt : integer work array (not tested in c version)
idcol : integer array specifying symmetry (not tested in c version)
k : returns the actual number of columns in the calculated inverse
*/
{
//TODO:
// adjust allocation sizes
// dynamically choose hybrid or not
// validate flop count
unsigned long long flops=0;
/********************parameters for zgemm rank maximization*******************/
int zgemm_rank = 600;
int gpu_only_blks = 0;
int remaining=0;
for(int i=0;i<gpu_only_blks;i++) {
(*nblk)--;
remaining+=blk_sz[*nblk];
}
while(remaining>0) {
blk_sz[*nblk]=min(55,remaining);
remaining-=55;
(*nblk)++;
}
int currentRank=0;
int m, n;
int ioff, joff;
int info;
cudaError_t ce;
cublasStatus_t cublasStat;
// set constants
const cuDoubleComplex cone = make_cuDoubleComplex( 1.0, 0.0);
const cuDoubleComplex cmone = make_cuDoubleComplex(-1.0, 0.0);
const cuDoubleComplex czero = make_cuDoubleComplex( 0.0, 0.0);
// get the thread number
int threadId = omp_get_thread_num();
/***************************One time initialization, should be moved outside******************************************/
int max_blk_sz=blk_sz[0];
if(*nblk>1)
{
for(int i=1; i<*nblk; i++)
max_blk_sz=max(max_blk_sz,blk_sz[i]);
}
const int MAX_THREADS=16;
//TODO dynamically size
static bool initialized[MAX_THREADS] = {false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false};
static cuDoubleComplex *vdevWork[MAX_THREADS];
static cuDoubleComplex *vdevInv[MAX_THREADS];
static cuDoubleComplex *vdevA2[MAX_THREADS];
static std::complex<double> *vdevHostDiag[MAX_THREADS];
static std::complex<double> *vwork[MAX_THREADS];
static int *vhostIPVT[MAX_THREADS];
static int lwork;
if ( ! initialized[threadId] ) {
//calculate optimal work size for zgetri
int one=1; int mone=-1;
int NB = ilaenv_(&one,"ZGETRI","",&max_blk_sz,&mone,&mone,&mone);
lwork= max_blk_sz * NB;
// allocate space on device
ce = cudaMalloc ( &vdevWork[threadId], max_blk_sz*max_blk_sz*sizeof(cuDoubleComplex));
handle_cuda_error (ce, "cudaMalloc devWork");
ce = cudaMalloc ( &vdevInv[threadId], max_blk_sz*max_blk_sz*sizeof(cuDoubleComplex));
handle_cuda_error (ce, "cudaMalloc devInv");
int LDA= *lda;
ce = cudaMallocHost ( &vwork[threadId], lwork *sizeof(std::complex<double>));
handle_cuda_error (ce, "cudaMallocHost vwork");
ce = cudaMalloc ( &vdevA2[threadId], max_blk_sz * LDA *sizeof(cuDoubleComplex));
handle_cuda_error (ce, "cudaMalloc devA2");
ce = cudaMallocHost ( &vdevHostDiag[threadId], max_blk_sz * max_blk_sz *sizeof(std::complex<double>));
handle_cuda_error (ce, "cudaMallocHost vdevHostDiag");
ce = cudaMallocHost((void**)&vhostIPVT[threadId], max_blk_sz*sizeof(int));
handle_cuda_error (ce, "cudaMallocHost vhostIPVT");
//this speeds up the small block inverse
cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte);
initialized[threadId] = true;
}
/**********************************************************************************************************************/
/********************assign thread private variables********************************/
cudaStream_t stream1=get_stream_(0);
cudaStream_t stream2=get_stream_(1);
cudaEvent_t done_event=get_cuda_event_();
cuDoubleComplex *devWork = vdevWork[threadId];
cuDoubleComplex *devInv = vdevInv[threadId];
cuDoubleComplex *devA=(cuDoubleComplex*)get_dev_m_();
cuDoubleComplex *devA2 = vdevA2[threadId];
std::complex<double> *work = vwork[threadId];
cublasHandle_t cublasHandle = get_cublas_handle_();
int *hostIPVT=vhostIPVT[threadId];
Complex *hostAdiag = (Complex*)vdevHostDiag[threadId];
/***********************************************************************************/
// add up the sizes of the subblocks to get the size of the entire matrix
int na;
na = 0;
for ( int i=0; i<abs(*nblk); i++ ) {
na += blk_sz[i];
}
// eliminate columns that are equivalent due to symmetry
if ( idcol[0] == 0 ) {
*k = 1;
}
else {
*k = blk_sz[0]+1;
for ( int i=blk_sz[0]-1; i>=0; i-- ) {
if ( idcol[0] == 0 || idcol[i] == i ) {
*k -= 1;
if ( *k != i ) {
printf ("Eliminate columns that are equivalent due to symmetry section in zblock_lu_cuda_c not tested\n");
abort();
// zcopy ( na-blk_sz[0], a[i*lda+blk_sz[0]], 1, a[*k*lda+blk_sz[0]], 1 );
}
}
}
}
#ifndef BUILDKKRMATRIX_GPU
// copy matrix to device
cublasStat = cublasSetMatrix ( na, na, sizeof(cuDoubleComplex), a, *lda, devA, *lda);
handle_cublas_error ( cublasStat, "cublasSetMatrix devA ");
#endif
if ( *nblk > 0 ) {
n = blk_sz[*nblk-1];
joff = na - n;
// loop over sub-blocks
for ( int iblk=*nblk-1; iblk>0; iblk-- ) {
m = n;
ioff = joff;
n = blk_sz[iblk-1];
joff = joff-n;
//TODO update condition to choose branch, should do this branch when remaining size is small...
// HPL factorization and left propagation
if ( m<56 ) { //CUDA only version
//A^-1 // invert the clique
// re-package the diagonal block into a dense matrix suitable for sending to zmatinv
cuDoubleComplex *devAdiag;
devAdiag = &devA[ioff* *lda + ioff];
info = zmatinv_prep1_ ( (void**)&devAdiag, (void**)&devWork, &m, lda, stream1 );
if ( info ) { printf (" zmatinv_prep1 returned error code %d \n", info); abort();}
int one = 1;
info = zmatinv_batch_ ( &devWork, &devInv, &m, &one, stream1 );
if ( info ) { printf (" zmatinv_batch returned error code %d \n", info); printf (" m = %d, one = %d \n", m, one ); abort(); }
flops += m * m * m;
}
else { //HYBRID version, do small inverse on the host. This works well.
cuDoubleComplex *devAdiag = (cuDoubleComplex*)&devA[ioff* *lda + ioff];
cublasSetStream ( cublasHandle, stream1 );
cublasStat = cublasGetMatrixAsync ( m, m, sizeof(cuDoubleComplex), devAdiag, *lda, hostAdiag, m, stream1 );
cudaEventRecord(done_event,stream1);
//wait for transfers to the host to finish
cudaEventSynchronize(done_event);
int info;
//zgetrf on host
LAPACK::zgetrf_(&m, &m, hostAdiag, &m, hostIPVT, &info);
//zgetri on host
LAPACK::zgetri_(&m, hostAdiag, &m, hostIPVT, (Complex*)work, &lwork, &info);
flops += m * m * m;
//copy_async down to device
cublasStat = cublasSetMatrixAsync ( m, m, sizeof(cuDoubleComplex), hostAdiag, m, devInv, m, stream1 );
cudaEventRecord(done_event,stream1);
//wait for the transfer to the device to finish
cudaEventSynchronize(done_event);
}
//CA^-1
#ifdef PRINT_ZGEMM
fprintf(stderr, "m: %d n: %d k: %d lda: %d ldb: %d ldc: %d\n", m, ioff, m, m, *lda, max_blk_sz);
#endif
cublasStat = cublasZgemm ( cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, m, ioff, m, &cone, devInv, m, &devA[ioff], *lda, &czero, devA2, max_blk_sz );
handle_cublas_error ( cublasStat, "Error in cublasZgemm #1\n" );
flops+= m * ioff * m;
//Mark end of small zgemm in stream1
cudaEventRecord(done_event,stream1);
//stream 2 must wait for the small zgemm to finish
cudaStreamWaitEvent(stream2,done_event,0);
// Trailing matrix update
currentRank+=m;
if ( currentRank<zgemm_rank && iblk>1) {
// only update the next block row
// little chance for hybrid acceleration here - so ignore for now.
cublasSetStream ( cublasHandle, stream1 );
// need to place A2 back into A
ce = cudaMemcpy2DAsync ( &devA[ioff], *lda*sizeof(cuDoubleComplex), devA2, max_blk_sz*sizeof(cuDoubleComplex), m*sizeof(cuDoubleComplex), ioff, cudaMemcpyDeviceToDevice, stream1 );
#ifdef PRINT_ZGEMM
fprintf(stderr, "m: %d n: %d k: %d lda: %d ldb: %d ldc: %d\n", n, ioff, currentRank, *lda, *lda, *lda);
#endif
cublasStat = cublasZgemm ( cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, n, ioff, currentRank, &cmone,
&devA[ioff* *lda +ioff-n], *lda,
//devA2, max_blk_sz, &cone,
&devA[ioff], *lda, &cone,
&devA[ioff-n], *lda );
flops += n * ioff * currentRank;
#ifdef PRINT_ZGEMM
fprintf(stderr, "m: %d n: %d k: %d lda: %d ldb: %d ldc: %d\n", ioff-n, n, currentRank, *lda, *lda, *lda);
#endif
cublasStat = cublasZgemm ( cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, ioff-n, n, currentRank, &cmone,
&devA[ioff * *lda], *lda,
&devA[(ioff-n) * *lda + ioff], *lda, &cone,
&devA[(ioff-n) * *lda], *lda );
flops += (ioff-n)*n*currentRank;
}
else {
// update the full trailing matrix
cublasSetStream ( cublasHandle, stream1 );
// perform a portion of the zgemm on the gpu
// first need to place A2 back into A
ce = cudaMemcpy2DAsync ( &devA[ioff], *lda*sizeof(cuDoubleComplex), devA2, max_blk_sz*sizeof(cuDoubleComplex), m*sizeof(cuDoubleComplex), ioff, cudaMemcpyDeviceToDevice, stream1 );
//D=CA^-1B
#ifdef PRINT_ZGEMM
fprintf(stderr, "m: %d n: %d k: %d lda: %d ldb: %d ldc: %d\n", ioff, ioff, currentRank, *lda, *lda, *lda);
#endif
cublasStat = cublasZgemm ( cublasHandle, CUBLAS_OP_N, CUBLAS_OP_N, ioff, ioff, currentRank, &cmone,
&devA[ioff* *lda], *lda,
&devA[ioff], *lda , &cone,
devA, *lda);
flops += ioff * ioff * currentRank;
// just did a full trailing submatrix update, so reset block row delay counter
currentRank=0;
}
} // end for
cublasStat = cublasGetMatrixAsync ( blk_sz[0], blk_sz[0], sizeof(cuDoubleComplex), devA, *lda, a, *lda, stream1 );
} // end if ( *nblk > 0 )
*k = blk_sz[0];
cudaEventRecord(done_event,stream1);
//wait for last transfer to finish
cudaEventSynchronize(done_event);
// clean up
//cudaFree (devWork);
//cudaFree (devInv);
//cudaFree (devA);
//cudaFree (devA2);
//cudaFreeHost (ap);
#ifdef PRINT_FLOPS
printf("BLOCK_INV ZGEMM FLOPS: %llu\n", flops*4*2);
#endif
}
|
4a4e0c766ecb300644679b1d241c83d6c5ac5ee7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <assert.h>
#include <stdlib.h>
using namespace std;
// 7 x 7 convolutional mask
#define MASK_DIM 7
// Amount the mask will hang over the matrix
#define MASK_OFFSET (MASK_DIM / 2)
// Allocate mask in constant memory
__constant__ int mask[7 * 7];
// 2D Convolution Kernel
// Takes:
// matrix: Input matrix
// result: Convolution result
// N: Dimensions of the matrices
__global__ void convolution_2d(int *matrix, int *result, int N){
// Calculate the global thread positions
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// Starting index for calculation
int start_r = row - MASK_OFFSET;
int start_c = col - MASK_OFFSET;
// Temp value for accumulating the result
int temp = 0;
// Iterate over all the rows
for(int i = 0; i < MASK_DIM; i++){
// Go over each column
for(int j = 0; j < MASK_DIM; j++){
// Range check for rows
if((start_r + i) >= 0 && (start_r + i) < N){
// Range check for columns
if((start_c + j) >= 0 && (start_c + j) < N){
// Accumulate result
temp += matrix[(start_r + i) * N + (start_c + j)] *
mask[i * MASK_DIM + j];
}
}
}
}
// Write back the result
result[row * N + col] = temp;
}
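// Worked example (added for clarity, not in the original source): with
// MASK_DIM = 7 we get MASK_OFFSET = 3, so the output element at (row, col)
// reads the 7 x 7 input window spanning rows row-3..row+3 and columns
// col-3..col+3; the range checks above drop any terms that fall outside the
// matrix, which is equivalent to zero padding.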
// Initializes an n x n matrix with random numbers
// Takes:
// m : Pointer to the matrix
// n : Dimension of the matrix (square)
void init_matrix(int *m, int n){
for(int i = 0; i < n; i++){
for(int j = 0; j < n; j++){
m[n * i + j] = rand() % 100;
}
}
}
// Verifies the 2D convolution result on the CPU
// Takes:
// m: Original matrix
// mask: Convolutional mask
// result: Result from the GPU
// N: Dimensions of the matrix
void verify_result(int *m, int *mask, int *result, int N){
// Temp value for accumulating results
int temp;
// Intermediate value for more readable code
int offset_r;
int offset_c;
// Go over each row
for(int i = 0; i < N; i++){
// Go over each column
for(int j = 0; j < N; j++){
// Reset the temp variable
temp = 0;
// Go over each mask row
for(int k = 0; k < MASK_DIM; k++){
// Update offset value for row
offset_r = i - MASK_OFFSET + k;
// Go over each mask column
for(int l = 0; l < MASK_DIM; l++){
// Update offset value for column
offset_c = j - MASK_OFFSET + l;
// Range checks if we are hanging off the matrix
if(offset_r >= 0 && offset_r < N){
if(offset_c >= 0 && offset_c < N){
// Accumulate partial results
temp += m[offset_r * N + offset_c] *
mask[k * MASK_DIM + l];
}
}
}
}
// Fail if the results don't match
assert(result[i * N + j] == temp);
}
}
}
int main(){
// Dimensions of the matrix (2 ^ 10 x 2 ^ 10)
int N = 1 << 10;
// Size of the matrix (in bytes)
size_t bytes_n = N * N * sizeof(int);
// Allocate the matrix and initialize it
int *matrix = new int[N * N];
int *result = new int[N * N];
init_matrix(matrix, N);
// Size of the mask in bytes
size_t bytes_m = MASK_DIM * MASK_DIM * sizeof(int);
// Allocate the mask and initialize it
int *h_mask = new int[MASK_DIM * MASK_DIM];
init_matrix(h_mask, MASK_DIM);
// Allocate device memory
int *d_matrix;
int *d_result;
hipMalloc(&d_matrix, bytes_n);
hipMalloc(&d_result, bytes_n);
// Copy data to the device
hipMemcpy(d_matrix, matrix, bytes_n, hipMemcpyHostToDevice);
hipMemcpyToSymbol(mask, h_mask, bytes_m);
// Calculate grid dimensions
int THREADS = 16;
int BLOCKS = (N + THREADS - 1) / THREADS;
// Dimension launch arguments
dim3 block_dim(THREADS, THREADS);
dim3 grid_dim(BLOCKS, BLOCKS);
// Perform 2D Convolution
hipLaunchKernelGGL(( convolution_2d), dim3(grid_dim), dim3(block_dim), 0, 0, d_matrix, d_result, N);
// Copy the result back to the CPU
hipMemcpy(result, d_result, bytes_n, hipMemcpyDeviceToHost);
// Functional test
verify_result(matrix, h_mask, result, N);
cout << "COMPLETED SUCCESSFULLY!" << endl;
// Free the memory we allocated
delete [] matrix;
delete [] result;
delete [] h_mask;
hipFree(d_matrix);
hipFree(d_result);
return 0;
}
| 4a4e0c766ecb300644679b1d241c83d6c5ac5ee7.cu | #include <iostream>
#include <assert.h>
#include <stdlib.h>
using namespace std;
// 7 x 7 convolutional mask
#define MASK_DIM 7
// Amount the mask will hang over the matrix
#define MASK_OFFSET (MASK_DIM / 2)
// Allocate mask in constant memory
__constant__ int mask[7 * 7];
// 2D Convolution Kernel
// Takes:
// matrix: Input matrix
// result: Convolution result
// N: Dimensions of the matrices
__global__ void convolution_2d(int *matrix, int *result, int N){
// Calculate the global thread positions
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// Starting index for calculation
int start_r = row - MASK_OFFSET;
int start_c = col - MASK_OFFSET;
// Temp value for accumulating the result
int temp = 0;
// Iterate over all the rows
for(int i = 0; i < MASK_DIM; i++){
// Go over each column
for(int j = 0; j < MASK_DIM; j++){
// Range check for rows
if((start_r + i) >= 0 && (start_r + i) < N){
// Range check for columns
if((start_c + j) >= 0 && (start_c + j) < N){
// Accumulate result
temp += matrix[(start_r + i) * N + (start_c + j)] *
mask[i * MASK_DIM + j];
}
}
}
}
// Write back the result
result[row * N + col] = temp;
}
// Initializes an n x n matrix with random numbers
// Takes:
// m : Pointer to the matrix
// n : Dimension of the matrix (square)
void init_matrix(int *m, int n){
for(int i = 0; i < n; i++){
for(int j = 0; j < n; j++){
m[n * i + j] = rand() % 100;
}
}
}
// Verifies the 2D convolution result on the CPU
// Takes:
// m: Original matrix
// mask: Convolutional mask
// result: Result from the GPU
// N: Dimensions of the matrix
void verify_result(int *m, int *mask, int *result, int N){
// Temp value for accumulating results
int temp;
// Intermediate value for more readable code
int offset_r;
int offset_c;
// Go over each row
for(int i = 0; i < N; i++){
// Go over each column
for(int j = 0; j < N; j++){
// Reset the temp variable
temp = 0;
// Go over each mask row
for(int k = 0; k < MASK_DIM; k++){
// Update offset value for row
offset_r = i - MASK_OFFSET + k;
// Go over each mask column
for(int l = 0; l < MASK_DIM; l++){
// Update offset value for column
offset_c = j - MASK_OFFSET + l;
// Range checks if we are hanging off the matrix
if(offset_r >= 0 && offset_r < N){
if(offset_c >= 0 && offset_c < N){
// Accumulate partial results
temp += m[offset_r * N + offset_c] *
mask[k * MASK_DIM + l];
}
}
}
}
// Fail if the results don't match
assert(result[i * N + j] == temp);
}
}
}
int main(){
// Dimensions of the matrix (2 ^ 10 x 2 ^ 10)
int N = 1 << 10;
// Size of the matrix (in bytes)
size_t bytes_n = N * N * sizeof(int);
// Allocate the matrix and initialize it
int *matrix = new int[N * N];
int *result = new int[N * N];
init_matrix(matrix, N);
// Size of the mask in bytes
size_t bytes_m = MASK_DIM * MASK_DIM * sizeof(int);
// Allocate the mask and initialize it
int *h_mask = new int[MASK_DIM * MASK_DIM];
init_matrix(h_mask, MASK_DIM);
// Allocate device memory
int *d_matrix;
int *d_result;
cudaMalloc(&d_matrix, bytes_n);
cudaMalloc(&d_result, bytes_n);
// Copy data to the device
cudaMemcpy(d_matrix, matrix, bytes_n, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(mask, h_mask, bytes_m);
// Calculate grid dimensions
int THREADS = 16;
int BLOCKS = (N + THREADS - 1) / THREADS;
// Dimension launch arguments
dim3 block_dim(THREADS, THREADS);
dim3 grid_dim(BLOCKS, BLOCKS);
// Perform 2D Convolution
convolution_2d<<<grid_dim, block_dim>>>(d_matrix, d_result, N);
// Copy the result back to the CPU
cudaMemcpy(result, d_result, bytes_n, cudaMemcpyDeviceToHost);
// Functional test
verify_result(matrix, h_mask, result, N);
cout << "COMPLETED SUCCESSFULLY!" << endl;
// Free the memory we allocated
delete [] matrix;
delete [] result;
delete [] h_mask;
cudaFree(d_matrix);
cudaFree(d_result);
return 0;
}
|
b741af3217158269045934ebd04601e1a4d334ef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if GOOGLE_CUDA
#define EIGEN_USE_GPU
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "../core/cpab_ops.cuh"
#define DIV_UP(a, b) (((a) + (b)-1) / (b))
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
typedef Eigen::GpuDevice GPUDevice;
void cpab_cuda_forward(const GPUDevice& device, const float* points, const float* trels,
const int* nstepsolver, const int* nc, const int broadcast,
const int ndim, const int nP, const int batch_size, float* output, const int circularity){
// Kernel configuration
dim3 bc((int)ceil(nP/256.0), batch_size);
dim3 tpb(256, 1);
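// Worked example (added for clarity, not in the original source): for
// nP = 1000 points and batch_size = 4 this gives bc = (ceil(1000/256), 4)
// = (4, 4) blocks of 256 x 1 threads, i.e. one thread per (point, batch
// element) pair.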
// Launch kernel
// We do it in this way, since dynamically allocating memory in CUDA sucks!
if(ndim == 1){
hipLaunchKernelGGL(( cpab_cuda_kernel_forward_1D), dim3(bc), dim3(tpb), 0, 0, nP, batch_size, output,
points, trels, nstepsolver,
nc, broadcast, circularity);
}
if(ndim == 2){
hipLaunchKernelGGL(( cpab_cuda_kernel_forward_2D), dim3(bc), dim3(tpb), 0, 0, nP, batch_size, output,
points, trels, nstepsolver,
nc, broadcast, circularity);
}
if(ndim == 3){
hipLaunchKernelGGL(( cpab_cuda_kernel_forward_3D), dim3(bc), dim3(tpb), 0, 0, nP, batch_size, output,
points, trels, nstepsolver,
nc, broadcast, circularity);
}
return;
}
void cpab_cuda_backward(const GPUDevice& device, const float* points, const float* As,
const float* Bs, const int* nstepsolver, const int* nc,
const int broadcast, const int ndim, const int nP,
const int n_theta, const int d, const int nC, float* output, const int circularity){
// Kernel configuration
dim3 tpb = dim3(::min((int)nP, 128), ::min((int)n_theta, 4), ::min((int)d, 1));
dim3 bc = dim3(DIV_UP(nP, tpb.x), DIV_UP(n_theta, tpb.y), DIV_UP(d, tpb.z));
dim3 vtc = dim3(nP, n_theta, d);
// Launch kernel
// We do it in this way, since dynamically allocating memory in CUDA sucks!
if(ndim == 1){
hipLaunchKernelGGL(( cpab_cuda_kernel_backward_1D), dim3(bc), dim3(tpb), 0, 0, vtc, n_theta, d, nP, nC,
output, points, As, Bs,
nstepsolver, nc, broadcast, circularity);
}
if(ndim == 2){
hipLaunchKernelGGL(( cpab_cuda_kernel_backward_2D), dim3(bc), dim3(tpb), 0, 0, vtc, n_theta, d, nP, nC,
output, points, As, Bs,
nstepsolver, nc, broadcast, circularity);
}
if(ndim == 3){
hipLaunchKernelGGL(( cpab_cuda_kernel_backward_3D), dim3(bc), dim3(tpb), 0, 0, vtc, n_theta, d, nP, nC,
output, points, As, Bs,
nstepsolver, nc, broadcast, circularity);
}
gpuErrchk( hipPeekAtLastError() );
return;
}
#endif
| b741af3217158269045934ebd04601e1a4d334ef.cu | #if GOOGLE_CUDA
#define EIGEN_USE_GPU
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "../core/cpab_ops.cuh"
#define DIV_UP(a, b) (((a) + (b)-1) / (b))
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
typedef Eigen::GpuDevice GPUDevice;
void cpab_cuda_forward(const GPUDevice& device, const float* points, const float* trels,
const int* nstepsolver, const int* nc, const int broadcast,
const int ndim, const int nP, const int batch_size, float* output, const int circularity){
// Kernel configuration
dim3 bc((int)ceil(nP/256.0), batch_size);
dim3 tpb(256, 1);
// Launch kernel
// We do it in this way, since dynamically allocating memory in CUDA sucks!
if(ndim == 1){
cpab_cuda_kernel_forward_1D<<<bc, tpb>>>(nP, batch_size, output,
points, trels, nstepsolver,
nc, broadcast, circularity);
}
if(ndim == 2){
cpab_cuda_kernel_forward_2D<<<bc, tpb>>>(nP, batch_size, output,
points, trels, nstepsolver,
nc, broadcast, circularity);
}
if(ndim == 3){
cpab_cuda_kernel_forward_3D<<<bc, tpb>>>(nP, batch_size, output,
points, trels, nstepsolver,
nc, broadcast, circularity);
}
return;
}
void cpab_cuda_backward(const GPUDevice& device, const float* points, const float* As,
const float* Bs, const int* nstepsolver, const int* nc,
const int broadcast, const int ndim, const int nP,
const int n_theta, const int d, const int nC, float* output, const int circularity){
// Kernel configuration
dim3 tpb = dim3(std::min((int)nP, 128), std::min((int)n_theta, 4), std::min((int)d, 1));
dim3 bc = dim3(DIV_UP(nP, tpb.x), DIV_UP(n_theta, tpb.y), DIV_UP(d, tpb.z));
dim3 vtc = dim3(nP, n_theta, d);
// Launch kernel
// We do it in this way, since dynamically allocating memory in CUDA sucks!
if(ndim == 1){
cpab_cuda_kernel_backward_1D<<<bc, tpb>>>(vtc, n_theta, d, nP, nC,
output, points, As, Bs,
nstepsolver, nc, broadcast, circularity);
}
if(ndim == 2){
cpab_cuda_kernel_backward_2D<<<bc, tpb>>>(vtc, n_theta, d, nP, nC,
output, points, As, Bs,
nstepsolver, nc, broadcast, circularity);
}
if(ndim == 3){
cpab_cuda_kernel_backward_3D<<<bc, tpb>>>(vtc, n_theta, d, nP, nC,
output, points, As, Bs,
nstepsolver, nc, broadcast, circularity);
}
gpuErrchk( cudaPeekAtLastError() );
return;
}
#endif
|
e77d651e86c9e9a7f5d46f1f7718f5d484c5b54f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "range.hpp"
#include "utils.h"
#include <algorithm>
#include <cassert>
namespace culib {
namespace util {
struct cuda_helper_t cuda_helper;
void cuda_chk_launch(int num_thd, int num_blk, size_t smem) {
assert(num_thd <= cuda_helper.prop.maxThreadsPerBlock && "num_thd error");
assert(num_blk <= 65535 && "num_blk error");
assert(cuda_helper.prop.sharedMemPerBlock >= smem && "smem error");
}
int cuda_num_thd(int num) {
return ::min(cuda_helper.prop.maxThreadsPerBlock, num);
}
void cuda_free_safe(void *p) { cudaChk(hipFree(p)); }
__device__ unsigned dynamic_smem_size() {
unsigned ret;
asm volatile("mov.u32 %0, %dynamic_smem_size;" : "=r"(ret));
return ret;
}
void cudaErrCheck_(hipError_t stat, const char *file, int line) {
assert(stat == hipSuccess);
if (stat != hipSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", hipGetErrorString(stat),
file, line);
exit(1);
}
}
__global__ void __kernel_to_half(half *dst, const float *src,
const size_t len) {
for (auto i : grid_stride_range(std::size_t(0), len)) {
dst[i] = __float2half_rn(src[i]);
}
}
void to_half_devptr(half *dst, const float *src, const size_t len) {
int num_blk, num_thd;
hipOccupancyMaxPotentialBlockSize(&num_blk, &num_thd, __kernel_to_half);
hipLaunchKernelGGL(( __kernel_to_half), dim3(num_blk), dim3(num_thd), 0, 0, dst, src, len);
}
void sync_streams(hipStream_t *streams, int num) {
for (size_t i = 0; i < num; i++) {
cudaChk(hipStreamSynchronize(streams[i]));
}
}
} // namespace util
} // namespace culib | e77d651e86c9e9a7f5d46f1f7718f5d484c5b54f.cu | #include "range.hpp"
#include "utils.h"
#include <algorithm>
#include <cassert>
namespace culib {
namespace util {
struct cuda_helper_t cuda_helper;
void cuda_chk_launch(int num_thd, int num_blk, size_t smem) {
assert(num_thd <= cuda_helper.prop.maxThreadsPerBlock && "num_thd error");
assert(num_blk <= 65535 && "num_blk error");
assert(cuda_helper.prop.sharedMemPerBlock >= smem && "smem error");
}
int cuda_num_thd(int num) {
return std::min(cuda_helper.prop.maxThreadsPerBlock, num);
}
void cuda_free_safe(void *p) { cudaChk(cudaFree(p)); }
__device__ unsigned dynamic_smem_size() {
unsigned ret;
asm volatile("mov.u32 %0, %dynamic_smem_size;" : "=r"(ret));
return ret;
}
void cudaErrCheck_(cudaError_t stat, const char *file, int line) {
assert(stat == cudaSuccess);
if (stat != cudaSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat),
file, line);
exit(1);
}
}
__global__ void __kernel_to_half(half *dst, const float *src,
const size_t len) {
for (auto i : grid_stride_range(std::size_t(0), len)) {
dst[i] = __float2half_rn(src[i]);
}
}
void to_half_devptr(half *dst, const float *src, const size_t len) {
int num_blk, num_thd;
cudaOccupancyMaxPotentialBlockSize(&num_blk, &num_thd, __kernel_to_half);
__kernel_to_half<<<num_blk, num_thd>>>(dst, src, len);
}
void sync_streams(cudaStream_t *streams, int num) {
for (size_t i = 0; i < num; i++) {
cudaChk(cudaStreamSynchronize(streams[i]));
}
}
} // namespace util
} // namespace culib |
ccc6237616de907fe11c2429f66247b0d4035728.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define CHECK_STATUS(status) \
if (status != hipSuccess) \
fprintf(stderr, "File: %s\nLine:%d Function:%s>>>%s\n", __FILE__, __LINE__, __FUNCTION__,\
hipGetErrorString(status))
// Device code
__global__ void MyKernel(float* devPtr,
size_t pitch, int width, int height)
{
for (int r = 0; r < height; ++r) {
float* row = (float*)((char*)devPtr + r * pitch); // row r
for (int c = 0; c < width; ++c) {
float element = row[c];
}
}
}
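/* Illustrative helper (added for clarity, not part of the original sample):
   the pitch returned by hipMallocPitch is in bytes and may exceed
   width * sizeof(float), so a row's base address must be computed on a char*
   before casting back to float*. The helper name is an assumption made for
   this example. */
__host__ __device__ static inline float* pitched_row(float* base, size_t pitch, int r)
{
    return (float*)((char*)base + (size_t)r * pitch);
}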
int main(int argc, char **argv) {
CHECK_STATUS(hipSetDevice(0));
size_t width = 64, height = 64;
float* devPtr;
size_t pitch;
// Allocate a 2D array
CHECK_STATUS(hipMallocPitch(&devPtr, &pitch, width * sizeof(float), height));
// Launch the kernel
hipLaunchKernelGGL(( MyKernel), dim3(100), dim3(512), 0, 0, devPtr, pitch, width, height);
// Check for errors
CHECK_STATUS(hipGetLastError());
// Free memory
CHECK_STATUS(hipFree(devPtr));
return 0;
}
| ccc6237616de907fe11c2429f66247b0d4035728.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define CHECK_STATUS(status) \
if (status != cudaSuccess) \
fprintf(stderr, "File: %s\nLine:%d Function:%s>>>%s\n", __FILE__, __LINE__, __FUNCTION__,\
cudaGetErrorString(status))
// Device code
__global__ void MyKernel(float* devPtr,
size_t pitch, int width, int height)
{
for (int r = 0; r < height; ++r) {
float* row = (float*)((char*)devPtr + r * pitch); // row r
for (int c = 0; c < width; ++c) {
float element = row[c];
}
}
}
int main(int argc, char **argv) {
CHECK_STATUS(cudaSetDevice(0));
size_t width = 64, height = 64;
float* devPtr;
size_t pitch;
// Allocate a 2D array
CHECK_STATUS(cudaMallocPitch(&devPtr, &pitch, width * sizeof(float), height));
// Launch the kernel
MyKernel<<<100, 512>>>(devPtr, pitch, width, height);
// Check for errors
CHECK_STATUS(cudaGetLastError());
// Free memory
CHECK_STATUS(cudaFree(devPtr));
return 0;
}
|
0a77946aea6b463fc516a50695ec8d7f3f8b2a6c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright 2020 by XGBoost Contributors
*/
#include <thrust/reduce.h>
#include <thrust/iterator/transform_iterator.h>
#include <algorithm>
#include <ctgmath>
#include <limits>
#include "xgboost/base.h"
#include "row_partitioner_hip.cuh"
#include "histogram.cuh"
#include "../../data/ellpack_page.cuh"
#include "../../common/device_helpers.cuh"
namespace xgboost {
namespace tree {
// The following 2 functions are slightly modified versions of fbcuda.
/* \brief Constructs a rounding factor used to truncate elements in a sum such that the
sum of the truncated elements is the same no matter what the order of the sum is.
* Algorithm 5: Reproducible Sequential Sum in 'Fast Reproducible Floating-Point
* Summation' by Demmel and Nguyen
* In algorithm 5 the bound is calculated as $max(|v_i|) * n$. Here we use the bound
*
* \begin{equation}
* max( fl(\sum^{V}_{v_i>0}{v_i}), fl(\sum^{V}_{v_i<0}|v_i|) )
* \end{equation}
*
* to avoid outliers, as the full reduction is reproducible on GPU with reduction tree.
*/
template <typename T>
XGBOOST_DEV_INLINE __host__ T CreateRoundingFactor(T max_abs, int n) {
T delta = max_abs / (static_cast<T>(1.0) - 2 * n * std::numeric_limits<T>::epsilon());
// Calculate ceil(log_2(delta)).
// frexpf() calculates exp and returns `x` such that
// delta = x * 2^exp, where `x` in (-1.0, -0.5] U [0.5, 1).
// Because |x| < 1, exp is exactly ceil(log_2(delta)).
int exp;
::frexp(delta, &exp);
// return M = 2 ^ ceil(log_2(delta))
return std::ldexp(static_cast<T>(1.0), exp);
}
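// Worked example (added for clarity, not in the original source), in float
// where epsilon = 2^-23: for max_abs = 3.0 and n = 1024,
// delta = 3.0 / (1 - 2 * 1024 * 2^-23) ~ 3.00073; frexp factors this as
// 0.75018 * 2^2, so exp = 2 = ceil(log_2(delta)) and the returned rounding
// factor is 2^2 = 4.0.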
namespace {
struct Pair {
GradientPair first;
GradientPair second;
};
__host__ XGBOOST_DEV_INLINE Pair operator+(Pair const& lhs, Pair const& rhs) {
return {lhs.first + rhs.first, lhs.second + rhs.second};
}
} // anonymous namespace
struct Clip : public thrust::unary_function<GradientPair, Pair> {
static XGBOOST_DEV_INLINE float Pclip(float v) {
return v > 0 ? v : 0;
}
static XGBOOST_DEV_INLINE float Nclip(float v) {
return v < 0 ? abs(v) : 0;
}
XGBOOST_DEV_INLINE Pair operator()(GradientPair x) const {
auto pg = Pclip(x.GetGrad());
auto ph = Pclip(x.GetHess());
auto ng = Nclip(x.GetGrad());
auto nh = Nclip(x.GetHess());
return { GradientPair{ pg, ph }, GradientPair{ ng, nh } };
}
};
template <typename GradientSumT>
GradientSumT CreateRoundingFactor(common::Span<GradientPair const> gpair) {
using T = typename GradientSumT::ValueT;
dh::XGBCachingDeviceAllocator<char> alloc;
thrust::device_ptr<GradientPair const> gpair_beg {gpair.data()};
thrust::device_ptr<GradientPair const> gpair_end {gpair.data() + gpair.size()};
auto beg = thrust::make_transform_iterator(gpair_beg, Clip());
auto end = thrust::make_transform_iterator(gpair_end, Clip());
Pair p = dh::Reduce(thrust::hip::par(alloc), beg, end, Pair{}, thrust::plus<Pair>{});
GradientPair positive_sum {p.first}, negative_sum {p.second};
auto histogram_rounding = GradientSumT {
CreateRoundingFactor<T>(::max(positive_sum.GetGrad(), negative_sum.GetGrad()),
gpair.size()),
CreateRoundingFactor<T>(::max(positive_sum.GetHess(), negative_sum.GetHess()),
gpair.size()) };
return histogram_rounding;
}
template GradientPairPrecise CreateRoundingFactor(common::Span<GradientPair const> gpair);
template GradientPair CreateRoundingFactor(common::Span<GradientPair const> gpair);
template <typename GradientSumT>
__global__ void SharedMemHistKernel(EllpackDeviceAccessor matrix,
FeatureGroupsAccessor feature_groups,
common::Span<const RowPartitioner::RowIndexT> d_ridx,
GradientSumT* __restrict__ d_node_hist,
const GradientPair* __restrict__ d_gpair,
GradientSumT const rounding,
bool use_shared_memory_histograms) {
using T = typename GradientSumT::ValueT;
extern __shared__ char smem[];
FeatureGroup group = feature_groups[blockIdx.y];
GradientSumT* smem_arr = reinterpret_cast<GradientSumT*>(smem); // NOLINT
if (use_shared_memory_histograms) {
dh::BlockFill(smem_arr, group.num_bins, GradientSumT());
__syncthreads();
}
int feature_stride = matrix.is_dense ? group.num_features : matrix.row_stride;
size_t n_elements = feature_stride * d_ridx.size();
for (auto idx : dh::GridStrideRange(static_cast<size_t>(0), n_elements)) {
int ridx = d_ridx[idx / feature_stride];
int gidx = matrix.gidx_iter[ridx * matrix.row_stride + group.start_feature +
idx % feature_stride];
if (gidx != matrix.NumBins()) {
GradientSumT truncated {
TruncateWithRoundingFactor<T>(rounding.GetGrad(), d_gpair[ridx].GetGrad()),
TruncateWithRoundingFactor<T>(rounding.GetHess(), d_gpair[ridx].GetHess()),
};
// If we are not using shared memory, accumulate the values directly into
// global memory
GradientSumT* atomic_add_ptr =
use_shared_memory_histograms ? smem_arr : d_node_hist;
gidx = use_shared_memory_histograms ? gidx - group.start_bin : gidx;
dh::AtomicAddGpair(atomic_add_ptr + gidx, truncated);
}
}
if (use_shared_memory_histograms) {
// Write shared memory back to global memory
__syncthreads();
for (auto i : dh::BlockStrideRange(0, group.num_bins)) {
GradientSumT truncated{
TruncateWithRoundingFactor<T>(rounding.GetGrad(),
smem_arr[i].GetGrad()),
TruncateWithRoundingFactor<T>(rounding.GetHess(),
smem_arr[i].GetHess()),
};
dh::AtomicAddGpair(d_node_hist + group.start_bin + i, truncated);
}
}
}
template <typename GradientSumT>
void BuildGradientHistogram(EllpackDeviceAccessor const& matrix,
FeatureGroupsAccessor const& feature_groups,
common::Span<GradientPair const> gpair,
common::Span<const uint32_t> d_ridx,
common::Span<GradientSumT> histogram,
GradientSumT rounding) {
// decide whether to use shared memory
int device = 0;
dh::safe_cuda(hipGetDevice(&device));
int max_shared_memory = dh::MaxSharedMemoryOptin(device);
size_t smem_size = sizeof(GradientSumT) * feature_groups.max_group_bins;
bool shared = smem_size <= max_shared_memory;
smem_size = shared ? smem_size : 0;
// opt into maximum shared memory for the kernel if necessary
auto kernel = SharedMemHistKernel<GradientSumT>;
if (shared) {
dh::safe_cuda(hipFuncSetAttribute
(kernel, hipFuncAttributeMaxDynamicSharedMemorySize,
max_shared_memory));
}
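// Worked example (added for clarity, not in the original source): with
// GradientPairPrecise entries (two doubles, 16 bytes) and a largest feature
// group of 3000 bins, smem_size = 48000 bytes, which fits under the opt-in
// shared-memory limit of recent GPUs, so the shared-memory histogram path is
// taken and the kernel is granted that much dynamic shared memory.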
// determine the launch configuration
int min_grid_size;
int block_threads = 1024;
dh::safe_cuda(hipOccupancyMaxPotentialBlockSize(
&min_grid_size, &block_threads, kernel, smem_size, 0));
int num_groups = feature_groups.NumGroups();
int n_mps = 0;
dh::safe_cuda(hipDeviceGetAttribute(&n_mps, hipDeviceAttributeMultiprocessorCount, device));
int n_blocks_per_mp = 0;
dh::safe_cuda(hipOccupancyMaxActiveBlocksPerMultiprocessor
(&n_blocks_per_mp, kernel, block_threads, smem_size));
unsigned grid_size = n_blocks_per_mp * n_mps;
// TODO(canonizer): This is really a hack, find a better way to distribute the
// data among thread blocks.
// The intention is to generate enough thread blocks to fill the GPU, but
// avoid having too many thread blocks, as this is less efficient when the
// number of rows is low. At least one thread block per feature group is
// required.
// The number of thread blocks:
// - for num_groups <= num_groups_threshold, around grid_size * num_groups
// - for num_groups_threshold <= num_groups <= num_groups_threshold * grid_size,
// around grid_size * num_groups_threshold
// - for num_groups_threshold * grid_size <= num_groups, around num_groups
int num_groups_threshold = 4;
grid_size = common::DivRoundUp(grid_size,
common::DivRoundUp(num_groups, num_groups_threshold));
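// Worked example (added for clarity, not in the original source): suppose the
// occupancy calculation above yielded grid_size = 80. With num_groups = 2,
// DivRoundUp(2, 4) = 1 leaves grid_size at 80 and the launch uses 80 * 2 = 160
// blocks (~ grid_size * num_groups). With num_groups = 64, DivRoundUp(64, 4) =
// 16 reduces grid_size to 5, giving 5 * 64 = 320 blocks
// (~ grid_size * num_groups_threshold). With num_groups = 1000, grid_size
// collapses to 1 and the launch uses about num_groups blocks.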
dh::LaunchKernel {
dim3(grid_size, num_groups), static_cast<uint32_t>(block_threads), smem_size} (
kernel,
matrix, feature_groups, d_ridx, histogram.data(), gpair.data(), rounding,
shared);
dh::safe_cuda(hipGetLastError());
}
template void BuildGradientHistogram<GradientPair>(
EllpackDeviceAccessor const& matrix,
FeatureGroupsAccessor const& feature_groups,
common::Span<GradientPair const> gpair,
common::Span<const uint32_t> ridx,
common::Span<GradientPair> histogram,
GradientPair rounding);
template void BuildGradientHistogram<GradientPairPrecise>(
EllpackDeviceAccessor const& matrix,
FeatureGroupsAccessor const& feature_groups,
common::Span<GradientPair const> gpair,
common::Span<const uint32_t> ridx,
common::Span<GradientPairPrecise> histogram,
GradientPairPrecise rounding);
} // namespace tree
} // namespace xgboost
| 0a77946aea6b463fc516a50695ec8d7f3f8b2a6c.cu | /*!
* Copyright 2020 by XGBoost Contributors
*/
#include <thrust/reduce.h>
#include <thrust/iterator/transform_iterator.h>
#include <algorithm>
#include <ctgmath>
#include <limits>
#include "xgboost/base.h"
#include "row_partitioner.cuh"
#include "histogram.cuh"
#include "../../data/ellpack_page.cuh"
#include "../../common/device_helpers.cuh"
namespace xgboost {
namespace tree {
// The following 2 functions are slightly modified versions of fbcuda.
/* \brief Constructs a rounding factor used to truncate elements in a sum such that the
sum of the truncated elements is the same no matter what the order of the sum is.
* Algorithm 5: Reproducible Sequential Sum in 'Fast Reproducible Floating-Point
* Summation' by Demmel and Nguyen
* In algorithm 5 the bound is calculated as $max(|v_i|) * n$. Here we use the bound
*
* \begin{equation}
* max( fl(\sum^{V}_{v_i>0}{v_i}), fl(\sum^{V}_{v_i<0}|v_i|) )
* \end{equation}
*
* to avoid outliers, as the full reduction is reproducible on GPU with reduction tree.
*/
template <typename T>
XGBOOST_DEV_INLINE __host__ T CreateRoundingFactor(T max_abs, int n) {
T delta = max_abs / (static_cast<T>(1.0) - 2 * n * std::numeric_limits<T>::epsilon());
// Calculate ceil(log_2(delta)).
// frexpf() calculates exp and returns `x` such that
// delta = x * 2^exp, where `x` in (-1.0, -0.5] U [0.5, 1).
// Because |x| < 1, exp is exactly ceil(log_2(delta)).
int exp;
std::frexp(delta, &exp);
// return M = 2 ^ ceil(log_2(delta))
return std::ldexp(static_cast<T>(1.0), exp);
}
namespace {
struct Pair {
GradientPair first;
GradientPair second;
};
__host__ XGBOOST_DEV_INLINE Pair operator+(Pair const& lhs, Pair const& rhs) {
return {lhs.first + rhs.first, lhs.second + rhs.second};
}
} // anonymous namespace
struct Clip : public thrust::unary_function<GradientPair, Pair> {
static XGBOOST_DEV_INLINE float Pclip(float v) {
return v > 0 ? v : 0;
}
static XGBOOST_DEV_INLINE float Nclip(float v) {
return v < 0 ? abs(v) : 0;
}
XGBOOST_DEV_INLINE Pair operator()(GradientPair x) const {
auto pg = Pclip(x.GetGrad());
auto ph = Pclip(x.GetHess());
auto ng = Nclip(x.GetGrad());
auto nh = Nclip(x.GetHess());
return { GradientPair{ pg, ph }, GradientPair{ ng, nh } };
}
};
template <typename GradientSumT>
GradientSumT CreateRoundingFactor(common::Span<GradientPair const> gpair) {
using T = typename GradientSumT::ValueT;
dh::XGBCachingDeviceAllocator<char> alloc;
thrust::device_ptr<GradientPair const> gpair_beg {gpair.data()};
thrust::device_ptr<GradientPair const> gpair_end {gpair.data() + gpair.size()};
auto beg = thrust::make_transform_iterator(gpair_beg, Clip());
auto end = thrust::make_transform_iterator(gpair_end, Clip());
Pair p = dh::Reduce(thrust::cuda::par(alloc), beg, end, Pair{}, thrust::plus<Pair>{});
GradientPair positive_sum {p.first}, negative_sum {p.second};
auto histogram_rounding = GradientSumT {
CreateRoundingFactor<T>(std::max(positive_sum.GetGrad(), negative_sum.GetGrad()),
gpair.size()),
CreateRoundingFactor<T>(std::max(positive_sum.GetHess(), negative_sum.GetHess()),
gpair.size()) };
return histogram_rounding;
}
template GradientPairPrecise CreateRoundingFactor(common::Span<GradientPair const> gpair);
template GradientPair CreateRoundingFactor(common::Span<GradientPair const> gpair);
template <typename GradientSumT>
__global__ void SharedMemHistKernel(EllpackDeviceAccessor matrix,
FeatureGroupsAccessor feature_groups,
common::Span<const RowPartitioner::RowIndexT> d_ridx,
GradientSumT* __restrict__ d_node_hist,
const GradientPair* __restrict__ d_gpair,
GradientSumT const rounding,
bool use_shared_memory_histograms) {
using T = typename GradientSumT::ValueT;
extern __shared__ char smem[];
FeatureGroup group = feature_groups[blockIdx.y];
GradientSumT* smem_arr = reinterpret_cast<GradientSumT*>(smem); // NOLINT
if (use_shared_memory_histograms) {
dh::BlockFill(smem_arr, group.num_bins, GradientSumT());
__syncthreads();
}
int feature_stride = matrix.is_dense ? group.num_features : matrix.row_stride;
size_t n_elements = feature_stride * d_ridx.size();
for (auto idx : dh::GridStrideRange(static_cast<size_t>(0), n_elements)) {
int ridx = d_ridx[idx / feature_stride];
int gidx = matrix.gidx_iter[ridx * matrix.row_stride + group.start_feature +
idx % feature_stride];
if (gidx != matrix.NumBins()) {
GradientSumT truncated {
TruncateWithRoundingFactor<T>(rounding.GetGrad(), d_gpair[ridx].GetGrad()),
TruncateWithRoundingFactor<T>(rounding.GetHess(), d_gpair[ridx].GetHess()),
};
// If we are not using shared memory, accumulate the values directly into
// global memory
GradientSumT* atomic_add_ptr =
use_shared_memory_histograms ? smem_arr : d_node_hist;
gidx = use_shared_memory_histograms ? gidx - group.start_bin : gidx;
dh::AtomicAddGpair(atomic_add_ptr + gidx, truncated);
}
}
if (use_shared_memory_histograms) {
// Write shared memory back to global memory
__syncthreads();
for (auto i : dh::BlockStrideRange(0, group.num_bins)) {
GradientSumT truncated{
TruncateWithRoundingFactor<T>(rounding.GetGrad(),
smem_arr[i].GetGrad()),
TruncateWithRoundingFactor<T>(rounding.GetHess(),
smem_arr[i].GetHess()),
};
dh::AtomicAddGpair(d_node_hist + group.start_bin + i, truncated);
}
}
}
template <typename GradientSumT>
void BuildGradientHistogram(EllpackDeviceAccessor const& matrix,
FeatureGroupsAccessor const& feature_groups,
common::Span<GradientPair const> gpair,
common::Span<const uint32_t> d_ridx,
common::Span<GradientSumT> histogram,
GradientSumT rounding) {
// decide whether to use shared memory
int device = 0;
dh::safe_cuda(cudaGetDevice(&device));
int max_shared_memory = dh::MaxSharedMemoryOptin(device);
size_t smem_size = sizeof(GradientSumT) * feature_groups.max_group_bins;
bool shared = smem_size <= max_shared_memory;
smem_size = shared ? smem_size : 0;
// opt into maximum shared memory for the kernel if necessary
auto kernel = SharedMemHistKernel<GradientSumT>;
if (shared) {
dh::safe_cuda(cudaFuncSetAttribute
(kernel, cudaFuncAttributeMaxDynamicSharedMemorySize,
max_shared_memory));
}
// determine the launch configuration
int min_grid_size;
int block_threads = 1024;
dh::safe_cuda(cudaOccupancyMaxPotentialBlockSize(
&min_grid_size, &block_threads, kernel, smem_size, 0));
int num_groups = feature_groups.NumGroups();
int n_mps = 0;
dh::safe_cuda(cudaDeviceGetAttribute(&n_mps, cudaDevAttrMultiProcessorCount, device));
int n_blocks_per_mp = 0;
dh::safe_cuda(cudaOccupancyMaxActiveBlocksPerMultiprocessor
(&n_blocks_per_mp, kernel, block_threads, smem_size));
unsigned grid_size = n_blocks_per_mp * n_mps;
// TODO(canonizer): This is really a hack, find a better way to distribute the
// data among thread blocks.
// The intention is to generate enough thread blocks to fill the GPU, but
// avoid having too many thread blocks, as this is less efficient when the
// number of rows is low. At least one thread block per feature group is
// required.
// The number of thread blocks:
// - for num_groups <= num_groups_threshold, around grid_size * num_groups
// - for num_groups_threshold <= num_groups <= num_groups_threshold * grid_size,
// around grid_size * num_groups_threshold
// - for num_groups_threshold * grid_size <= num_groups, around num_groups
int num_groups_threshold = 4;
grid_size = common::DivRoundUp(grid_size,
common::DivRoundUp(num_groups, num_groups_threshold));
dh::LaunchKernel {
dim3(grid_size, num_groups), static_cast<uint32_t>(block_threads), smem_size} (
kernel,
matrix, feature_groups, d_ridx, histogram.data(), gpair.data(), rounding,
shared);
dh::safe_cuda(cudaGetLastError());
}
template void BuildGradientHistogram<GradientPair>(
EllpackDeviceAccessor const& matrix,
FeatureGroupsAccessor const& feature_groups,
common::Span<GradientPair const> gpair,
common::Span<const uint32_t> ridx,
common::Span<GradientPair> histogram,
GradientPair rounding);
template void BuildGradientHistogram<GradientPairPrecise>(
EllpackDeviceAccessor const& matrix,
FeatureGroupsAccessor const& feature_groups,
common::Span<GradientPair const> gpair,
common::Span<const uint32_t> ridx,
common::Span<GradientPairPrecise> histogram,
GradientPairPrecise rounding);
} // namespace tree
} // namespace xgboost
|
bf228549f1eaf112f34d01fbf111cd9c7fc11c7c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <hipcub/hipcub.hpp>
#include <memory>
#include <vector>
#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/operators/layer_norm_op.h"
#include "paddle/fluid/platform/cudnn_helper.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using DataLayout = framework::DataLayout;
template <typename T>
using CudnnDataType = platform::CudnnDataType<T>;
template <typename T>
using LayerNormParamType = typename CudnnDataType<T>::BatchNormParamType;
inline static int GetDesiredBlockDim(int block_dim) {
const int kMaxBlockDim = 512;
return block_dim >= kMaxBlockDim
? kMaxBlockDim
: (1 << (static_cast<int>(std::log2f(block_dim))));
}
#define FIXED_BLOCK_DIM_CASE_BASE(log2_block_dim, ...) \
case (1 << (log2_block_dim)): { \
constexpr auto kBlockDim = (1 << (log2_block_dim)); \
__VA_ARGS__; \
} break
#define FIXED_BLOCK_DIM_CASE(...) \
FIXED_BLOCK_DIM_CASE_BASE(9, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(8, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(7, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(6, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(5, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(4, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(3, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(2, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(1, ##__VA_ARGS__)
#define FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE( \
log2_block_dim, feature_size, kMaxBlockNum, ...) \
case (1 << (log2_block_dim)): { \
for (int i = 0; i < ::ceil(feature_size / (1.0 * kMaxBlockNum)); i++) { \
int col_offset = i * kMaxBlockNum; \
int block_num = ::min(feature_size - col_offset, kMaxBlockNum); \
constexpr auto kBlockDim = (1 << (log2_block_dim)); \
__VA_ARGS__; \
} \
} break
#define FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE(feature_size, kMaxBlockNum, ...) \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(9, feature_size, kMaxBlockNum, \
##__VA_ARGS__); \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(8, feature_size, kMaxBlockNum, \
##__VA_ARGS__); \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(7, feature_size, kMaxBlockNum, \
##__VA_ARGS__); \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(6, feature_size, kMaxBlockNum, \
##__VA_ARGS__); \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(5, feature_size, kMaxBlockNum, \
##__VA_ARGS__); \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(4, feature_size, kMaxBlockNum, \
##__VA_ARGS__); \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(3, feature_size, kMaxBlockNum, \
##__VA_ARGS__); \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(2, feature_size, kMaxBlockNum, \
##__VA_ARGS__); \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(1, feature_size, kMaxBlockNum, \
##__VA_ARGS__)
static __device__ __forceinline__ float real_sqrt(float x) { return sqrtf(x); }
static __device__ __forceinline__ double real_sqrt(double x) { return sqrt(x); }
template <typename T>
struct PairForLayerNorm {
__device__ __forceinline__ PairForLayerNorm() {}
__device__ __forceinline__ PairForLayerNorm(const T &first, const T &second)
: first_(first), second_(second) {}
T first_;
T second_;
};
template <typename T>
struct PairForLayerNormAddFunctor {
__device__ __forceinline__ PairForLayerNorm<T> operator()(
const PairForLayerNorm<T> &p1, const PairForLayerNorm<T> &p2) {
return PairForLayerNorm<T>(p1.first_ + p2.first_, p1.second_ + p2.second_);
}
};
template <typename T, typename U, int BlockDim>
__global__ void LayerNormForward(const T *x, const U *scale, const U *bias,
T *y, U *mean, U *var, float epsilon,
int feature_size) {
using BlockReduce = hipcub::BlockReduce<PairForLayerNorm<double>, BlockDim>;
__shared__ typename BlockReduce::TempStorage temp_storage;
int beg_idx = blockIdx.x * feature_size + threadIdx.x;
int end_idx = (blockIdx.x + 1) * feature_size;
// Step 1: Reduce to calculate mean and var
double mean_val = 0;
double var_val = 0;
for (int i = beg_idx; i < end_idx; i += BlockDim) {
U tmp = static_cast<U>(x[i]);
mean_val += tmp;
var_val += (tmp * tmp);
}
auto pair = BlockReduce(temp_storage)
.Reduce(PairForLayerNorm<double>(mean_val, var_val),
PairForLayerNormAddFunctor<double>());
if (threadIdx.x == 0) {
auto tmp = pair.first_ / feature_size;
mean[blockIdx.x] = static_cast<U>(tmp);
var[blockIdx.x] = static_cast<U>(pair.second_ / feature_size - tmp * tmp);
}
__syncthreads();
mean_val = mean[blockIdx.x];
var_val = static_cast<U>(real_sqrt(var[blockIdx.x] + epsilon));
// Step 2: Calculate y
if (scale != nullptr) {
if (bias != nullptr) {
for (int i = beg_idx, j = threadIdx.x; i < end_idx;
i += BlockDim, j += BlockDim) {
y[i] = static_cast<T>(
scale[j] * (static_cast<U>(x[i]) - mean_val) / var_val + bias[j]);
}
} else {
for (int i = beg_idx, j = threadIdx.x; i < end_idx;
i += BlockDim, j += BlockDim) {
y[i] = static_cast<T>(scale[j] * (static_cast<U>(x[i]) - mean_val) /
var_val);
}
}
} else { // scale == nullptr
if (bias != nullptr) {
for (int i = beg_idx, j = threadIdx.x; i < end_idx;
i += BlockDim, j += BlockDim) {
y[i] = static_cast<T>((static_cast<U>(x[i]) - mean_val) / var_val +
bias[j]);
}
} else {
for (int i = beg_idx, j = threadIdx.x; i < end_idx;
i += BlockDim, j += BlockDim) {
y[i] = static_cast<T>((static_cast<U>(x[i]) - mean_val) / var_val);
}
}
}
}
// Make sure that d_scale != nullptr && d_bias != nullptr
// Since d_scale != nullptr, scale would not be nullptr
template <typename T, typename U, int BlockDim, bool HasDx>
__global__ void LayerNormBackwardGradientAll(const T *x, const T *d_y,
U *d_scale, U *d_bias, T *d_x,
const U *mean, const U *var,
const U *scale, float epsilon,
int batch_size, int feature_size,
int col_offset) {
using BlockReduce = hipcub::BlockReduce<PairForLayerNorm<U>, BlockDim>;
__shared__ typename BlockReduce::TempStorage temp_storage;
int beg_idx = threadIdx.x * feature_size + (blockIdx.x + col_offset);
int end_idx = batch_size * feature_size + (blockIdx.x + col_offset);
int stride = BlockDim * feature_size;
U d_scale_partial = static_cast<U>(0), d_bias_partial = static_cast<U>(0);
for (int i = beg_idx; i < end_idx; i += stride) {
int row_idx = i / feature_size;
auto var_val = real_sqrt(static_cast<U>(var[row_idx]) + epsilon);
d_scale_partial += static_cast<U>(d_y[i]) *
(static_cast<U>(x[i]) - mean[row_idx]) / var_val;
d_bias_partial += static_cast<U>(d_y[i]);
if (HasDx) {
d_x[i] = static_cast<T>(static_cast<U>(d_y[i]) *
scale[blockIdx.x + col_offset] / var_val);
}
}
auto pair = BlockReduce(temp_storage)
.Reduce(PairForLayerNorm<U>(d_scale_partial, d_bias_partial),
PairForLayerNormAddFunctor<U>());
if (threadIdx.x == 0) {
d_scale[blockIdx.x + col_offset] = pair.first_;
d_bias[blockIdx.x + col_offset] = pair.second_;
}
}
// Make sure that there is only one true expression: d_scale != nullptr
// or d_bias != nullptr
// Notice: scale may be nullptr
template <typename T, typename U, int BlockDim, bool HasDx, bool HasDScale>
__global__ void LayerNormBackwardGradientScaleOrBias(
const T *x, const T *d_y, U *d_scale, U *d_bias, T *d_x, const U *mean,
const U *var, const U *scale, float epsilon, int batch_size,
int feature_size, int col_offset) {
using BlockReduce = hipcub::BlockReduce<U, BlockDim>;
__shared__ typename BlockReduce::TempStorage temp_storage;
int beg_idx = threadIdx.x * feature_size + blockIdx.x + col_offset;
int end_idx = batch_size * feature_size + blockIdx.x + col_offset;
int stride = BlockDim * feature_size;
U d_scale_or_d_bias_partial = static_cast<U>(0);
for (int i = beg_idx; i < end_idx; i += stride) {
int row_idx = i / feature_size;
auto var_val =
static_cast<U>(real_sqrt(static_cast<float>(var[row_idx]) + epsilon));
if (HasDScale) {
d_scale_or_d_bias_partial += static_cast<U>(d_y[i]) *
(static_cast<U>(x[i]) - mean[row_idx]) /
var_val;
} else { // d_bias != nullptr
d_scale_or_d_bias_partial += static_cast<U>(d_y[i]);
}
if (HasDx) {
if (scale != nullptr) {
d_x[i] = static_cast<T>(static_cast<U>(d_y[i]) *
scale[blockIdx.x + col_offset] / var_val);
} else {
d_x[i] = static_cast<T>(static_cast<U>(d_y[i]) / var_val);
}
}
}
d_scale_or_d_bias_partial =
BlockReduce(temp_storage).Reduce(d_scale_or_d_bias_partial, hipcub::Sum());
if (threadIdx.x == 0) {
if (HasDScale) {
d_scale[blockIdx.x + col_offset] = d_scale_or_d_bias_partial;
} else {
d_bias[blockIdx.x + col_offset] = d_scale_or_d_bias_partial;
}
}
}
template <typename T, typename U, int BlockDim>
__global__ void LayerNormBackwardPostProcessToCalculateDX(const T *x, T *d_x,
const U *mean,
const U *var,
float epsilon,
int feature_size) {
using BlockReduce = hipcub::BlockReduce<PairForLayerNorm<U>, BlockDim>;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ U d_x_reduce_tmp[2];
int beg_idx = blockIdx.x * feature_size + threadIdx.x;
int end_idx = (blockIdx.x + 1) * feature_size;
U block_mean = mean[blockIdx.x];
U block_var = var[blockIdx.x];
U d_x_mean_partial = static_cast<U>(0), d_x_var_partial = static_cast<U>(0);
for (int i = beg_idx; i < end_idx; i += BlockDim) {
d_x_mean_partial += static_cast<U>(d_x[i]);
d_x_var_partial +=
static_cast<U>(d_x[i]) * (static_cast<U>(x[i]) - block_mean);
}
auto pair =
BlockReduce(temp_storage)
.Reduce(PairForLayerNorm<U>(d_x_mean_partial, d_x_var_partial),
PairForLayerNormAddFunctor<U>());
if (threadIdx.x == 0) {
d_x_reduce_tmp[0] = static_cast<float>(pair.first_) / feature_size;
d_x_reduce_tmp[1] =
static_cast<float>(pair.second_) /
(feature_size * (static_cast<float>(block_var) + epsilon));
}
__syncthreads();
d_x_mean_partial = d_x_reduce_tmp[0];
d_x_var_partial = d_x_reduce_tmp[1];
for (int i = beg_idx; i < end_idx; i += BlockDim) {
d_x[i] -= static_cast<T>(d_x_mean_partial);
d_x[i] -=
static_cast<T>((static_cast<U>(x[i]) - block_mean) * d_x_var_partial);
}
}
// Here, we only calculate d_x
template <typename T, typename U, int BlockDim>
__global__ void LayerNormBackwardGradientOnlyDX(const T *x, const T *d_y,
T *d_x, const U *mean,
const U *var, const U *scale,
float epsilon,
int feature_size) {
using BlockReduce = hipcub::BlockReduce<PairForLayerNorm<U>, BlockDim>;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ U d_x_reduce_tmp[2];
int beg_idx = blockIdx.x * feature_size + threadIdx.x;
int end_idx = (blockIdx.x + 1) * feature_size;
U block_mean = mean[blockIdx.x], block_var = var[blockIdx.x];
U d_x_mean_partial = static_cast<U>(0), d_x_var_partial = static_cast<U>(0);
for (int i = beg_idx; i < end_idx; i += BlockDim) {
auto var_val =
static_cast<U>(real_sqrt(static_cast<float>(block_var) + epsilon));
if (scale != nullptr) {
int col_idx = i % feature_size;
d_x[i] =
static_cast<T>(static_cast<U>(d_y[i]) * scale[col_idx] / var_val);
} else {
d_x[i] = static_cast<T>(static_cast<U>(d_y[i]) / var_val);
}
d_x_mean_partial += static_cast<U>(d_x[i]);
d_x_var_partial +=
static_cast<U>(d_x[i]) * (static_cast<U>(x[i]) - block_mean);
}
auto pair =
BlockReduce(temp_storage)
.Reduce(PairForLayerNorm<U>(d_x_mean_partial, d_x_var_partial),
PairForLayerNormAddFunctor<U>());
if (threadIdx.x == 0) {
d_x_reduce_tmp[0] = static_cast<float>(pair.first_) / feature_size;
d_x_reduce_tmp[1] =
static_cast<float>(pair.second_) /
(feature_size * (static_cast<float>(block_var) + epsilon));
}
__syncthreads();
d_x_mean_partial = d_x_reduce_tmp[0];
d_x_var_partial = d_x_reduce_tmp[1];
for (int i = beg_idx; i < end_idx; i += BlockDim) {
d_x[i] -= static_cast<T>(d_x_mean_partial);
d_x[i] -=
static_cast<T>((static_cast<U>(x[i]) - block_mean) * d_x_var_partial);
}
}
template <typename T, typename U>
__global__ void LayerNormBackwardWhenBatchSizeIsOne(
const T *x, const T *d_y, T *d_x, U *d_scale, U *d_bias, const U *mean,
const U *var, const U *scale, float epsilon, int feature_size) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < feature_size) {
auto var_val =
static_cast<U>(real_sqrt(static_cast<float>(var[idx]) + epsilon));
if (d_x != nullptr) {
if (d_scale == nullptr) {
d_x[idx] = static_cast<T>(static_cast<U>(d_y[idx]) / var_val);
} else {
d_x[idx] =
static_cast<T>(static_cast<U>(d_y[idx]) * scale[idx] / var_val);
}
}
if (d_scale != nullptr) {
d_scale[idx] = static_cast<U>(d_y[idx]) *
(static_cast<U>(x[idx]) - mean[idx]) / var_val;
}
if (d_bias != nullptr) d_bias[idx] = static_cast<U>(d_y[idx]);
}
}
template <typename T, typename U>
static void LayerNormBackward(const T *x, const T *d_y, const U *scale,
const U *mean, const U *var, T *d_x, U *d_scale,
U *d_bias, float epsilon, int batch_size,
int feature_size, hipStream_t stream) {
const int kMaxBlockDim = 512;
const int kMaxBlockNum = 128;
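// gradient_flag packs the requested outputs into a 3-bit mask:
// bit 2 -> d_x, bit 1 -> d_scale, bit 0 -> d_bias.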
int gradient_flag = ((d_x != nullptr ? 1 : 0) << 2) |
((d_scale != nullptr ? 1 : 0) << 1) |
((d_bias != nullptr ? 1 : 0));
if (gradient_flag == 0) return;
if (batch_size == 1) {
hipLaunchKernelGGL(( LayerNormBackwardWhenBatchSizeIsOne<
T, U>), dim3((feature_size + kMaxBlockDim - 1) / kMaxBlockDim), dim3(kMaxBlockDim),
0, stream, x, d_y, d_x, d_scale, d_bias, mean, var, scale,
epsilon, feature_size);
if (d_x != nullptr) {
switch (GetDesiredBlockDim(feature_size)) {
hipLaunchKernelGGL(( FIXED_BLOCK_DIM_CASE(LayerNormBackwardPostProcessToCalculateDX<
T, U, kBlockDim>), dim3(1), dim3(kBlockDim), 0, stream,
x, d_x, mean, var, epsilon, feature_size));
}
}
return;
}
auto block_dim = GetDesiredBlockDim(batch_size);
switch (gradient_flag) {
case 1: // d_x == nullptr, d_scale == nullptr, d_bias != nullptr
switch (block_dim) {
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE(
feature_size, kMaxBlockNum,
hipLaunchKernelGGL(( LayerNormBackwardGradientScaleOrBias<
T, U, kBlockDim, false,
false>), dim3(block_num), dim3(kBlockDim), 0, stream,
x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon,
batch_size, feature_size, col_offset));
}
break;
case 2: // d_x == nullptr, d_scale != nullptr, d_bias == nullptr
switch (block_dim) {
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE(
feature_size, kMaxBlockNum,
hipLaunchKernelGGL(( LayerNormBackwardGradientScaleOrBias<
T, U, kBlockDim, false,
true>), dim3(block_num), dim3(kBlockDim), 0, stream,
x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon,
batch_size, feature_size, col_offset));
}
break;
case 3: // d_x == nullptr, d_scale != nullptr, d_bias != nullptr
switch (block_dim) {
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE(
feature_size, kMaxBlockNum,
hipLaunchKernelGGL(( LayerNormBackwardGradientAll<
T, U, kBlockDim, false>), dim3(block_num), dim3(kBlockDim), 0, stream,
x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon,
batch_size, feature_size, col_offset));
}
break;
case 4: // d_x != nullptr, d_scale == nullptr, d_bias == nullptr
switch (GetDesiredBlockDim(feature_size)) {
FIXED_BLOCK_DIM_CASE(
hipLaunchKernelGGL(( LayerNormBackwardGradientOnlyDX<
T, U, kBlockDim>), dim3(batch_size), dim3(kBlockDim), 0, stream,
x, d_y, d_x, mean, var, scale, epsilon, feature_size));
}
break;
case 5: // d_x != nullptr, d_scale == nullptr, d_bias != nullptr
switch (block_dim) {
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE(
feature_size, kMaxBlockNum,
hipLaunchKernelGGL(( LayerNormBackwardGradientScaleOrBias<
T, U, kBlockDim, true,
false>), dim3(block_num), dim3(kBlockDim), 0, stream,
x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon,
batch_size, feature_size, col_offset));
}
switch (GetDesiredBlockDim(feature_size)) {
FIXED_BLOCK_DIM_CASE(
hipLaunchKernelGGL(( LayerNormBackwardPostProcessToCalculateDX<
T, U, kBlockDim>), dim3(batch_size), dim3(kBlockDim), 0, stream,
x, d_x, mean, var, epsilon, feature_size));
}
break;
case 6: // d_x != nullptr, d_scale != nullptr, d_bias == nullptr
switch (block_dim) {
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE(
feature_size, kMaxBlockNum,
hipLaunchKernelGGL(( LayerNormBackwardGradientScaleOrBias<
T, U, kBlockDim, true,
true>), dim3(block_num), dim3(kBlockDim), 0, stream,
x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon,
batch_size, feature_size, col_offset));
}
switch (GetDesiredBlockDim(feature_size)) {
FIXED_BLOCK_DIM_CASE(
hipLaunchKernelGGL(( LayerNormBackwardPostProcessToCalculateDX<
T, U, kBlockDim>), dim3(batch_size), dim3(kBlockDim), 0, stream,
x, d_x, mean, var, epsilon, feature_size));
}
break;
case 7: // d_x != nullptr, d_scale != nullptr, d_bias != nullptr
switch (block_dim) {
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE(
feature_size, kMaxBlockNum,
hipLaunchKernelGGL(( LayerNormBackwardGradientAll<
T, U, kBlockDim, true>), dim3(block_num), dim3(kBlockDim), 0, stream,
x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon,
batch_size, feature_size, col_offset));
}
switch (GetDesiredBlockDim(feature_size)) {
FIXED_BLOCK_DIM_CASE(
hipLaunchKernelGGL(( LayerNormBackwardPostProcessToCalculateDX<
T, U, kBlockDim>), dim3(batch_size), dim3(kBlockDim), 0, stream,
x, d_x, mean, var, epsilon, feature_size));
}
break;
default:
break;
}
}
template <typename T>
void LayerNormDirectCUDAFunctor<T>::operator()(hipStream_t stream,
const T *input,
std::vector<int> input_shape,
const T *bias, const T *scale,
T *output, T *mean, T *variance,
int begin_norm_axis, float eps) {
const auto x_dims = framework::make_ddim(input_shape);
auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis);
int batch_size = static_cast<int>(matrix_dim[0]);
int feature_size = static_cast<int>(matrix_dim[1]);
switch (GetDesiredBlockDim(feature_size)) {
FIXED_BLOCK_DIM_CASE(
hipLaunchKernelGGL(( LayerNormForward<T, T, kBlockDim>), dim3(batch_size), dim3(kBlockDim), 0, stream,
input, scale, bias, output, mean, variance, eps, feature_size));
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"Product from begin_norm_axis to end in layer_norm must be larger "
"than 1"));
break;
}
}
template <typename T>
class LayerNormKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
const float epsilon = ctx.Attr<float>("epsilon");
auto *scale = ctx.Input<Tensor>("Scale");
auto *bias = ctx.Input<Tensor>("Bias");
auto *x = ctx.Input<Tensor>("X");
auto *y = ctx.Output<Tensor>("Y");
auto *mean = ctx.Output<Tensor>("Mean");
auto *var = ctx.Output<Tensor>("Variance");
const auto begin_norm_axis = ctx.Attr<int>("begin_norm_axis");
const auto x_dims = x->dims();
auto *x_data = x->data<T>();
auto *y_data = y->mutable_data<T>(ctx.GetPlace());
auto *mean_data = mean->mutable_data<LayerNormParamType<T>>(ctx.GetPlace());
auto *var_data = var->mutable_data<LayerNormParamType<T>>(ctx.GetPlace());
auto *scale_data =
(scale == nullptr ? nullptr : scale->data<LayerNormParamType<T>>());
auto *bias_data =
(bias == nullptr ? nullptr : bias->data<LayerNormParamType<T>>());
auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis);
int batch_size = static_cast<int>(matrix_dim[0]);
int feature_size = static_cast<int>(matrix_dim[1]);
auto stream = ctx.cuda_device_context().stream();
switch (GetDesiredBlockDim(feature_size)) {
FIXED_BLOCK_DIM_CASE(
hipLaunchKernelGGL(( LayerNormForward<T, LayerNormParamType<T>,
kBlockDim>), dim3(batch_size), dim3(kBlockDim), 0, stream,
x_data, scale_data, bias_data, y_data, mean_data, var_data,
epsilon, feature_size));
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"Product from begin_norm_axis to end must be larger than 1"));
break;
}
}
};
template <typename T>
class LayerNormGradKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
using U = LayerNormParamType<T>;
const float epsilon = ctx.Attr<float>("epsilon");
// d_x, d_scale, d_bias may be nullptr
auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X"));
auto *d_scale = ctx.Output<Tensor>(framework::GradVarName("Scale"));
auto *d_bias = ctx.Output<Tensor>(framework::GradVarName("Bias"));
auto *x = ctx.Input<Tensor>("X");
auto *mean = ctx.Input<Tensor>("Mean");
auto *var = ctx.Input<Tensor>("Variance");
auto *scale = ctx.Input<Tensor>("Scale");
auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y"));
auto *x_data = x->data<T>();
auto *d_y_data = d_y->data<T>();
auto *mean_data = mean->data<U>();
auto *var_data = var->data<U>();
auto *scale_data = (scale == nullptr ? nullptr : scale->data<U>());
auto *d_scale_data =
(d_scale == nullptr ? nullptr
: d_scale->mutable_data<U>(ctx.GetPlace()));
auto *d_bias_data =
(d_bias == nullptr ? nullptr : d_bias->mutable_data<U>(ctx.GetPlace()));
auto *d_x_data =
(d_x == nullptr ? nullptr : d_x->mutable_data<T>(ctx.GetPlace()));
const auto &x_dims = x->dims();
const auto begin_norm_axis = ctx.Attr<int>("begin_norm_axis");
auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis);
int batch_size = static_cast<int>(matrix_dim[0]);
int feature_size = static_cast<int>(matrix_dim[1]);
auto stream = ctx.cuda_device_context().stream();
LayerNormBackward<T, U>(x_data, d_y_data, scale_data, mean_data, var_data,
d_x_data, d_scale_data, d_bias_data, epsilon,
batch_size, feature_size, stream);
}
};
template class LayerNormDirectCUDAFunctor<float>;
#undef FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE
#undef FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE
#undef FIXED_BLOCK_DIM_CASE_BASE
#undef FIXED_BLOCK_DIM_CASE
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(
layer_norm,
ops::LayerNormKernel<paddle::platform::CUDADeviceContext, float>,
ops::LayerNormKernel<paddle::platform::CUDADeviceContext, double>,
ops::LayerNormKernel<paddle::platform::CUDADeviceContext, plat::float16>);
REGISTER_OP_CUDA_KERNEL(
layer_norm_grad,
ops::LayerNormGradKernel<paddle::platform::CUDADeviceContext, float>,
ops::LayerNormGradKernel<paddle::platform::CUDADeviceContext, double>,
ops::LayerNormGradKernel<paddle::platform::CUDADeviceContext,
plat::float16>);
| bf228549f1eaf112f34d01fbf111cd9c7fc11c7c.cu | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <cub/cub.cuh>
#include <memory>
#include <vector>
#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/operators/layer_norm_op.h"
#include "paddle/fluid/platform/cudnn_helper.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using DataLayout = framework::DataLayout;
template <typename T>
using CudnnDataType = platform::CudnnDataType<T>;
template <typename T>
using LayerNormParamType = typename CudnnDataType<T>::BatchNormParamType;
inline static int GetDesiredBlockDim(int block_dim) {
const int kMaxBlockDim = 512;
return block_dim >= kMaxBlockDim
? kMaxBlockDim
: (1 << (static_cast<int>(std::log2f(block_dim))));
}
#define FIXED_BLOCK_DIM_CASE_BASE(log2_block_dim, ...) \
case (1 << (log2_block_dim)): { \
constexpr auto kBlockDim = (1 << (log2_block_dim)); \
__VA_ARGS__; \
} break
#define FIXED_BLOCK_DIM_CASE(...) \
FIXED_BLOCK_DIM_CASE_BASE(9, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(8, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(7, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(6, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(5, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(4, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(3, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(2, ##__VA_ARGS__); \
FIXED_BLOCK_DIM_CASE_BASE(1, ##__VA_ARGS__)
#define FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE( \
log2_block_dim, feature_size, kMaxBlockNum, ...) \
case (1 << (log2_block_dim)): { \
for (int i = 0; i < std::ceil(feature_size / (1.0 * kMaxBlockNum)); i++) { \
int col_offset = i * kMaxBlockNum; \
int block_num = std::min(feature_size - col_offset, kMaxBlockNum); \
constexpr auto kBlockDim = (1 << (log2_block_dim)); \
__VA_ARGS__; \
} \
} break
#define FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE(feature_size, kMaxBlockNum, ...) \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(9, feature_size, kMaxBlockNum, \
##__VA_ARGS__); \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(8, feature_size, kMaxBlockNum, \
##__VA_ARGS__); \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(7, feature_size, kMaxBlockNum, \
##__VA_ARGS__); \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(6, feature_size, kMaxBlockNum, \
##__VA_ARGS__); \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(5, feature_size, kMaxBlockNum, \
##__VA_ARGS__); \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(4, feature_size, kMaxBlockNum, \
##__VA_ARGS__); \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(3, feature_size, kMaxBlockNum, \
##__VA_ARGS__); \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(2, feature_size, kMaxBlockNum, \
##__VA_ARGS__); \
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE(1, feature_size, kMaxBlockNum, \
##__VA_ARGS__)
static __device__ __forceinline__ float real_sqrt(float x) { return sqrtf(x); }
static __device__ __forceinline__ double real_sqrt(double x) { return sqrt(x); }
template <typename T>
struct PairForLayerNorm {
__device__ __forceinline__ PairForLayerNorm() {}
__device__ __forceinline__ PairForLayerNorm(const T &first, const T &second)
: first_(first), second_(second) {}
T first_;
T second_;
};
template <typename T>
struct PairForLayerNormAddFunctor {
__device__ __forceinline__ PairForLayerNorm<T> operator()(
const PairForLayerNorm<T> &p1, const PairForLayerNorm<T> &p2) {
return PairForLayerNorm<T>(p1.first_ + p2.first_, p1.second_ + p2.second_);
}
};
template <typename T, typename U, int BlockDim>
__global__ void LayerNormForward(const T *x, const U *scale, const U *bias,
T *y, U *mean, U *var, float epsilon,
int feature_size) {
using BlockReduce = cub::BlockReduce<PairForLayerNorm<double>, BlockDim>;
__shared__ typename BlockReduce::TempStorage temp_storage;
int beg_idx = blockIdx.x * feature_size + threadIdx.x;
int end_idx = (blockIdx.x + 1) * feature_size;
// Step 1: Reduce to calculate mean and var
double mean_val = 0;
double var_val = 0;
for (int i = beg_idx; i < end_idx; i += BlockDim) {
U tmp = static_cast<U>(x[i]);
mean_val += tmp;
var_val += (tmp * tmp);
}
auto pair = BlockReduce(temp_storage)
.Reduce(PairForLayerNorm<double>(mean_val, var_val),
PairForLayerNormAddFunctor<double>());
if (threadIdx.x == 0) {
auto tmp = pair.first_ / feature_size;
mean[blockIdx.x] = static_cast<U>(tmp);
var[blockIdx.x] = static_cast<U>(pair.second_ / feature_size - tmp * tmp);
}
__syncthreads();
mean_val = mean[blockIdx.x];
var_val = static_cast<U>(real_sqrt(var[blockIdx.x] + epsilon));
// Step 2: Calculate y
if (scale != nullptr) {
if (bias != nullptr) {
for (int i = beg_idx, j = threadIdx.x; i < end_idx;
i += BlockDim, j += BlockDim) {
y[i] = static_cast<T>(
scale[j] * (static_cast<U>(x[i]) - mean_val) / var_val + bias[j]);
}
} else {
for (int i = beg_idx, j = threadIdx.x; i < end_idx;
i += BlockDim, j += BlockDim) {
y[i] = static_cast<T>(scale[j] * (static_cast<U>(x[i]) - mean_val) /
var_val);
}
}
} else { // scale == nullptr
if (bias != nullptr) {
for (int i = beg_idx, j = threadIdx.x; i < end_idx;
i += BlockDim, j += BlockDim) {
y[i] = static_cast<T>((static_cast<U>(x[i]) - mean_val) / var_val +
bias[j]);
}
} else {
for (int i = beg_idx, j = threadIdx.x; i < end_idx;
i += BlockDim, j += BlockDim) {
y[i] = static_cast<T>((static_cast<U>(x[i]) - mean_val) / var_val);
}
}
}
}
// Make sure that d_scale != nullptr && d_bias != nullptr
// Since d_scale != nullptr, scale would not be nullptr
template <typename T, typename U, int BlockDim, bool HasDx>
__global__ void LayerNormBackwardGradientAll(const T *x, const T *d_y,
U *d_scale, U *d_bias, T *d_x,
const U *mean, const U *var,
const U *scale, float epsilon,
int batch_size, int feature_size,
int col_offset) {
using BlockReduce = cub::BlockReduce<PairForLayerNorm<U>, BlockDim>;
__shared__ typename BlockReduce::TempStorage temp_storage;
int beg_idx = threadIdx.x * feature_size + (blockIdx.x + col_offset);
int end_idx = batch_size * feature_size + (blockIdx.x + col_offset);
int stride = BlockDim * feature_size;
U d_scale_partial = static_cast<U>(0), d_bias_partial = static_cast<U>(0);
for (int i = beg_idx; i < end_idx; i += stride) {
int row_idx = i / feature_size;
auto var_val = real_sqrt(static_cast<U>(var[row_idx]) + epsilon);
d_scale_partial += static_cast<U>(d_y[i]) *
(static_cast<U>(x[i]) - mean[row_idx]) / var_val;
d_bias_partial += static_cast<U>(d_y[i]);
if (HasDx) {
d_x[i] = static_cast<T>(static_cast<U>(d_y[i]) *
scale[blockIdx.x + col_offset] / var_val);
}
}
auto pair = BlockReduce(temp_storage)
.Reduce(PairForLayerNorm<U>(d_scale_partial, d_bias_partial),
PairForLayerNormAddFunctor<U>());
if (threadIdx.x == 0) {
d_scale[blockIdx.x + col_offset] = pair.first_;
d_bias[blockIdx.x + col_offset] = pair.second_;
}
}
// Make sure that there is only one true expression: d_scale != nullptr
// or d_bias != nullptr
// Notice: scale may be nullptr
template <typename T, typename U, int BlockDim, bool HasDx, bool HasDScale>
__global__ void LayerNormBackwardGradientScaleOrBias(
const T *x, const T *d_y, U *d_scale, U *d_bias, T *d_x, const U *mean,
const U *var, const U *scale, float epsilon, int batch_size,
int feature_size, int col_offset) {
using BlockReduce = cub::BlockReduce<U, BlockDim>;
__shared__ typename BlockReduce::TempStorage temp_storage;
int beg_idx = threadIdx.x * feature_size + blockIdx.x + col_offset;
int end_idx = batch_size * feature_size + blockIdx.x + col_offset;
int stride = BlockDim * feature_size;
U d_scale_or_d_bias_partial = static_cast<U>(0);
for (int i = beg_idx; i < end_idx; i += stride) {
int row_idx = i / feature_size;
auto var_val =
static_cast<U>(real_sqrt(static_cast<float>(var[row_idx]) + epsilon));
if (HasDScale) {
d_scale_or_d_bias_partial += static_cast<U>(d_y[i]) *
(static_cast<U>(x[i]) - mean[row_idx]) /
var_val;
} else { // d_bias != nullptr
d_scale_or_d_bias_partial += static_cast<U>(d_y[i]);
}
if (HasDx) {
if (scale != nullptr) {
d_x[i] = static_cast<T>(static_cast<U>(d_y[i]) *
scale[blockIdx.x + col_offset] / var_val);
} else {
d_x[i] = static_cast<T>(static_cast<U>(d_y[i]) / var_val);
}
}
}
d_scale_or_d_bias_partial =
BlockReduce(temp_storage).Reduce(d_scale_or_d_bias_partial, cub::Sum());
if (threadIdx.x == 0) {
if (HasDScale) {
d_scale[blockIdx.x + col_offset] = d_scale_or_d_bias_partial;
} else {
d_bias[blockIdx.x + col_offset] = d_scale_or_d_bias_partial;
}
}
}
template <typename T, typename U, int BlockDim>
__global__ void LayerNormBackwardPostProcessToCalculateDX(const T *x, T *d_x,
const U *mean,
const U *var,
float epsilon,
int feature_size) {
using BlockReduce = cub::BlockReduce<PairForLayerNorm<U>, BlockDim>;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ U d_x_reduce_tmp[2];
int beg_idx = blockIdx.x * feature_size + threadIdx.x;
int end_idx = (blockIdx.x + 1) * feature_size;
U block_mean = mean[blockIdx.x];
U block_var = var[blockIdx.x];
U d_x_mean_partial = static_cast<U>(0), d_x_var_partial = static_cast<U>(0);
for (int i = beg_idx; i < end_idx; i += BlockDim) {
d_x_mean_partial += static_cast<U>(d_x[i]);
d_x_var_partial +=
static_cast<U>(d_x[i]) * (static_cast<U>(x[i]) - block_mean);
}
auto pair =
BlockReduce(temp_storage)
.Reduce(PairForLayerNorm<U>(d_x_mean_partial, d_x_var_partial),
PairForLayerNormAddFunctor<U>());
if (threadIdx.x == 0) {
d_x_reduce_tmp[0] = static_cast<float>(pair.first_) / feature_size;
d_x_reduce_tmp[1] =
static_cast<float>(pair.second_) /
(feature_size * (static_cast<float>(block_var) + epsilon));
}
__syncthreads();
d_x_mean_partial = d_x_reduce_tmp[0];
d_x_var_partial = d_x_reduce_tmp[1];
for (int i = beg_idx; i < end_idx; i += BlockDim) {
d_x[i] -= static_cast<T>(d_x_mean_partial);
d_x[i] -=
static_cast<T>((static_cast<U>(x[i]) - block_mean) * d_x_var_partial);
}
}
// Here, we only calculate d_x
template <typename T, typename U, int BlockDim>
__global__ void LayerNormBackwardGradientOnlyDX(const T *x, const T *d_y,
T *d_x, const U *mean,
const U *var, const U *scale,
float epsilon,
int feature_size) {
using BlockReduce = cub::BlockReduce<PairForLayerNorm<U>, BlockDim>;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ U d_x_reduce_tmp[2];
int beg_idx = blockIdx.x * feature_size + threadIdx.x;
int end_idx = (blockIdx.x + 1) * feature_size;
U block_mean = mean[blockIdx.x], block_var = var[blockIdx.x];
U d_x_mean_partial = static_cast<U>(0), d_x_var_partial = static_cast<U>(0);
for (int i = beg_idx; i < end_idx; i += BlockDim) {
auto var_val =
static_cast<U>(real_sqrt(static_cast<float>(block_var) + epsilon));
if (scale != nullptr) {
int col_idx = i % feature_size;
d_x[i] =
static_cast<T>(static_cast<U>(d_y[i]) * scale[col_idx] / var_val);
} else {
d_x[i] = static_cast<T>(static_cast<U>(d_y[i]) / var_val);
}
d_x_mean_partial += static_cast<U>(d_x[i]);
d_x_var_partial +=
static_cast<U>(d_x[i]) * (static_cast<U>(x[i]) - block_mean);
}
auto pair =
BlockReduce(temp_storage)
.Reduce(PairForLayerNorm<U>(d_x_mean_partial, d_x_var_partial),
PairForLayerNormAddFunctor<U>());
if (threadIdx.x == 0) {
d_x_reduce_tmp[0] = static_cast<float>(pair.first_) / feature_size;
d_x_reduce_tmp[1] =
static_cast<float>(pair.second_) /
(feature_size * (static_cast<float>(block_var) + epsilon));
}
__syncthreads();
d_x_mean_partial = d_x_reduce_tmp[0];
d_x_var_partial = d_x_reduce_tmp[1];
for (int i = beg_idx; i < end_idx; i += BlockDim) {
d_x[i] -= static_cast<T>(d_x_mean_partial);
d_x[i] -=
static_cast<T>((static_cast<U>(x[i]) - block_mean) * d_x_var_partial);
}
}
template <typename T, typename U>
__global__ void LayerNormBackwardWhenBatchSizeIsOne(
const T *x, const T *d_y, T *d_x, U *d_scale, U *d_bias, const U *mean,
const U *var, const U *scale, float epsilon, int feature_size) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < feature_size) {
auto var_val =
static_cast<U>(real_sqrt(static_cast<float>(var[idx]) + epsilon));
if (d_x != nullptr) {
if (d_scale == nullptr) {
d_x[idx] = static_cast<T>(static_cast<U>(d_y[idx]) / var_val);
} else {
d_x[idx] =
static_cast<T>(static_cast<U>(d_y[idx]) * scale[idx] / var_val);
}
}
if (d_scale != nullptr) {
d_scale[idx] = static_cast<U>(d_y[idx]) *
(static_cast<U>(x[idx]) - mean[idx]) / var_val;
}
if (d_bias != nullptr) d_bias[idx] = static_cast<U>(d_y[idx]);
}
}
template <typename T, typename U>
static void LayerNormBackward(const T *x, const T *d_y, const U *scale,
const U *mean, const U *var, T *d_x, U *d_scale,
U *d_bias, float epsilon, int batch_size,
int feature_size, cudaStream_t stream) {
const int kMaxBlockDim = 512;
const int kMaxBlockNum = 128;
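// gradient_flag packs the requested outputs into a 3-bit mask:
// bit 2 -> d_x, bit 1 -> d_scale, bit 0 -> d_bias.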
int gradient_flag = ((d_x != nullptr ? 1 : 0) << 2) |
((d_scale != nullptr ? 1 : 0) << 1) |
((d_bias != nullptr ? 1 : 0));
if (gradient_flag == 0) return;
if (batch_size == 1) {
LayerNormBackwardWhenBatchSizeIsOne<
T, U><<<(feature_size + kMaxBlockDim - 1) / kMaxBlockDim, kMaxBlockDim,
0, stream>>>(x, d_y, d_x, d_scale, d_bias, mean, var, scale,
epsilon, feature_size);
if (d_x != nullptr) {
switch (GetDesiredBlockDim(feature_size)) {
FIXED_BLOCK_DIM_CASE(LayerNormBackwardPostProcessToCalculateDX<
T, U, kBlockDim><<<1, kBlockDim, 0, stream>>>(
x, d_x, mean, var, epsilon, feature_size));
}
}
return;
}
auto block_dim = GetDesiredBlockDim(batch_size);
switch (gradient_flag) {
case 1: // d_x == nullptr, d_scale == nullptr, d_bias != nullptr
switch (block_dim) {
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE(
feature_size, kMaxBlockNum,
LayerNormBackwardGradientScaleOrBias<
T, U, kBlockDim, false,
false><<<block_num, kBlockDim, 0, stream>>>(
x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon,
batch_size, feature_size, col_offset));
}
break;
case 2: // d_x == nullptr, d_scale != nullptr, d_bias == nullptr
switch (block_dim) {
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE(
feature_size, kMaxBlockNum,
LayerNormBackwardGradientScaleOrBias<
T, U, kBlockDim, false,
true><<<block_num, kBlockDim, 0, stream>>>(
x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon,
batch_size, feature_size, col_offset));
}
break;
case 3: // d_x == nullptr, d_scale != nullptr, d_bias != nullptr
switch (block_dim) {
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE(
feature_size, kMaxBlockNum,
LayerNormBackwardGradientAll<
T, U, kBlockDim, false><<<block_num, kBlockDim, 0, stream>>>(
x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon,
batch_size, feature_size, col_offset));
}
break;
case 4: // d_x != nullptr, d_scale == nullptr, d_bias == nullptr
switch (GetDesiredBlockDim(feature_size)) {
FIXED_BLOCK_DIM_CASE(
LayerNormBackwardGradientOnlyDX<
T, U, kBlockDim><<<batch_size, kBlockDim, 0, stream>>>(
x, d_y, d_x, mean, var, scale, epsilon, feature_size));
}
break;
case 5: // d_x != nullptr, d_scale == nullptr, d_bias != nullptr
switch (block_dim) {
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE(
feature_size, kMaxBlockNum,
LayerNormBackwardGradientScaleOrBias<
T, U, kBlockDim, true,
false><<<block_num, kBlockDim, 0, stream>>>(
x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon,
batch_size, feature_size, col_offset));
}
switch (GetDesiredBlockDim(feature_size)) {
FIXED_BLOCK_DIM_CASE(
LayerNormBackwardPostProcessToCalculateDX<
T, U, kBlockDim><<<batch_size, kBlockDim, 0, stream>>>(
x, d_x, mean, var, epsilon, feature_size));
}
break;
case 6: // d_x != nullptr, d_scale != nullptr, d_bias == nullptr
switch (block_dim) {
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE(
feature_size, kMaxBlockNum,
LayerNormBackwardGradientScaleOrBias<
T, U, kBlockDim, true,
true><<<block_num, kBlockDim, 0, stream>>>(
x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon,
batch_size, feature_size, col_offset));
}
switch (GetDesiredBlockDim(feature_size)) {
FIXED_BLOCK_DIM_CASE(
LayerNormBackwardPostProcessToCalculateDX<
T, U, kBlockDim><<<batch_size, kBlockDim, 0, stream>>>(
x, d_x, mean, var, epsilon, feature_size));
}
break;
case 7: // d_x != nullptr, d_scale != nullptr, d_bias != nullptr
switch (block_dim) {
FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE(
feature_size, kMaxBlockNum,
LayerNormBackwardGradientAll<
T, U, kBlockDim, true><<<block_num, kBlockDim, 0, stream>>>(
x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon,
batch_size, feature_size, col_offset));
}
switch (GetDesiredBlockDim(feature_size)) {
FIXED_BLOCK_DIM_CASE(
LayerNormBackwardPostProcessToCalculateDX<
T, U, kBlockDim><<<batch_size, kBlockDim, 0, stream>>>(
x, d_x, mean, var, epsilon, feature_size));
}
break;
default:
break;
}
}
template <typename T>
void LayerNormDirectCUDAFunctor<T>::operator()(cudaStream_t stream,
const T *input,
std::vector<int> input_shape,
const T *bias, const T *scale,
T *output, T *mean, T *variance,
int begin_norm_axis, float eps) {
const auto x_dims = framework::make_ddim(input_shape);
auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis);
int batch_size = static_cast<int>(matrix_dim[0]);
int feature_size = static_cast<int>(matrix_dim[1]);
switch (GetDesiredBlockDim(feature_size)) {
FIXED_BLOCK_DIM_CASE(
LayerNormForward<T, T, kBlockDim><<<batch_size, kBlockDim, 0, stream>>>(
input, scale, bias, output, mean, variance, eps, feature_size));
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"Product from begin_norm_axis to end in layer_norm must be larger "
"than 1"));
break;
}
}
template <typename T>
class LayerNormKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
const float epsilon = ctx.Attr<float>("epsilon");
auto *scale = ctx.Input<Tensor>("Scale");
auto *bias = ctx.Input<Tensor>("Bias");
auto *x = ctx.Input<Tensor>("X");
auto *y = ctx.Output<Tensor>("Y");
auto *mean = ctx.Output<Tensor>("Mean");
auto *var = ctx.Output<Tensor>("Variance");
const auto begin_norm_axis = ctx.Attr<int>("begin_norm_axis");
const auto x_dims = x->dims();
auto *x_data = x->data<T>();
auto *y_data = y->mutable_data<T>(ctx.GetPlace());
auto *mean_data = mean->mutable_data<LayerNormParamType<T>>(ctx.GetPlace());
auto *var_data = var->mutable_data<LayerNormParamType<T>>(ctx.GetPlace());
auto *scale_data =
(scale == nullptr ? nullptr : scale->data<LayerNormParamType<T>>());
auto *bias_data =
(bias == nullptr ? nullptr : bias->data<LayerNormParamType<T>>());
auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis);
int batch_size = static_cast<int>(matrix_dim[0]);
int feature_size = static_cast<int>(matrix_dim[1]);
auto stream = ctx.cuda_device_context().stream();
switch (GetDesiredBlockDim(feature_size)) {
FIXED_BLOCK_DIM_CASE(
LayerNormForward<T, LayerNormParamType<T>,
kBlockDim><<<batch_size, kBlockDim, 0, stream>>>(
x_data, scale_data, bias_data, y_data, mean_data, var_data,
epsilon, feature_size));
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"Product from begin_norm_axis to end must be larger than 1"));
break;
}
}
};
template <typename T>
class LayerNormGradKernel<platform::CUDADeviceContext, T>
: public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &ctx) const override {
using U = LayerNormParamType<T>;
const float epsilon = ctx.Attr<float>("epsilon");
// d_x, d_scale, d_bias may be nullptr
auto *d_x = ctx.Output<Tensor>(framework::GradVarName("X"));
auto *d_scale = ctx.Output<Tensor>(framework::GradVarName("Scale"));
auto *d_bias = ctx.Output<Tensor>(framework::GradVarName("Bias"));
auto *x = ctx.Input<Tensor>("X");
auto *mean = ctx.Input<Tensor>("Mean");
auto *var = ctx.Input<Tensor>("Variance");
auto *scale = ctx.Input<Tensor>("Scale");
auto *d_y = ctx.Input<Tensor>(framework::GradVarName("Y"));
auto *x_data = x->data<T>();
auto *d_y_data = d_y->data<T>();
auto *mean_data = mean->data<U>();
auto *var_data = var->data<U>();
auto *scale_data = (scale == nullptr ? nullptr : scale->data<U>());
auto *d_scale_data =
(d_scale == nullptr ? nullptr
: d_scale->mutable_data<U>(ctx.GetPlace()));
auto *d_bias_data =
(d_bias == nullptr ? nullptr : d_bias->mutable_data<U>(ctx.GetPlace()));
auto *d_x_data =
(d_x == nullptr ? nullptr : d_x->mutable_data<T>(ctx.GetPlace()));
const auto &x_dims = x->dims();
const auto begin_norm_axis = ctx.Attr<int>("begin_norm_axis");
auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis);
int batch_size = static_cast<int>(matrix_dim[0]);
int feature_size = static_cast<int>(matrix_dim[1]);
auto stream = ctx.cuda_device_context().stream();
LayerNormBackward<T, U>(x_data, d_y_data, scale_data, mean_data, var_data,
d_x_data, d_scale_data, d_bias_data, epsilon,
batch_size, feature_size, stream);
}
};
template class LayerNormDirectCUDAFunctor<float>;
#undef FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE_BASE
#undef FIXED_BLOCK_DIM_FIXED_BLOCK_NUM_CASE
#undef FIXED_BLOCK_DIM_CASE_BASE
#undef FIXED_BLOCK_DIM_CASE
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(
layer_norm,
ops::LayerNormKernel<paddle::platform::CUDADeviceContext, float>,
ops::LayerNormKernel<paddle::platform::CUDADeviceContext, double>,
ops::LayerNormKernel<paddle::platform::CUDADeviceContext, plat::float16>);
REGISTER_OP_CUDA_KERNEL(
layer_norm_grad,
ops::LayerNormGradKernel<paddle::platform::CUDADeviceContext, float>,
ops::LayerNormGradKernel<paddle::platform::CUDADeviceContext, double>,
ops::LayerNormGradKernel<paddle::platform::CUDADeviceContext,
plat::float16>);
|
0658696de27a640b8d822e8ba524244f34308abe.hip | // !!! This is a file automatically generated by hipify!!!
#include <gtest/gtest.h>
#include <thrust/copy.h>
#include <vector>
#include "../../../../src/common/categorical.h"
#include "../../../../src/tree/gpu_hist/histogram.cuh"
#include "../../../../src/tree/gpu_hist/row_partitioner.cuh"
#include "../../categorical_helpers.h"
#include "../../helpers.h"
namespace xgboost {
namespace tree {
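// Builds the same histogram repeatedly and checks that the quantised results are
// bit-identical across rounds, and close to a single-feature-group baseline.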
void TestDeterministicHistogram(bool is_dense, int shm_size) {
size_t constexpr kBins = 256, kCols = 120, kRows = 16384, kRounds = 16;
float constexpr kLower = -1e-2, kUpper = 1e2;
float sparsity = is_dense ? 0.0f : 0.5f;
auto matrix = RandomDataGenerator(kRows, kCols, sparsity).GenerateDMatrix();
BatchParam batch_param{0, static_cast<int32_t>(kBins)};
for (auto const& batch : matrix->GetBatches<EllpackPage>(batch_param)) {
auto* page = batch.Impl();
tree::RowPartitioner row_partitioner(0, kRows);
auto ridx = row_partitioner.GetRows(0);
int num_bins = kBins * kCols;
dh::device_vector<GradientPairInt64> histogram(num_bins);
auto d_histogram = dh::ToSpan(histogram);
auto gpair = GenerateRandomGradients(kRows, kLower, kUpper);
gpair.SetDevice(0);
FeatureGroups feature_groups(page->Cuts(), page->is_dense, shm_size,
sizeof(GradientPairInt64));
auto quantiser = GradientQuantiser(gpair.DeviceSpan());
BuildGradientHistogram(page->GetDeviceAccessor(0),
feature_groups.DeviceAccessor(0), gpair.DeviceSpan(),
ridx, d_histogram, quantiser);
std::vector<GradientPairInt64> histogram_h(num_bins);
dh::safe_cuda(hipMemcpy(histogram_h.data(), d_histogram.data(),
num_bins * sizeof(GradientPairInt64),
hipMemcpyDeviceToHost));
for (size_t i = 0; i < kRounds; ++i) {
dh::device_vector<GradientPairInt64> new_histogram(num_bins);
auto d_new_histogram = dh::ToSpan(new_histogram);
auto quantiser = GradientQuantiser(gpair.DeviceSpan());
BuildGradientHistogram(page->GetDeviceAccessor(0),
feature_groups.DeviceAccessor(0),
gpair.DeviceSpan(), ridx, d_new_histogram,
quantiser);
std::vector<GradientPairInt64> new_histogram_h(num_bins);
dh::safe_cuda(hipMemcpy(new_histogram_h.data(), d_new_histogram.data(),
num_bins * sizeof(GradientPairInt64),
hipMemcpyDeviceToHost));
for (size_t j = 0; j < new_histogram_h.size(); ++j) {
ASSERT_EQ(new_histogram_h[j].GetQuantisedGrad(), histogram_h[j].GetQuantisedGrad());
ASSERT_EQ(new_histogram_h[j].GetQuantisedHess(), histogram_h[j].GetQuantisedHess());
}
}
{
auto gpair = GenerateRandomGradients(kRows, kLower, kUpper);
gpair.SetDevice(0);
// Use a single feature group to compute the baseline.
FeatureGroups single_group(page->Cuts());
dh::device_vector<GradientPairInt64> baseline(num_bins);
BuildGradientHistogram(page->GetDeviceAccessor(0),
single_group.DeviceAccessor(0),
gpair.DeviceSpan(), ridx, dh::ToSpan(baseline),
quantiser);
std::vector<GradientPairInt64> baseline_h(num_bins);
dh::safe_cuda(hipMemcpy(baseline_h.data(), baseline.data().get(),
num_bins * sizeof(GradientPairInt64),
hipMemcpyDeviceToHost));
for (size_t i = 0; i < baseline.size(); ++i) {
EXPECT_NEAR(baseline_h[i].GetQuantisedGrad(), histogram_h[i].GetQuantisedGrad(),
baseline_h[i].GetQuantisedGrad() * 1e-3);
}
}
}
}
TEST(Histogram, GPUDeterministic) {
std::vector<bool> is_dense_array{false, true};
std::vector<int> shm_sizes{48 * 1024, 64 * 1024, 160 * 1024};
for (bool is_dense : is_dense_array) {
for (int shm_size : shm_sizes) {
TestDeterministicHistogram(is_dense, shm_size);
}
}
}
void ValidateCategoricalHistogram(size_t n_categories, common::Span<GradientPairInt64> onehot,
common::Span<GradientPairInt64> cat) {
auto cat_sum = std::accumulate(cat.cbegin(), cat.cend(), GradientPairInt64{});
for (size_t c = 0; c < n_categories; ++c) {
auto zero = onehot[c * 2];
auto one = onehot[c * 2 + 1];
auto chosen = cat[c];
auto not_chosen = cat_sum - chosen;
ASSERT_EQ(zero, not_chosen);
ASSERT_EQ(one, chosen);
}
}
// Test that the 1-vs-rest categorical histogram is equivalent to one-hot encoded data.
void TestGPUHistogramCategorical(size_t num_categories) {
size_t constexpr kRows = 340;
size_t constexpr kBins = 256;
auto x = GenerateRandomCategoricalSingleColumn(kRows, num_categories);
auto cat_m = GetDMatrixFromData(x, kRows, 1);
cat_m->Info().feature_types.HostVector().push_back(FeatureType::kCategorical);
BatchParam batch_param{0, static_cast<int32_t>(kBins)};
tree::RowPartitioner row_partitioner(0, kRows);
auto ridx = row_partitioner.GetRows(0);
dh::device_vector<GradientPairInt64> cat_hist(num_categories);
auto gpair = GenerateRandomGradients(kRows, 0, 2);
gpair.SetDevice(0);
auto quantiser = GradientQuantiser(gpair.DeviceSpan());
/**
* Generate hist with cat data.
*/
for (auto const &batch : cat_m->GetBatches<EllpackPage>(batch_param)) {
auto* page = batch.Impl();
FeatureGroups single_group(page->Cuts());
BuildGradientHistogram(page->GetDeviceAccessor(0),
single_group.DeviceAccessor(0),
gpair.DeviceSpan(), ridx, dh::ToSpan(cat_hist),
quantiser);
}
/**
* Generate hist with one hot encoded data.
*/
auto x_encoded = OneHotEncodeFeature(x, num_categories);
auto encode_m = GetDMatrixFromData(x_encoded, kRows, num_categories);
dh::device_vector<GradientPairInt64> encode_hist(2 * num_categories);
for (auto const &batch : encode_m->GetBatches<EllpackPage>(batch_param)) {
auto* page = batch.Impl();
FeatureGroups single_group(page->Cuts());
BuildGradientHistogram(page->GetDeviceAccessor(0),
single_group.DeviceAccessor(0),
gpair.DeviceSpan(), ridx, dh::ToSpan(encode_hist),
quantiser);
}
std::vector<GradientPairInt64> h_cat_hist(cat_hist.size());
thrust::copy(cat_hist.begin(), cat_hist.end(), h_cat_hist.begin());
std::vector<GradientPairInt64> h_encode_hist(encode_hist.size());
thrust::copy(encode_hist.begin(), encode_hist.end(), h_encode_hist.begin());
ValidateCategoricalHistogram(num_categories,
common::Span<GradientPairInt64>{h_encode_hist},
common::Span<GradientPairInt64>{h_cat_hist});
}
TEST(Histogram, GPUHistCategorical) {
for (size_t num_categories = 2; num_categories < 8; ++num_categories) {
TestGPUHistogramCategorical(num_categories);
}
}
namespace {
// Plain 64-bit atomic add (via a cast to the unsigned overload), used as the reference in the test below.
XGBOOST_DEV_INLINE int64_t atomicAdd(int64_t *dst, int64_t src) { // NOLINT
uint64_t* u_dst = reinterpret_cast<uint64_t*>(dst);
uint64_t u_src = *reinterpret_cast<uint64_t*>(&src);
uint64_t ret = ::atomicAdd(u_dst, u_src);
return *reinterpret_cast<int64_t*>(&ret);
}
}
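// Verifies that AtomicAdd64As32 matches the plain 64-bit atomicAdd above, including
// sums that overflow a 32-bit integer in either direction.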
void TestAtomicAdd() {
size_t n_elements = 1024;
dh::device_vector<int64_t> result_a(1, 0);
auto d_result_a = result_a.data().get();
dh::device_vector<int64_t> result_b(1, 0);
auto d_result_b = result_b.data().get();
/**
* Test for simple inputs
*/
std::vector<int64_t> h_inputs(n_elements);
for (size_t i = 0; i < h_inputs.size(); ++i) {
h_inputs[i] = (i % 2 == 0) ? i : -i;
}
dh::device_vector<int64_t> inputs(h_inputs);
auto d_inputs = inputs.data().get();
dh::LaunchN(n_elements, [=] __device__(size_t i) {
AtomicAdd64As32(d_result_a, d_inputs[i]);
atomicAdd(d_result_b, d_inputs[i]);
});
ASSERT_EQ(result_a[0], result_b[0]);
/**
* Test for positive values that don't fit into 32 bit integer.
*/
thrust::fill(inputs.begin(), inputs.end(),
(std::numeric_limits<uint32_t>::max() / 2));
thrust::fill(result_a.begin(), result_a.end(), 0);
thrust::fill(result_b.begin(), result_b.end(), 0);
dh::LaunchN(n_elements, [=] __device__(size_t i) {
AtomicAdd64As32(d_result_a, d_inputs[i]);
atomicAdd(d_result_b, d_inputs[i]);
});
ASSERT_EQ(result_a[0], result_b[0]);
ASSERT_GT(result_a[0], std::numeric_limits<uint32_t>::max());
CHECK_EQ(thrust::reduce(inputs.begin(), inputs.end(), int64_t(0)), result_a[0]);
/**
* Test for negative values that don't fit into 32 bit integer.
*/
thrust::fill(inputs.begin(), inputs.end(),
(std::numeric_limits<int32_t>::min() / 2));
thrust::fill(result_a.begin(), result_a.end(), 0);
thrust::fill(result_b.begin(), result_b.end(), 0);
dh::LaunchN(n_elements, [=] __device__(size_t i) {
AtomicAdd64As32(d_result_a, d_inputs[i]);
atomicAdd(d_result_b, d_inputs[i]);
});
ASSERT_EQ(result_a[0], result_b[0]);
ASSERT_LT(result_a[0], std::numeric_limits<int32_t>::min());
CHECK_EQ(thrust::reduce(inputs.begin(), inputs.end(), int64_t(0)), result_a[0]);
}
TEST(Histogram, AtomicAddInt64) {
TestAtomicAdd();
}
} // namespace tree
} // namespace xgboost
| 0658696de27a640b8d822e8ba524244f34308abe.cu | #include <gtest/gtest.h>
#include <thrust/copy.h>
#include <vector>
#include "../../../../src/common/categorical.h"
#include "../../../../src/tree/gpu_hist/histogram.cuh"
#include "../../../../src/tree/gpu_hist/row_partitioner.cuh"
#include "../../categorical_helpers.h"
#include "../../helpers.h"
namespace xgboost {
namespace tree {
void TestDeterministicHistogram(bool is_dense, int shm_size) {
size_t constexpr kBins = 256, kCols = 120, kRows = 16384, kRounds = 16;
float constexpr kLower = -1e-2, kUpper = 1e2;
float sparsity = is_dense ? 0.0f : 0.5f;
auto matrix = RandomDataGenerator(kRows, kCols, sparsity).GenerateDMatrix();
BatchParam batch_param{0, static_cast<int32_t>(kBins)};
for (auto const& batch : matrix->GetBatches<EllpackPage>(batch_param)) {
auto* page = batch.Impl();
tree::RowPartitioner row_partitioner(0, kRows);
auto ridx = row_partitioner.GetRows(0);
int num_bins = kBins * kCols;
dh::device_vector<GradientPairInt64> histogram(num_bins);
auto d_histogram = dh::ToSpan(histogram);
auto gpair = GenerateRandomGradients(kRows, kLower, kUpper);
gpair.SetDevice(0);
FeatureGroups feature_groups(page->Cuts(), page->is_dense, shm_size,
sizeof(GradientPairInt64));
auto quantiser = GradientQuantiser(gpair.DeviceSpan());
BuildGradientHistogram(page->GetDeviceAccessor(0),
feature_groups.DeviceAccessor(0), gpair.DeviceSpan(),
ridx, d_histogram, quantiser);
std::vector<GradientPairInt64> histogram_h(num_bins);
dh::safe_cuda(cudaMemcpy(histogram_h.data(), d_histogram.data(),
num_bins * sizeof(GradientPairInt64),
cudaMemcpyDeviceToHost));
for (size_t i = 0; i < kRounds; ++i) {
dh::device_vector<GradientPairInt64> new_histogram(num_bins);
auto d_new_histogram = dh::ToSpan(new_histogram);
auto quantiser = GradientQuantiser(gpair.DeviceSpan());
BuildGradientHistogram(page->GetDeviceAccessor(0),
feature_groups.DeviceAccessor(0),
gpair.DeviceSpan(), ridx, d_new_histogram,
quantiser);
std::vector<GradientPairInt64> new_histogram_h(num_bins);
dh::safe_cuda(cudaMemcpy(new_histogram_h.data(), d_new_histogram.data(),
num_bins * sizeof(GradientPairInt64),
cudaMemcpyDeviceToHost));
for (size_t j = 0; j < new_histogram_h.size(); ++j) {
ASSERT_EQ(new_histogram_h[j].GetQuantisedGrad(), histogram_h[j].GetQuantisedGrad());
ASSERT_EQ(new_histogram_h[j].GetQuantisedHess(), histogram_h[j].GetQuantisedHess());
}
}
{
auto gpair = GenerateRandomGradients(kRows, kLower, kUpper);
gpair.SetDevice(0);
// Use a single feature group to compute the baseline.
FeatureGroups single_group(page->Cuts());
dh::device_vector<GradientPairInt64> baseline(num_bins);
BuildGradientHistogram(page->GetDeviceAccessor(0),
single_group.DeviceAccessor(0),
gpair.DeviceSpan(), ridx, dh::ToSpan(baseline),
quantiser);
std::vector<GradientPairInt64> baseline_h(num_bins);
dh::safe_cuda(cudaMemcpy(baseline_h.data(), baseline.data().get(),
num_bins * sizeof(GradientPairInt64),
cudaMemcpyDeviceToHost));
for (size_t i = 0; i < baseline.size(); ++i) {
EXPECT_NEAR(baseline_h[i].GetQuantisedGrad(), histogram_h[i].GetQuantisedGrad(),
baseline_h[i].GetQuantisedGrad() * 1e-3);
}
}
}
}
TEST(Histogram, GPUDeterministic) {
std::vector<bool> is_dense_array{false, true};
std::vector<int> shm_sizes{48 * 1024, 64 * 1024, 160 * 1024};
for (bool is_dense : is_dense_array) {
for (int shm_size : shm_sizes) {
TestDeterministicHistogram(is_dense, shm_size);
}
}
}
void ValidateCategoricalHistogram(size_t n_categories, common::Span<GradientPairInt64> onehot,
common::Span<GradientPairInt64> cat) {
auto cat_sum = std::accumulate(cat.cbegin(), cat.cend(), GradientPairInt64{});
for (size_t c = 0; c < n_categories; ++c) {
auto zero = onehot[c * 2];
auto one = onehot[c * 2 + 1];
auto chosen = cat[c];
auto not_chosen = cat_sum - chosen;
ASSERT_EQ(zero, not_chosen);
ASSERT_EQ(one, chosen);
}
}
// Test 1 vs rest categorical histogram is equivalent to one hot encoded data.
void TestGPUHistogramCategorical(size_t num_categories) {
size_t constexpr kRows = 340;
size_t constexpr kBins = 256;
auto x = GenerateRandomCategoricalSingleColumn(kRows, num_categories);
auto cat_m = GetDMatrixFromData(x, kRows, 1);
cat_m->Info().feature_types.HostVector().push_back(FeatureType::kCategorical);
BatchParam batch_param{0, static_cast<int32_t>(kBins)};
tree::RowPartitioner row_partitioner(0, kRows);
auto ridx = row_partitioner.GetRows(0);
dh::device_vector<GradientPairInt64> cat_hist(num_categories);
auto gpair = GenerateRandomGradients(kRows, 0, 2);
gpair.SetDevice(0);
auto quantiser = GradientQuantiser(gpair.DeviceSpan());
/**
* Generate hist with cat data.
*/
for (auto const &batch : cat_m->GetBatches<EllpackPage>(batch_param)) {
auto* page = batch.Impl();
FeatureGroups single_group(page->Cuts());
BuildGradientHistogram(page->GetDeviceAccessor(0),
single_group.DeviceAccessor(0),
gpair.DeviceSpan(), ridx, dh::ToSpan(cat_hist),
quantiser);
}
/**
* Generate hist with one hot encoded data.
*/
auto x_encoded = OneHotEncodeFeature(x, num_categories);
auto encode_m = GetDMatrixFromData(x_encoded, kRows, num_categories);
dh::device_vector<GradientPairInt64> encode_hist(2 * num_categories);
for (auto const &batch : encode_m->GetBatches<EllpackPage>(batch_param)) {
auto* page = batch.Impl();
FeatureGroups single_group(page->Cuts());
BuildGradientHistogram(page->GetDeviceAccessor(0),
single_group.DeviceAccessor(0),
gpair.DeviceSpan(), ridx, dh::ToSpan(encode_hist),
quantiser);
}
std::vector<GradientPairInt64> h_cat_hist(cat_hist.size());
thrust::copy(cat_hist.begin(), cat_hist.end(), h_cat_hist.begin());
std::vector<GradientPairInt64> h_encode_hist(encode_hist.size());
thrust::copy(encode_hist.begin(), encode_hist.end(), h_encode_hist.begin());
ValidateCategoricalHistogram(num_categories,
common::Span<GradientPairInt64>{h_encode_hist},
common::Span<GradientPairInt64>{h_cat_hist});
}
TEST(Histogram, GPUHistCategorical) {
for (size_t num_categories = 2; num_categories < 8; ++num_categories) {
TestGPUHistogramCategorical(num_categories);
}
}
namespace {
// Atomic add as type cast for test.
XGBOOST_DEV_INLINE int64_t atomicAdd(int64_t *dst, int64_t src) { // NOLINT
uint64_t* u_dst = reinterpret_cast<uint64_t*>(dst);
uint64_t u_src = *reinterpret_cast<uint64_t*>(&src);
uint64_t ret = ::atomicAdd(u_dst, u_src);
return *reinterpret_cast<int64_t*>(&ret);
}
}
void TestAtomicAdd() {
size_t n_elements = 1024;
dh::device_vector<int64_t> result_a(1, 0);
auto d_result_a = result_a.data().get();
dh::device_vector<int64_t> result_b(1, 0);
auto d_result_b = result_b.data().get();
/**
* Test for simple inputs
*/
std::vector<int64_t> h_inputs(n_elements);
for (size_t i = 0; i < h_inputs.size(); ++i) {
h_inputs[i] = (i % 2 == 0) ? i : -i;
}
dh::device_vector<int64_t> inputs(h_inputs);
auto d_inputs = inputs.data().get();
dh::LaunchN(n_elements, [=] __device__(size_t i) {
AtomicAdd64As32(d_result_a, d_inputs[i]);
atomicAdd(d_result_b, d_inputs[i]);
});
ASSERT_EQ(result_a[0], result_b[0]);
/**
* Test for positive values that don't fit into 32 bit integer.
*/
thrust::fill(inputs.begin(), inputs.end(),
(std::numeric_limits<uint32_t>::max() / 2));
thrust::fill(result_a.begin(), result_a.end(), 0);
thrust::fill(result_b.begin(), result_b.end(), 0);
dh::LaunchN(n_elements, [=] __device__(size_t i) {
AtomicAdd64As32(d_result_a, d_inputs[i]);
atomicAdd(d_result_b, d_inputs[i]);
});
ASSERT_EQ(result_a[0], result_b[0]);
ASSERT_GT(result_a[0], std::numeric_limits<uint32_t>::max());
CHECK_EQ(thrust::reduce(inputs.begin(), inputs.end(), int64_t(0)), result_a[0]);
/**
* Test for negative values that don't fit into 32 bit integer.
*/
thrust::fill(inputs.begin(), inputs.end(),
(std::numeric_limits<int32_t>::min() / 2));
thrust::fill(result_a.begin(), result_a.end(), 0);
thrust::fill(result_b.begin(), result_b.end(), 0);
dh::LaunchN(n_elements, [=] __device__(size_t i) {
AtomicAdd64As32(d_result_a, d_inputs[i]);
atomicAdd(d_result_b, d_inputs[i]);
});
ASSERT_EQ(result_a[0], result_b[0]);
ASSERT_LT(result_a[0], std::numeric_limits<int32_t>::min());
CHECK_EQ(thrust::reduce(inputs.begin(), inputs.end(), int64_t(0)), result_a[0]);
}
TEST(Histogram, AtomicAddInt64) {
TestAtomicAdd();
}
} // namespace tree
} // namespace xgboost
|
44b0aafc6005c19eae0519fa26488393e56d4f4b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// https://github.com/opencv/opencv/pull/18136/commits/f617f18e46fa556daea060d3c69307567bbc65f7
// buildLutKernel<<<1, 256, 0, stream>>>
__global__ void buildLutKernel(int* hist, unsigned char* lut, int size) {
__shared__ int warp_smem[8];
__shared__ int hist_smem[8][33];
#define HIST_SMEM_NO_BANK_CONFLICT(idx) hist_smem[(idx) >> 5][(idx)&31]
const int tId = threadIdx.x;
const int warpId = threadIdx.x / 32;
const int laneId = threadIdx.x % 32;
// Step1 - Find minimum non-zero value in hist and make it zero
HIST_SMEM_NO_BANK_CONFLICT(tId) = hist[tId];
int nonZeroIdx = HIST_SMEM_NO_BANK_CONFLICT(tId) > 0 ? tId : 256;
__syncthreads();
for (int delta = 16; delta > 0; delta /= 2) {
#if __CUDACC_VER_MAJOR__ >= 9
int shflVal = __shfl_down_sync(0xFFFFFFFF, nonZeroIdx, delta);
#else
int shflVal = __shfl_down(nonZeroIdx, delta);
#endif
if (laneId < delta) nonZeroIdx = min(nonZeroIdx, shflVal);
}
if (laneId == 0) warp_smem[warpId] = nonZeroIdx;
__syncthreads();
if (tId < 8) {
int warpVal = warp_smem[tId];
for (int delta = 4; delta > 0; delta /= 2) {
#if __CUDACC_VER_MAJOR__ >= 9
int shflVal = __shfl_down_sync(0x000000FF, warpVal, delta);
#else
int shflVal = __shfl_down(warpVal, delta);
#endif
if (tId < delta) warpVal = min(warpVal, shflVal);
}
if (tId == 0) {
warp_smem[0] = warpVal; // warpVal - minimum index
}
}
__syncthreads();
const int minNonZeroIdx = warp_smem[0];
const int minNonZeroVal = HIST_SMEM_NO_BANK_CONFLICT(minNonZeroIdx);
if (minNonZeroVal == size) {
// This is a special case: the whole image has the same color
lut[tId] = 0;
if (tId == minNonZeroIdx) lut[tId] = minNonZeroIdx;
return;
}
if (tId == 0) HIST_SMEM_NO_BANK_CONFLICT(minNonZeroIdx) = 0;
__syncthreads();
// Step2 - Inclusive sum
// Algorithm from GPU Gems 3 (A Work-Efficient Parallel Scan)
// https://developer.nvidia.com/gpugems/gpugems3/part-vi-gpu-computing/chapter-39-parallel-prefix-sum-scan-cuda
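  // Toy trace of the scan below on a 4-element histogram (values assumed for
  // illustration), hist = [3 1 7 0]:
  //   up-sweep:              [3 4 7 7] -> [3 4 7 11]
  //   zero the last element: [3 4 7 0]
  //   down-sweep:            [3 0 7 4] -> [0 3 4 11]  (exclusive prefix sum)
  //   phase 3 shifts left and re-adds the last hist value: [3 4 11 11] (inclusive sum)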
// Step2 Phase1 - The Up-Sweep Phase
for (int delta = 1; delta < 256; delta *= 2) {
if (tId < 128 / delta) {
int idx = 255 - 2 * tId * delta;
HIST_SMEM_NO_BANK_CONFLICT(idx) +=
HIST_SMEM_NO_BANK_CONFLICT(idx - delta);
}
__syncthreads();
}
// Step2 Phase2 - The Down-Sweep Phase
if (tId == 0) HIST_SMEM_NO_BANK_CONFLICT(255) = 0;
for (int delta = 128; delta >= 1; delta /= 2) {
if (tId < 128 / delta) {
int rootIdx = 255 - tId * delta * 2;
int leftIdx = rootIdx - delta;
int tmp = HIST_SMEM_NO_BANK_CONFLICT(leftIdx);
HIST_SMEM_NO_BANK_CONFLICT(leftIdx) =
HIST_SMEM_NO_BANK_CONFLICT(rootIdx);
HIST_SMEM_NO_BANK_CONFLICT(rootIdx) += tmp;
}
__syncthreads();
}
// Step2 Phase3 - Convert exclusive sum to inclusive sum
int tmp = HIST_SMEM_NO_BANK_CONFLICT(tId);
__syncthreads();
if (tId >= 1) HIST_SMEM_NO_BANK_CONFLICT(tId - 1) = tmp;
if (tId == 255) HIST_SMEM_NO_BANK_CONFLICT(tId) = tmp + hist[tId];
__syncthreads();
// Step3 - Scale values to build lut
// lut[tId] = saturate_cast<unsigned char>(HIST_SMEM_NO_BANK_CONFLICT(tId) *
// (255.0f / (size - minNonZeroVal)));
#undef HIST_SMEM_NO_BANK_CONFLICT
} | 44b0aafc6005c19eae0519fa26488393e56d4f4b.cu |
// https://github.com/opencv/opencv/pull/18136/commits/f617f18e46fa556daea060d3c69307567bbc65f7
// buildLutKernel<<<1, 256, 0, stream>>>
__global__ void buildLutKernel(int* hist, unsigned char* lut, int size) {
__shared__ int warp_smem[8];
__shared__ int hist_smem[8][33];
#define HIST_SMEM_NO_BANK_CONFLICT(idx) hist_smem[(idx) >> 5][(idx)&31]
const int tId = threadIdx.x;
const int warpId = threadIdx.x / 32;
const int laneId = threadIdx.x % 32;
// Step1 - Find minimum non-zero value in hist and make it zero
HIST_SMEM_NO_BANK_CONFLICT(tId) = hist[tId];
int nonZeroIdx = HIST_SMEM_NO_BANK_CONFLICT(tId) > 0 ? tId : 256;
__syncthreads();
for (int delta = 16; delta > 0; delta /= 2) {
#if __CUDACC_VER_MAJOR__ >= 9
int shflVal = __shfl_down_sync(0xFFFFFFFF, nonZeroIdx, delta);
#else
int shflVal = __shfl_down(nonZeroIdx, delta);
#endif
if (laneId < delta) nonZeroIdx = min(nonZeroIdx, shflVal);
}
if (laneId == 0) warp_smem[warpId] = nonZeroIdx;
__syncthreads();
if (tId < 8) {
int warpVal = warp_smem[tId];
for (int delta = 4; delta > 0; delta /= 2) {
#if __CUDACC_VER_MAJOR__ >= 9
int shflVal = __shfl_down_sync(0x000000FF, warpVal, delta);
#else
int shflVal = __shfl_down(warpVal, delta);
#endif
if (tId < delta) warpVal = min(warpVal, shflVal);
}
if (tId == 0) {
warp_smem[0] = warpVal; // warpVal - minimum index
}
}
__syncthreads();
const int minNonZeroIdx = warp_smem[0];
const int minNonZeroVal = HIST_SMEM_NO_BANK_CONFLICT(minNonZeroIdx);
if (minNonZeroVal == size) {
// This is a special case: the whole image has the same color
lut[tId] = 0;
if (tId == minNonZeroIdx) lut[tId] = minNonZeroIdx;
return;
}
if (tId == 0) HIST_SMEM_NO_BANK_CONFLICT(minNonZeroIdx) = 0;
__syncthreads();
// Step2 - Inclusive sum
// Algorithm from GPU Gems 3 (A Work-Efficient Parallel Scan)
// https://developer.nvidia.com/gpugems/gpugems3/part-vi-gpu-computing/chapter-39-parallel-prefix-sum-scan-cuda
// Step2 Phase1 - The Up-Sweep Phase
for (int delta = 1; delta < 256; delta *= 2) {
if (tId < 128 / delta) {
int idx = 255 - 2 * tId * delta;
HIST_SMEM_NO_BANK_CONFLICT(idx) +=
HIST_SMEM_NO_BANK_CONFLICT(idx - delta);
}
__syncthreads();
}
// Step2 Phase2 - The Down-Sweep Phase
if (tId == 0) HIST_SMEM_NO_BANK_CONFLICT(255) = 0;
for (int delta = 128; delta >= 1; delta /= 2) {
if (tId < 128 / delta) {
int rootIdx = 255 - tId * delta * 2;
int leftIdx = rootIdx - delta;
int tmp = HIST_SMEM_NO_BANK_CONFLICT(leftIdx);
HIST_SMEM_NO_BANK_CONFLICT(leftIdx) =
HIST_SMEM_NO_BANK_CONFLICT(rootIdx);
HIST_SMEM_NO_BANK_CONFLICT(rootIdx) += tmp;
}
__syncthreads();
}
// Step2 Phase3 - Convert exclusive sum to inclusive sum
int tmp = HIST_SMEM_NO_BANK_CONFLICT(tId);
__syncthreads();
if (tId >= 1) HIST_SMEM_NO_BANK_CONFLICT(tId - 1) = tmp;
if (tId == 255) HIST_SMEM_NO_BANK_CONFLICT(tId) = tmp + hist[tId];
__syncthreads();
// Step3 - Scale values to build lut
// lut[tId] = saturate_cast<unsigned char>(HIST_SMEM_NO_BANK_CONFLICT(tId) *
// (255.0f / (size - minNonZeroVal)));
#undef HIST_SMEM_NO_BANK_CONFLICT
} |
26ae5b5ec58cc0cacae36c3c2f405f0d0f7e6a02.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "common\book.h"
#include "common\cpu_anim.h"
#include <stdio.h>
#define DIM 1024
#define PI 3.1415926535897932
struct DataBlock {
unsigned char *d_bitmap;
CPUAnimBitmap *bitmap;
};
__global__ void kernel( unsigned char* ptr, int ticks) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float fx = x - DIM / 2;
float fy = y - DIM / 2;
float d = sqrtf(fx * fx + fy * fy);
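    // The next line is a radially symmetric ripple: a cosine whose phase
    // advances with `ticks` and whose amplitude decays with the distance d
    // from the image centre, mapped into the 0..255 grey range.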
unsigned char grey = (unsigned char)(128.0f + 127.0f * cos(d/10.f - ticks/7.0f) / (d/10.0f + 1.0f));
ptr[offset*4 + 0] = grey;
ptr[offset*4 + 1] = grey;
ptr[offset*4 + 2] = grey;
ptr[offset*4 + 3] = 255;
}
void generate_frame( DataBlock *d, int ticks) {
dim3 blocks(DIM/16, DIM/16);
dim3 threads(16,16);
hipLaunchKernelGGL(( kernel), dim3(blocks),dim3(threads), 0, 0, d->d_bitmap, ticks);
hipMemcpy(d->bitmap->get_ptr(),
d->d_bitmap,
d->bitmap->image_size(),
hipMemcpyDeviceToHost);
}
void cleanup (DataBlock *d) {
hipFree( d->d_bitmap);
}
int main( void ) {
DataBlock data;
CPUAnimBitmap bitmap(DIM, DIM, &data);
data.bitmap = &bitmap;
hipMalloc( (void**)&data.d_bitmap, bitmap.image_size());
bitmap.anim_and_exit((void (*)(void*, int))generate_frame,
(void (*)(void*))cleanup);
} | 26ae5b5ec58cc0cacae36c3c2f405f0d0f7e6a02.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "common\book.h"
#include "common\cpu_anim.h"
#include <stdio.h>
#define DIM 1024
#define PI 3.1415926535897932
struct DataBlock {
unsigned char *d_bitmap;
CPUAnimBitmap *bitmap;
};
__global__ void kernel( unsigned char* ptr, int ticks) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float fx = x - DIM / 2;
float fy = y - DIM / 2;
float d = sqrtf(fx * fx + fy * fy);
unsigned char grey = (unsigned char)(128.0f + 127.0f * cos(d/10.f - ticks/7.0f) / (d/10.0f + 1.0f));
ptr[offset*4 + 0] = grey;
ptr[offset*4 + 1] = grey;
ptr[offset*4 + 2] = grey;
ptr[offset*4 + 3] = 255;
}
void generate_frame( DataBlock *d, int ticks) {
dim3 blocks(DIM/16, DIM/16);
dim3 threads(16,16);
kernel<<<blocks,threads>>>(d->d_bitmap, ticks);
cudaMemcpy(d->bitmap->get_ptr(),
d->d_bitmap,
d->bitmap->image_size(),
cudaMemcpyDeviceToHost);
}
void cleanup (DataBlock *d) {
cudaFree( d->d_bitmap);
}
int main( void ) {
DataBlock data;
CPUAnimBitmap bitmap(DIM, DIM, &data);
data.bitmap = &bitmap;
cudaMalloc( (void**)&data.d_bitmap, bitmap.image_size());
bitmap.anim_and_exit((void (*)(void*, int))generate_frame,
(void (*)(void*))cleanup);
} |
fedb141264aee1fa6e0e1f0434122a6ab5b501c7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2012-2017 VideoStitch SAS
// Copyright (c) 2018 stitchEm
#include "fill.hpp"
#include "../cuda/util.hpp"
#include "cuda/error.hpp"
#include <stdio.h>
namespace VideoStitch {
namespace Image {
namespace {
/**
* A kernel that fills the buffer with a color.
*/
__global__ void fillKernel(uint32_t* dst, unsigned width, unsigned height, int32_t color) {
const unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height) {
dst[y * width + x] = color;
}
}
} // namespace
Status fill(uint32_t* devBuffer, int64_t width, int64_t height, int32_t color, hipStream_t stream) {
dim3 dimBlock(16, 16, 1);
dim3 dimGrid((unsigned)Cuda::ceilDiv((unsigned)width, dimBlock.x),
(unsigned)Cuda::ceilDiv((unsigned)height, dimBlock.y), 1);
hipLaunchKernelGGL(( fillKernel), dim3(dimGrid), dim3(dimBlock), 0, stream, devBuffer, (unsigned)width, (unsigned)height, color);
return CUDA_STATUS;
}
} // namespace Image
} // namespace VideoStitch
| fedb141264aee1fa6e0e1f0434122a6ab5b501c7.cu | // Copyright (c) 2012-2017 VideoStitch SAS
// Copyright (c) 2018 stitchEm
#include "fill.hpp"
#include "../cuda/util.hpp"
#include "cuda/error.hpp"
#include <stdio.h>
namespace VideoStitch {
namespace Image {
namespace {
/**
* A kernel that fills the buffer with a color.
*/
__global__ void fillKernel(uint32_t* dst, unsigned width, unsigned height, int32_t color) {
const unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < width && y < height) {
dst[y * width + x] = color;
}
}
} // namespace
Status fill(uint32_t* devBuffer, int64_t width, int64_t height, int32_t color, cudaStream_t stream) {
dim3 dimBlock(16, 16, 1);
dim3 dimGrid((unsigned)Cuda::ceilDiv((unsigned)width, dimBlock.x),
(unsigned)Cuda::ceilDiv((unsigned)height, dimBlock.y), 1);
fillKernel<<<dimGrid, dimBlock, 0, stream>>>(devBuffer, (unsigned)width, (unsigned)height, color);
return CUDA_STATUS;
}
} // namespace Image
} // namespace VideoStitch
|
df952022c2b2b72b19a5110ea9fa26711c159978.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/detection/box_coder_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
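// The encode kernel below maps each (target box, prior box) pair into the
// center-size offset parameterisation. A sketch of the math it implements,
// with v0..v3 denoting the PriorBoxVar entries of the prior box:
//   out0 = (target_cx - prior_cx) / prior_w / v0
//   out1 = (target_cy - prior_cy) / prior_h / v1
//   out2 = log(|target_w / prior_w|) / v2
//   out3 = log(|target_h / prior_h|) / v3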
template <typename T>
__global__ void EncodeCenterSizeKernel(const T* prior_box_data,
const T* prior_box_var_data,
const T* target_box_data, const int row,
const int col, const int len,
T* output) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < row * col) {
const int row_idx = idx / col;
const int col_idx = idx % col;
T prior_box_width =
prior_box_data[col_idx * len + 2] - prior_box_data[col_idx * len];
T prior_box_height =
prior_box_data[col_idx * len + 3] - prior_box_data[col_idx * len + 1];
T prior_box_center_x =
(prior_box_data[col_idx * len + 2] + prior_box_data[col_idx * len]) / 2;
T prior_box_center_y = (prior_box_data[col_idx * len + 3] +
prior_box_data[col_idx * len + 1]) /
2;
T target_box_center_x =
(target_box_data[row_idx * len + 2] + target_box_data[row_idx * len]) /
2;
T target_box_center_y = (target_box_data[row_idx * len + 3] +
target_box_data[row_idx * len + 1]) /
2;
T target_box_width =
target_box_data[row_idx * len + 2] - target_box_data[row_idx * len];
T target_box_height =
target_box_data[row_idx * len + 3] - target_box_data[row_idx * len + 1];
output[idx * len] = (target_box_center_x - prior_box_center_x) /
prior_box_width / prior_box_var_data[col_idx * len];
output[idx * len + 1] = (target_box_center_y - prior_box_center_y) /
prior_box_height /
prior_box_var_data[col_idx * len + 1];
output[idx * len + 2] = log(fabs(target_box_width / prior_box_width)) /
prior_box_var_data[col_idx * len + 2];
output[idx * len + 3] = log(fabs(target_box_height / prior_box_height)) /
prior_box_var_data[col_idx * len + 3];
}
}
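// The decode kernel below inverts the transform above (sketch):
//   w  = exp(v2 * in2) * prior_w,           h  = exp(v3 * in3) * prior_h
//   cx = v0 * in0 * prior_w + prior_cx,     cy = v1 * in1 * prior_h + prior_cy
// and the box is written back in corner form
//   [cx - w/2, cy - h/2, cx + w/2, cy + h/2].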
template <typename T>
__global__ void DecodeCenterSizeKernel(const T* prior_box_data,
const T* prior_box_var_data,
const T* target_box_data, const int row,
const int col, const int len,
T* output) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < row * col) {
const int col_idx = idx % col;
T prior_box_width =
prior_box_data[col_idx * len + 2] - prior_box_data[col_idx * len];
T prior_box_height =
prior_box_data[col_idx * len + 3] - prior_box_data[col_idx * len + 1];
T prior_box_center_x =
(prior_box_data[col_idx * len + 2] + prior_box_data[col_idx * len]) / 2;
T prior_box_center_y = (prior_box_data[col_idx * len + 3] +
prior_box_data[col_idx * len + 1]) /
2;
T target_box_width = exp(prior_box_var_data[col_idx * len + 2] *
target_box_data[idx * len + 2]) *
prior_box_width;
T target_box_height = exp(prior_box_var_data[col_idx * len + 3] *
target_box_data[idx * len + 3]) *
prior_box_height;
T target_box_center_x = prior_box_var_data[col_idx * len] *
target_box_data[idx * len] * prior_box_width +
prior_box_center_x;
T target_box_center_y = prior_box_var_data[col_idx * len + 1] *
target_box_data[idx * len + 1] *
prior_box_height +
prior_box_center_y;
output[idx * len] = target_box_center_x - target_box_width / 2;
output[idx * len + 1] = target_box_center_y - target_box_height / 2;
output[idx * len + 2] = target_box_center_x + target_box_width / 2;
output[idx * len + 3] = target_box_center_y + target_box_height / 2;
}
}
template <typename T>
class BoxCoderCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
PADDLE_ENFORCE(platform::is_gpu_place(context.GetPlace()),
"This kernel only runs on GPU device.");
auto* prior_box = context.Input<framework::Tensor>("PriorBox");
auto* prior_box_var = context.Input<framework::Tensor>("PriorBoxVar");
auto* target_box = context.Input<framework::LoDTensor>("TargetBox");
auto* output_box = context.Output<framework::Tensor>("OutputBox");
if (target_box->lod().size()) {
PADDLE_ENFORCE_EQ(target_box->lod().size(), 1,
"Only support 1 level of LoD.");
}
auto row = target_box->dims()[0];
auto col = prior_box->dims()[0];
auto len = prior_box->dims()[1];
int block = 512;
int grid = (row * col + block - 1) / block;
auto& device_ctx = context.cuda_device_context();
const T* prior_box_data = prior_box->data<T>();
const T* prior_box_var_data = prior_box_var->data<T>();
const T* target_box_data = target_box->data<T>();
output_box->mutable_data<T>({row, col, len}, context.GetPlace());
T* output = output_box->data<T>();
auto code_type = GetBoxCodeType(context.Attr<std::string>("code_type"));
if (code_type == BoxCodeType::kEncodeCenterSize) {
hipLaunchKernelGGL(( EncodeCenterSizeKernel<T>), dim3(grid), dim3(block), 0, device_ctx.stream(),
prior_box_data, prior_box_var_data, target_box_data, row, col, len,
output);
} else if (code_type == BoxCodeType::kDecodeCenterSize) {
hipLaunchKernelGGL(( DecodeCenterSizeKernel<T>), dim3(grid), dim3(block), 0, device_ctx.stream(),
prior_box_data, prior_box_var_data, target_box_data, row, col, len,
output);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(box_coder, ops::BoxCoderCUDAKernel<float>,
ops::BoxCoderCUDAKernel<double>);
| df952022c2b2b72b19a5110ea9fa26711c159978.cu | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/detection/box_coder_op.h"
#include "paddle/fluid/platform/cuda_primitives.h"
namespace paddle {
namespace operators {
template <typename T>
__global__ void EncodeCenterSizeKernel(const T* prior_box_data,
const T* prior_box_var_data,
const T* target_box_data, const int row,
const int col, const int len,
T* output) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < row * col) {
const int row_idx = idx / col;
const int col_idx = idx % col;
T prior_box_width =
prior_box_data[col_idx * len + 2] - prior_box_data[col_idx * len];
T prior_box_height =
prior_box_data[col_idx * len + 3] - prior_box_data[col_idx * len + 1];
T prior_box_center_x =
(prior_box_data[col_idx * len + 2] + prior_box_data[col_idx * len]) / 2;
T prior_box_center_y = (prior_box_data[col_idx * len + 3] +
prior_box_data[col_idx * len + 1]) /
2;
T target_box_center_x =
(target_box_data[row_idx * len + 2] + target_box_data[row_idx * len]) /
2;
T target_box_center_y = (target_box_data[row_idx * len + 3] +
target_box_data[row_idx * len + 1]) /
2;
T target_box_width =
target_box_data[row_idx * len + 2] - target_box_data[row_idx * len];
T target_box_height =
target_box_data[row_idx * len + 3] - target_box_data[row_idx * len + 1];
output[idx * len] = (target_box_center_x - prior_box_center_x) /
prior_box_width / prior_box_var_data[col_idx * len];
output[idx * len + 1] = (target_box_center_y - prior_box_center_y) /
prior_box_height /
prior_box_var_data[col_idx * len + 1];
output[idx * len + 2] = log(fabs(target_box_width / prior_box_width)) /
prior_box_var_data[col_idx * len + 2];
output[idx * len + 3] = log(fabs(target_box_height / prior_box_height)) /
prior_box_var_data[col_idx * len + 3];
}
}
template <typename T>
__global__ void DecodeCenterSizeKernel(const T* prior_box_data,
const T* prior_box_var_data,
const T* target_box_data, const int row,
const int col, const int len,
T* output) {
const int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < row * col) {
const int col_idx = idx % col;
T prior_box_width =
prior_box_data[col_idx * len + 2] - prior_box_data[col_idx * len];
T prior_box_height =
prior_box_data[col_idx * len + 3] - prior_box_data[col_idx * len + 1];
T prior_box_center_x =
(prior_box_data[col_idx * len + 2] + prior_box_data[col_idx * len]) / 2;
T prior_box_center_y = (prior_box_data[col_idx * len + 3] +
prior_box_data[col_idx * len + 1]) /
2;
T target_box_width = exp(prior_box_var_data[col_idx * len + 2] *
target_box_data[idx * len + 2]) *
prior_box_width;
T target_box_height = exp(prior_box_var_data[col_idx * len + 3] *
target_box_data[idx * len + 3]) *
prior_box_height;
T target_box_center_x = prior_box_var_data[col_idx * len] *
target_box_data[idx * len] * prior_box_width +
prior_box_center_x;
T target_box_center_y = prior_box_var_data[col_idx * len + 1] *
target_box_data[idx * len + 1] *
prior_box_height +
prior_box_center_y;
output[idx * len] = target_box_center_x - target_box_width / 2;
output[idx * len + 1] = target_box_center_y - target_box_height / 2;
output[idx * len + 2] = target_box_center_x + target_box_width / 2;
output[idx * len + 3] = target_box_center_y + target_box_height / 2;
}
}
template <typename T>
class BoxCoderCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
PADDLE_ENFORCE(platform::is_gpu_place(context.GetPlace()),
"This kernel only runs on GPU device.");
auto* prior_box = context.Input<framework::Tensor>("PriorBox");
auto* prior_box_var = context.Input<framework::Tensor>("PriorBoxVar");
auto* target_box = context.Input<framework::LoDTensor>("TargetBox");
auto* output_box = context.Output<framework::Tensor>("OutputBox");
if (target_box->lod().size()) {
PADDLE_ENFORCE_EQ(target_box->lod().size(), 1,
"Only support 1 level of LoD.");
}
auto row = target_box->dims()[0];
auto col = prior_box->dims()[0];
auto len = prior_box->dims()[1];
int block = 512;
int grid = (row * col + block - 1) / block;
auto& device_ctx = context.cuda_device_context();
const T* prior_box_data = prior_box->data<T>();
const T* prior_box_var_data = prior_box_var->data<T>();
const T* target_box_data = target_box->data<T>();
output_box->mutable_data<T>({row, col, len}, context.GetPlace());
T* output = output_box->data<T>();
auto code_type = GetBoxCodeType(context.Attr<std::string>("code_type"));
if (code_type == BoxCodeType::kEncodeCenterSize) {
EncodeCenterSizeKernel<T><<<grid, block, 0, device_ctx.stream()>>>(
prior_box_data, prior_box_var_data, target_box_data, row, col, len,
output);
} else if (code_type == BoxCodeType::kDecodeCenterSize) {
DecodeCenterSizeKernel<T><<<grid, block, 0, device_ctx.stream()>>>(
prior_box_data, prior_box_var_data, target_box_data, row, col, len,
output);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(box_coder, ops::BoxCoderCUDAKernel<float>,
ops::BoxCoderCUDAKernel<double>);
|
edda540bfbbae9ea128fb81dddd535ef41bc47bd.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @brief
* ragged_ops
*
* @copyright
* Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu)
* Mobvoi Inc. (authors: Fangjun Kuang)
* Yiming Wang
*
* @copyright
* See LICENSE for clarification regarding multiple authors
*/
#include <algorithm>
#include <memory>
#include <vector>
#include "hipcub/hipcub.hpp"
#include "k2/csrc/array_ops.h"
#include "k2/csrc/macros.h"
#include "k2/csrc/math.h"
#include "k2/csrc/moderngpu_allocator.h"
#include "k2/csrc/ragged.h"
#include "k2/csrc/ragged_ops.h"
#include "k2/csrc/ragged_utils.h"
#include "moderngpu/kernel_mergesort.hxx"
namespace {
/*
A helper function used in RaggedShape3;
if both first and second are non-NULL, it will check if the context of them
is compatible or not and return that context if compatible;
if one of them is NULL, returns the other one's context.
*/
static k2::ContextPtr GetContext(const k2::Array1<int32_t> *first,
const k2::Array1<int32_t> *second) {
K2_CHECK(first != nullptr || second != nullptr)
<< "At least one of first and second must be non-NULL";
if (first == nullptr)
return second->Context();
else if (second == nullptr)
return first->Context();
else
return k2::GetContext(*first, *second);
}
} // namespace
namespace k2 {
RaggedShape RandomRaggedShape(bool set_row_ids, int32_t min_num_axes,
int32_t max_num_axes, int32_t min_num_elements,
int32_t max_num_elements) {
NVTX_RANGE(K2_FUNC);
ContextPtr c = GetCpuContext();
K2_CHECK(min_num_axes >= 2 && max_num_axes >= min_num_axes &&
min_num_elements >= 0 && max_num_elements >= min_num_elements);
int32_t num_axes = RandInt(min_num_axes, max_num_axes);
int32_t num_elements = RandIntGeometric(min_num_elements, max_num_elements);
bool done_repeats = false;
std::vector<RaggedShapeLayer> axes(num_axes - 1);
for (int32_t axis = num_axes - 2; axis >= 0; axis--) {
// this axis will have row_ids of length num_elements and
// row_splits of length to be determined.
int32_t cur_row_split = 0;
std::vector<int32_t> row_splits_vec;
std::vector<int32_t> row_ids_vec;
row_splits_vec.push_back(cur_row_split);
    // The reason for "|| RandInt(0, 2) == 0" is so that even if there
// are no elements we can still potentially generate empty row-splits.
while (cur_row_split < num_elements || RandInt(0, 2) == 0) {
int32_t split_size = RandIntGeometric(0, num_elements - cur_row_split);
cur_row_split += split_size;
// sometimes we have a bunch of empty rows in a row (this will test out
// more of the code), so here we generate a bunch of empty rows, but we
// just do this only once (that's why we declare `done_repeats` here).
if (split_size == 0 && RandInt(0, 30) == 0 && !done_repeats) {
int32_t num_repeats = RandIntGeometric(1, 128);
row_splits_vec.insert(row_splits_vec.end(), num_repeats, cur_row_split);
// don't need to set `row_ids_vec` as there's no element.
done_repeats = true;
}
row_splits_vec.push_back(cur_row_split);
if (set_row_ids) {
int32_t cur_row = static_cast<int32_t>(row_splits_vec.size()) - 2;
row_ids_vec.insert(row_ids_vec.end(), split_size, cur_row);
}
}
axes[axis].row_splits = Array1<int32_t>(c, row_splits_vec);
if (set_row_ids) axes[axis].row_ids = Array1<int32_t>(c, row_ids_vec);
axes[axis].cached_tot_size = num_elements;
num_elements = axes[axis].row_splits.Dim() - 1;
}
// RaggedShape(axes, true) will check the returned RaggedShape for
// consistency.
return RaggedShape(axes, true);
}
RaggedShape RaggedShape2(Array1<int32_t> *row_splits, Array1<int32_t> *row_ids,
int32_t cached_tot_size) {
NVTX_RANGE(K2_FUNC);
K2_CHECK(row_splits != nullptr || row_ids != nullptr)
<< "At least one of row_splits and row_ids must be defined";
ContextPtr ctx = ::GetContext(row_splits, row_ids);
if (cached_tot_size != -1) {
if (row_ids != nullptr) K2_CHECK_EQ(cached_tot_size, row_ids->Dim());
if (row_splits != nullptr) {
// may be slow as it may copy memory from device to host
K2_DCHECK_EQ(cached_tot_size, row_splits->Back())
<< "Bad row splits is: " << *row_splits;
}
}
std::vector<RaggedShapeLayer> axes(1);
if (row_splits != nullptr) {
axes[0].row_splits = *row_splits;
} else {
// we need to work out row_splits as we always require row_splits is not
// empty for RaggedShape. Note here we suppose the last element in row_ids
// is num_rows - 1, i.e. there's no empty rows after row `row_ids[-1]`.
int32_t num_rows = row_ids->Dim() == 0 ? 0 : row_ids->Back() + 1;
Array1<int32_t> row_splits_array(ctx, num_rows + 1);
RowIdsToRowSplits(*row_ids, &row_splits_array);
axes[0].row_splits = row_splits_array;
}
if (row_ids != nullptr) axes[0].row_ids = *row_ids;
if (cached_tot_size == -1) {
cached_tot_size =
row_ids != nullptr ? row_ids->Dim() : axes[0].row_splits.Back();
}
axes[0].cached_tot_size = cached_tot_size;
// note below line will check if row_splits and row_ids are valid and agree
// with each other.
return RaggedShape(axes);
}
RaggedShape ComposeRaggedShapes(const RaggedShape &a, const RaggedShape &b) {
NVTX_RANGE(K2_FUNC);
if (a.NumElements() != b.Dim0()) {
K2_LOG(FATAL) << "ComposeRaggedShapes: shape mismatch: " << a.NumElements()
<< " vs. " << b.Dim0();
}
const auto &a_axes = a.Layers();
const auto &b_axes = b.Layers();
std::vector<RaggedShapeLayer> axes(a_axes.size() + b_axes.size());
std::size_t a_size = a_axes.size(), b_size = b_axes.size();
for (std::size_t i = 0; i < a_size; ++i) axes[i] = a_axes[i];
for (std::size_t i = 0; i < b_size; ++i) axes[i + a_size] = b_axes[i];
bool validate = false;
return RaggedShape(axes, validate);
}
RaggedShape RaggedShape3(Array1<int32_t> *row_splits1,
Array1<int32_t> *row_ids1, int32_t cached_tot_size1,
Array1<int32_t> *row_splits2,
Array1<int32_t> *row_ids2, int32_t cached_tot_size2) {
NVTX_RANGE(K2_FUNC);
RaggedShape shape1 = RaggedShape2(row_splits1, row_ids1, cached_tot_size1);
Array1<int32_t> temp_array;
if (row_splits2 == nullptr) {
K2_CHECK_NE(row_ids2, nullptr)
<< "Either row-splits or row-ids must be defined";
temp_array = Array1<int32_t>(row_ids2->Context(), shape1.NumElements() + 1);
row_splits2 = &temp_array;
RowIdsToRowSplits(*row_ids2, row_splits2);
}
return ComposeRaggedShapes(
shape1, RaggedShape2(row_splits2, row_ids2, cached_tot_size2));
}
RaggedShape RaggedShape4(Array1<int32_t> *row_splits1,
Array1<int32_t> *row_ids1, int32_t cached_tot_size1,
Array1<int32_t> *row_splits2,
Array1<int32_t> *row_ids2, int32_t cached_tot_size2,
Array1<int32_t> *row_splits3,
Array1<int32_t> *row_ids3, int32_t cached_tot_size3) {
NVTX_RANGE(K2_FUNC);
RaggedShape shape12 = RaggedShape3(row_splits1, row_ids1, cached_tot_size1,
row_splits2, row_ids2, cached_tot_size2);
Array1<int32_t> temp_array;
if (row_splits3 == nullptr) {
K2_CHECK_NE(row_ids3, nullptr)
<< "Either row-splits or row-ids must be defined";
temp_array =
Array1<int32_t>(row_ids3->Context(), shape12.NumElements() + 1);
row_splits3 = &temp_array;
RowIdsToRowSplits(*row_ids3, row_splits3);
}
return ComposeRaggedShapes(
shape12, RaggedShape2(row_splits3, row_ids3, cached_tot_size3));
}
RaggedShape RaggedShapeFromTotSizes(ContextPtr c, int32_t num_axes,
int32_t *tot_sizes) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(num_axes, 2);
std::vector<RaggedShapeLayer> axes(num_axes - 1);
// In future we might choose to allocate everything in one big array, to avoid
// multiple allocations, but for now just do it the simple way.
for (int32_t axis = 1; axis < num_axes; ++axis) {
axes[axis - 1].row_splits = Array1<int32_t>(c, tot_sizes[axis - 1] + 1);
axes[axis - 1].row_ids = Array1<int32_t>(c, tot_sizes[axis]);
axes[axis - 1].cached_tot_size = tot_sizes[axis];
}
// Not check here as we did not set the values of row_splits and row_ids
return RaggedShape(axes, false);
}
Array1<int32_t *> GetRowSplitsPtr(RaggedShape &src) {
NVTX_RANGE(K2_FUNC);
int32_t axes = src.NumAxes();
K2_CHECK_GE(axes, 2);
std::vector<int32_t *> row_splits_start(axes - 1);
for (int32_t i = 1; i != axes; ++i) {
Array1<int32_t> &cur_splits = src.RowSplits(i);
row_splits_start[i - 1] = cur_splits.Data();
}
return Array1<int32_t *>(src.Context(), row_splits_start);
}
// See declaration in ragged.h for documentation of its purpose and interface.
RaggedShape Unsqueeze(const RaggedShape &src, int32_t axis) {
// If axis == 0, initial row_splits and row_ids will look like the following,
// if for example src.Dim0() was 5: [ 0 5 ], [ 0 0 0 0 0 ]. The other axes
// would be pushed forward.
//
// If 0 < axis <= src.NumAxes(), the inserted row_splits and row_ids would
// look like the following, if for instance the src.TotSize(axis) = 8:
// [ 0 1 2 3 4 5 6 7 8 ], [ 0 1 2 3 4 5 6 7 ].
//
  // The reason why the code is different for axis == 0 is that in that case we
// are really making visible an "implicit" axis of the input `src`; we could
// call it axis 0 of the original RaggedShape. Imagine that "implicit" axis's
// row_splits and row_ids map respectively from an idx_minus1 -> idx0 and from
// an idx_0 to idx_minus1, where idx_minus1 is always 0 and 0 <= idx0 <
// Dim0().
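  // A small concrete sketch: for src == [ [x x] [x] ] (Dim0() == 2,
  // NumElements() == 3), Unsqueeze(src, 0) gives [ [ [x x] [x] ] ], the new
  // layer being row_splits == [ 0 2 ], row_ids == [ 0 0 ]; for axis == 1 the
  // inserted layer is the identity [ 0 1 2 3 ] / [ 0 1 2 ], so each element
  // of src ends up in its own singleton sub-list.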
NVTX_RANGE(K2_FUNC);
ContextPtr c = src.Context();
K2_CHECK(axis >= 0 && axis <= src.NumAxes());
const std::vector<RaggedShapeLayer> &axes_in = src.Layers();
int32_t num_axes_in = src.NumAxes();
// Note: in RaggedShape, the vector of RaggedShapeLayer is of length
// num_axes - 1, so the output will have one more axis than the input.
std::vector<RaggedShapeLayer> axes_out(num_axes_in);
int32_t row_splits_dim, row_ids_dim;
Array1<int32_t> mem;
if (axis == 0) {
row_splits_dim = 2; // e.g. [ 0 5 ]
row_ids_dim = src.Dim0(); // e.g. [ 0 0 0 0 0 ]
mem = Array1<int32_t>(c, row_splits_dim + row_ids_dim);
int32_t *mem_data = mem.Data();
K2_EVAL(
c, mem.Dim(), lambda_set_mem, (int32_t i)->void {
if (i == 1)
mem_data[i] = row_ids_dim;
else
mem_data[i] = 0;
});
} else {
int32_t tot_size = src.TotSize(axis);
row_splits_dim = tot_size + 1;
row_ids_dim = tot_size;
mem = Array1<int32_t>(c, row_splits_dim + row_ids_dim);
int32_t *mem_data = mem.Data();
K2_EVAL(
c, mem.Dim(), lambda_set_mem2,
(int32_t i)->void { mem_data[i] = i % (tot_size + 1); });
}
axes_out[axis].row_splits = mem.Range(0, row_splits_dim);
axes_out[axis].row_ids = mem.Range(row_splits_dim, row_ids_dim);
axes_out[axis].cached_tot_size = row_ids_dim;
for (int32_t i = 0; i < axis; ++i) axes_out[i] = axes_in[i];
// Note: the returned array has `num_axes_in + 1` axes, so its
// array of RaggedShapeLayer is of length `num_axes_in`.
for (int32_t i = axis + 1; i < num_axes_in; ++i) axes_out[i] = axes_in[i - 1];
return RaggedShape(axes_out);
}
std::vector<RaggedShape> UnsqueezeParallel(int32_t num_srcs, RaggedShape **src,
int32_t axis) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(axis, 0);
std::vector<RaggedShape> ans;
if (num_srcs == 0) return ans;
ans.reserve(num_srcs);
ContextPtr c = src[0]->Context();
std::vector<int32_t> all_row_splits_vec(num_srcs * 2);
int32_t max_dim = 0;
// all_row_splits_vec will contain [ 0 d0 0 d1 0 d2 .. ]
// where d0 == src[0]->Dim0(), d1 == src[1]->Dim0()..
for (int32_t i = 0; i < num_srcs; i++) {
int32_t this_dim0 = src[i]->Dim0();
if (this_dim0 > max_dim) max_dim = this_dim0;
all_row_splits_vec[i * 2] = 0;
all_row_splits_vec[i * 2 + 1] = this_dim0;
}
Array1<int32_t> all_row_splits(c, all_row_splits_vec);
Array1<int32_t> all_row_ids(c, max_dim, 0);
for (int32_t i = 0; i < num_srcs; i++) {
int32_t num_axes = src[i]->NumAxes();
std::vector<RaggedShapeLayer> axes;
axes.reserve(num_axes); // note, the size of the `layers` of a RaggedShape
// is its NumAxes() - 1.
axes.resize(1);
int32_t this_old_dim0 = all_row_splits_vec[i * 2 + 1];
axes[0].row_splits = all_row_splits.Range(i * 2, 2);
axes[0].row_ids = all_row_ids.Range(0, this_old_dim0);
axes[0].cached_tot_size = this_old_dim0;
axes.insert(axes.end(), src[i]->Layers().begin(), src[i]->Layers().end());
ans.emplace_back(axes);
}
return ans;
}
/*
Internal function used in Index(), which gets certain arrays used internally.
@param [in] src Source shape to be indexed
@param [in] src_row_splits_ptrs Result of calling GetRowSplitsPtr(src)
@param [in] new2old Array of indexes into axis 0 of src
@param [out] old_offsets Will be set to new Array2 with dimension
(src.NumAxes(), new2old.Dim()), whose (i,j)'th
element contains the offset into axis i of `src`
where the slice of `src` with index0 (i.e. index
into 0'th-axis of `src`) equal to `new2old[j]`
begins.
@param [out] new_offsets Will be set to new Array2 with dimension
(src.NumAxes(), new2old.Dim()+1), whose (i,j)'th
element contains the offset into axis i of `ans`
where the data in `ans` corresponding to
index j (i.e. index j into axis 0 of `ans`) begins.
Note: `ans` is the result of Index(), with
ans.Dim0() == new2old.Dim().
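   A small worked sketch (values assumed purely for illustration): if `src` is
   the 2-axis shape [ [x x] [x] [x x x] ] (row_splits == [ 0 2 3 6 ]) and
   new2old == [ 2 0 ], then
     old_offsets == [[ 2 0 ]      new_offsets == [[ 0 1 2 ]
                     [ 3 0 ]]                     [ 0 3 5 ]]
   i.e. the answer takes one row starting at old idx0 == 2 (3 elements starting
   at old idx01 == 3), then one row starting at old idx0 == 0 (2 elements
   starting at old idx01 == 0).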
*/
inline void GetOldAndNewOffsets(RaggedShape &src,
const Array1<int32_t *> &src_row_splits_ptrs,
const Array1<int32_t> &new2old,
Array2<int32_t> *old_offsets,
Array2<int32_t> *new_offsets) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(src.NumAxes(), 1);
ContextPtr &c = src.Context();
int32_t num_axes = src.NumAxes(), ans_dim0 = new2old.Dim();
int32_t *const *src_row_splits_ptrs_data = src_row_splits_ptrs.Data();
const int32_t *new2old_data = new2old.Data();
*old_offsets = Array2<int32_t>(c, num_axes, ans_dim0);
*new_offsets = Array2<int32_t>(c, num_axes, ans_dim0 + 1);
auto old_offsets_acc = old_offsets->Accessor(),
new_offsets_acc = new_offsets->Accessor();
// Set old_offsets; and for now, set new_offsets to the corresponding
// sizes of the output slices.
K2_EVAL(
c, ans_dim0, lambda_set_offsets, (int32_t i)->void {
// 0 <= i < ans_dim0
int32_t old_offset = new2old_data[i], old_offset_next = old_offset + 1;
for (int32_t axis = 0;; axis++) {
old_offsets_acc(axis, i) = old_offset;
// Below, 'new_offsets_acc' currently contains the size rather
// than the offset; we need to do exclusive-sum.
new_offsets_acc(axis, i) = old_offset_next - old_offset;
if (axis + 1 == num_axes) return;
old_offset = src_row_splits_ptrs_data[axis][old_offset];
old_offset_next = src_row_splits_ptrs_data[axis][old_offset_next];
}
});
ExclusiveSum(*new_offsets, new_offsets);
}
RaggedShape Index(RaggedShape &src, const Array1<int32_t> &new2old,
Array1<int32_t> *elem_indexes /*=nullptr*/) {
NVTX_RANGE(K2_FUNC);
ContextPtr &c = src.Context();
bool is_cpu = (c->GetDeviceType() == kCpu);
K2_CHECK(IsCompatible(src, new2old));
int32_t num_axes = src.NumAxes(), src_dim0 = src.Dim0(),
ans_dim0 = new2old.Dim();
if (ans_dim0 == 0) {
if (elem_indexes) *elem_indexes = Array1<int32_t>(c, 0);
return EmptyRaggedShape(c, num_axes);
}
Array1<int32_t *> src_row_splits_ptrs = GetRowSplitsPtr(src);
Array2<int32_t> old_offsets, // num_axes by ans_dim0
new_offsets; // num_axes by (ans_dim0 + 1).
GetOldAndNewOffsets(src, src_row_splits_ptrs, new2old, &old_offsets,
&new_offsets);
Array1<int32_t> tot_sizes_out =
Array1<int32_t>(new_offsets.Col(ans_dim0)).To(GetCpuContext());
if (elem_indexes) *elem_indexes = Array1<int32_t>(c, tot_sizes_out.Back());
RaggedShape ans = RaggedShapeFromTotSizes(c, num_axes, tot_sizes_out.Data());
auto old_offsets_acc = old_offsets.Accessor(),
new_offsets_acc = new_offsets.Accessor();
ParallelRunner pr(c);
std::vector<hipStream_t> streams(num_axes);
int32_t num_jobs = ans_dim0 * 2; // note: this formula is not a heuristic;
// it's how TaskRedirect works..
Array2<TaskRedirect> task_redirects(c, num_axes, num_jobs);
auto task_redirects_acc = task_redirects.Accessor();
for (int32_t axis = 0; axis < num_axes; ++axis) {
streams[axis] = pr.NewStream();
With w(streams[axis]);
const int32_t *new_offsets_ptr = new_offsets_acc.Row(axis);
TaskRedirect *task_redirect_ptr = task_redirects_acc.Row(axis);
GetTaskRedirect(c, ans_dim0, new_offsets_ptr, task_redirect_ptr);
}
for (int32_t axis = 0; axis < num_axes - 1; ++axis) {
{
int32_t *this_new_row_splits = ans.RowSplits(axis + 1).Data();
const int32_t *this_old_row_splits = src.RowSplits(axis + 1).Data();
auto lambda_set_row_splits = [=] __host__ __device__(
int32_t ans_idx0, int32_t num_threads,
int32_t thread_idx) -> void {
// 0 <= ans_idx0 < ans_dim0; and 0 <= thread_idx < num_threads,
// num_threads may have any value > 0 as far as this code is concerned.
//
// Reminder of how row_splits work dimensionally: they are a map
// from, e.g. an idx0 to an idx0x. An offsets_acc(0,n) is
// dimensionally an idx0; an offsets_acc(1,n) an idx01, and so on.
// The locations in the row_splits array are as given by
// the `axis`'th row of `offsets`; the values in the array
// are related to those in the `axis+1`'th row.
int32_t this_new_offset = new_offsets_acc(axis, ans_idx0),
next_new_offset = new_offsets_acc(axis, ans_idx0 + 1),
num_rows = next_new_offset - this_new_offset,
this_old_offset = old_offsets_acc(axis, ans_idx0),
value_offset = new_offsets_acc(axis + 1, ans_idx0) -
old_offsets_acc(axis + 1, ans_idx0);
// Using <= instead of < below causes threads for different ans_idx0 to
// write a single overlapping value, but also ensures that the
// terminating value is written. This only works because row_splits
// vectors always start with 0, which is not necessarily the case
// for row-ids.
for (; thread_idx <= num_rows; thread_idx += num_threads) {
this_new_row_splits[this_new_offset + thread_idx] =
value_offset + this_old_row_splits[this_old_offset + thread_idx];
}
};
int32_t min_threads_per_job = 2, tot_work = tot_sizes_out[axis],
target_num_loops = (is_cpu || tot_work > 1000000 ? 8 : 2);
EvalWithRedirect(streams[axis], num_jobs, task_redirects_acc.Row(axis),
min_threads_per_job, tot_work, target_num_loops,
lambda_set_row_splits);
}
{
int32_t *this_new_row_ids = ans.RowIds(axis + 1).Data();
const int32_t *this_old_row_ids = src.RowIds(axis + 1).Data();
int32_t min_threads_per_job = 2, tot_work = tot_sizes_out[axis],
target_num_loops = (is_cpu || tot_work > 1000000 ? 8 : 2);
if (elem_indexes == nullptr || axis != num_axes - 2) {
// If we don't need to write to `elem_indexes`... [caution: the next
// code block differs from this only by a statement that sets
// `elem_indexes` and they should be kept in sync.]
auto lambda_set_row_ids = [=] __host__ __device__(
int32_t ans_idx0, int32_t num_threads,
int32_t thread_idx) -> void {
// Reminder of how row_ids work dimensionally: they are a map
// from, e.g. an idx01 to an idx0. An offsets_acc(0,n) is
// dimensionally an idx0; an offsets_acc(1,n) an idx01, and so on.
// The locations in the row_ids array are as given by
// the `axis+1`'th row of `offsets`; the values in the array
// are related to those in the `axis`'th row.
int32_t this_new_offset = new_offsets_acc(axis + 1, ans_idx0),
next_new_offset = new_offsets_acc(axis + 1, ans_idx0 + 1),
num_elems = next_new_offset - this_new_offset,
this_old_offset = old_offsets_acc(axis + 1, ans_idx0),
value_offset = new_offsets_acc(axis, ans_idx0) -
old_offsets_acc(axis, ans_idx0);
for (; thread_idx < num_elems; thread_idx += num_threads) {
this_new_row_ids[this_new_offset + thread_idx] =
value_offset + this_old_row_ids[this_old_offset + thread_idx];
}
};
EvalWithRedirect(streams[axis + 1], num_jobs,
task_redirects_acc.Row(axis + 1), min_threads_per_job,
tot_work, target_num_loops, lambda_set_row_ids);
} else {
int32_t *elem_indexes_data = elem_indexes->Data();
// We need to write to `elem_indexes`. Note: this code block only
// differs from the above by an extra statement regarding
// `elem_indexes`. Comments have been removed.
auto lambda_set_row_ids_and_elem_indexes =
[=] __host__ __device__(int32_t ans_idx0, int32_t num_threads,
int32_t thread_idx) -> void {
int32_t this_new_offset = new_offsets_acc(axis + 1, ans_idx0),
next_new_offset = new_offsets_acc(axis + 1, ans_idx0 + 1),
num_elems = next_new_offset - this_new_offset,
this_old_offset = old_offsets_acc(axis + 1, ans_idx0),
value_offset = new_offsets_acc(axis, ans_idx0) -
old_offsets_acc(axis, ans_idx0);
for (; thread_idx < num_elems; thread_idx += num_threads) {
this_new_row_ids[this_new_offset + thread_idx] =
value_offset + this_old_row_ids[this_old_offset + thread_idx];
elem_indexes_data[this_new_offset + thread_idx] =
this_old_offset + thread_idx;
}
};
EvalWithRedirect(streams[axis + 1], num_jobs,
task_redirects_acc.Row(axis + 1), min_threads_per_job,
tot_work, target_num_loops,
lambda_set_row_ids_and_elem_indexes);
}
}
}
#if !defined(NDEBUG)
ans.Check();
#endif
return ans;
}
Array2<int32_t> GetOffsets(int32_t num_srcs, RaggedShape **src) {
K2_CHECK_GT(num_srcs, 0);
int32_t num_axes_in = src[0]->NumAxes();
ContextPtr ctx = src[0]->Context();
Array2<int32_t> src_offsets(GetCpuContext(), num_axes_in + 1, num_srcs + 1);
int32_t *src_offsets_data = src_offsets.Data();
int32_t src_offsets_stride0 = src_offsets.ElemStride0();
// Check if they have same num-axes and compatible context
for (int32_t i = 1; i < num_srcs; ++i) {
K2_CHECK_EQ(src[i]->NumAxes(), num_axes_in);
K2_CHECK(ctx->IsCompatible(*src[i]->Context()));
}
for (int32_t axis = 0; axis <= num_axes_in; ++axis) {
int32_t sum = 0;
for (int32_t i = 0; i <= num_srcs; ++i) { // i is the column
src_offsets_data[axis * src_offsets_stride0 + i] = sum;
if (i < num_srcs) {
sum += (axis == 0 ? 1 : src[i]->TotSize(axis - 1));
}
}
}
return src_offsets;
}
void GetRowInfo(RaggedShape &src, Array1<int32_t *> *row_splits,
Array1<int32_t *> *row_ids) {
NVTX_RANGE(K2_FUNC);
int32_t axes = src.NumAxes();
K2_CHECK_GE(axes, 2);
src.Populate();
std::vector<int32_t *> row_splits_ptrs(axes - 1);
std::vector<int32_t *> row_ids_ptrs(axes - 1);
for (int32_t i = 1; i != axes; ++i) {
row_splits_ptrs[i - 1] = src.RowSplits(i).Data();
row_ids_ptrs[i - 1] = src.RowIds(i).Data();
}
ContextPtr ctx = src.Context();
*row_splits = Array1<int32_t *>(ctx, row_splits_ptrs);
*row_ids = Array1<int32_t *>(ctx, row_ids_ptrs);
}
void GetRowInfoMulti(int32_t num_srcs, RaggedShape **src,
Array2<int32_t *> *row_splits,
Array2<int32_t *> *row_ids) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(num_srcs, 0);
int32_t num_axes_in = src[0]->NumAxes();
K2_CHECK_GE(num_axes_in, 2);
ContextPtr ctx = src[0]->Context();
// check if they have same num-axes and compatible context
for (int32_t i = 1; i < num_srcs; ++i) {
K2_CHECK_EQ(src[i]->NumAxes(), num_axes_in);
K2_CHECK(ctx->IsCompatible(*src[i]->Context()));
}
Array2<int32_t *> row_splits_ptrs(GetCpuContext(), num_axes_in - 1, num_srcs);
Array2<int32_t *> row_ids_ptrs(GetCpuContext(), num_axes_in - 1, num_srcs);
int32_t **splits_ptr_data = row_splits_ptrs.Data();
int32_t **ids_ptr_data = row_ids_ptrs.Data();
int32_t stride0 = row_splits_ptrs.ElemStride0();
K2_CHECK_EQ(stride0, row_ids_ptrs.ElemStride0());
for (int32_t axis = 0; axis != num_axes_in - 1; ++axis) {
for (int32_t i = 0; i != num_srcs; ++i) {
splits_ptr_data[axis * stride0 + i] = src[i]->RowSplits(axis + 1).Data();
ids_ptr_data[axis * stride0 + i] = src[i]->RowIds(axis + 1).Data();
}
}
*row_splits = row_splits_ptrs.To(ctx);
*row_ids = row_ids_ptrs.To(ctx);
}
static RaggedShape AppendAxis0(int32_t num_srcs, RaggedShape **src,
Array1<uint32_t> *merge_map /* == nullptr*/) {
NVTX_RANGE(K2_FUNC);
if (num_srcs == 1) {
if (merge_map)
*merge_map = Arange<uint32_t>(src[0]->Context(), 0,
src[0]->NumElements());
return **src;
}
K2_CHECK_GT(num_srcs, 1);
int32_t num_axes = src[0]->NumAxes();
ContextPtr c = src[0]->Context();
bool is_cpu = (c->GetDeviceType() == kCpu);
// Check if they have same num-axes and compatible context
for (int32_t i = 1; i < num_srcs; ++i) {
K2_CHECK_EQ(num_axes, src[i]->NumAxes());
K2_CHECK(IsCompatible(*src[0], *src[i]));
}
// `offsets` will be on CPU for now.
Array2<int32_t> offsets = GetOffsets(num_srcs, src);
auto offsets_acc = offsets.Accessor();
std::vector<int32_t> tot_sizes_out(num_axes);
for (int32_t axis = 0; axis < num_axes; ++axis)
tot_sizes_out[axis] = offsets_acc(axis + 1, num_srcs);
RaggedShape ans = RaggedShapeFromTotSizes(c, num_axes,
tot_sizes_out.data());
Array2<int32_t *> src_row_splits, src_row_ids;
GetRowInfoMulti(num_srcs, src, &src_row_splits, &src_row_ids);
auto src_row_splits_acc = src_row_splits.Accessor(),
src_row_ids_acc = src_row_ids.Accessor();
offsets = offsets.To(c);
offsets_acc = offsets.Accessor(); // on GPU now (if we're using one)
ParallelRunner pr(c);
std::vector<hipStream_t> streams(num_axes);
int32_t num_jobs = num_srcs * 2;
// task_redirects is a device array (if using GPU).
// We have `num_axes - 1` different sets of row_splits/row_ids to
// populate but they have different sizes; the total number of distinct
// sizes is `num_axes`.
Array2<TaskRedirect> task_redirects(c, num_axes, num_jobs);
auto task_redirects_acc = task_redirects.Accessor();
// populate task_redirects (these allocate blocks of threads roughly
  // proportionally to the amount of data to process from each source).
for (int32_t axis = 0; axis < num_axes; ++axis) {
streams[axis] = pr.NewStream();
With w(streams[axis]);
const int32_t *offsets = offsets_acc.Row(axis + 1);
// c->GetCudaStream() == stream[axis] as it has been overridden by With
GetTaskRedirect(c, num_srcs, offsets, task_redirects_acc.Row(axis));
}
for (int32_t axis = 0; axis < num_axes - 1; axis++) {
// first set the row-splits.
int32_t **this_src_row_splits = src_row_splits_acc.Row(axis),
**this_src_row_ids = src_row_ids_acc.Row(axis);
int32_t *this_dest_row_splits = ans.RowSplits(axis + 1).Data(),
*this_dest_row_ids = ans.RowIds(axis + 1).Data();
const int32_t *offsets_this_axis = offsets_acc.Row(axis + 1),
*offsets_next_axis = offsets_acc.Row(axis + 2);
auto lambda_set_row_splits = [=] __host__ __device__(
int32_t src_idx, int32_t num_threads,
int32_t thread_idx) -> void {
// Reminder of how row_splits work dimensionally: they are a map
// from, e.g. an idx0 to an idx0x. An offsets_acc(0,n) is
// dimensionally an idx0; an offsets_acc(1,n) an idx01, and so on.
int32_t this_offset = offsets_this_axis[src_idx],
next_offset = offsets_this_axis[src_idx + 1],
this_value_offset = offsets_next_axis[src_idx],
num_rows = next_offset - this_offset;
int32_t *src_row_splits_ptr = this_src_row_splits[src_idx];
// Using <= instead of < below causes threads for different src_idx to
// write a single overlapping value, but also ensures that the
// terminating value is written. This only works because row_splits
// vectors always start with 0, which is not necessarily the case
// for row-ids.
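      // Illustrative sketch (hypothetical values, not from the source): if
      // this source's row_splits on this axis are [ 0 2 5 ] (so num_rows == 2)
      // and this_value_offset == 10, the loop below writes 10, 12, 15 into
      // this_dest_row_splits at positions this_offset .. this_offset + 2.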
for (; thread_idx <= num_rows; thread_idx += num_threads) {
this_dest_row_splits[this_offset + thread_idx] =
this_value_offset + src_row_splits_ptr[thread_idx];
}
};
int32_t min_threads_per_job = 2, tot_work = tot_sizes_out[axis],
target_num_loops = (is_cpu || tot_work > 1000000 ? 8 : 2);
EvalWithRedirect(streams[axis], num_jobs, task_redirects_acc.Row(axis),
min_threads_per_job, tot_work, target_num_loops,
lambda_set_row_splits);
{ // set the row-ids
auto lambda_set_row_ids = [=] __host__ __device__(
int32_t src_idx, int32_t num_threads,
int32_t thread_idx) -> void {
// Reminder of how row_ids work dimensionally: they are a map
// from, e.g. an idx01 to an idx0. An offsets_acc(0,n) is
// dimensionally an idx0; an offsets_acc(1,n) an idx01, and so on.
int32_t this_offset = offsets_next_axis[src_idx],
next_offset = offsets_next_axis[src_idx + 1],
this_value_offset = offsets_this_axis[src_idx],
num_elems = next_offset - this_offset;
int32_t *src_row_ids_ptr = this_src_row_ids[src_idx];
for (; thread_idx < num_elems; thread_idx += num_threads) {
this_dest_row_ids[this_offset + thread_idx] =
this_value_offset + src_row_ids_ptr[thread_idx];
}
};
int32_t min_threads_per_job = 2, tot_work = tot_sizes_out[axis + 1],
target_num_loops = (tot_work > 1000000 ? 4 : 2);
// TODO(haowen): maybe we should launch kernels for row_splits and row_ids
// in different streams
EvalWithRedirect(streams[axis + 1], num_jobs,
task_redirects_acc.Row(axis + 1), min_threads_per_job,
tot_work, target_num_loops, lambda_set_row_ids);
}
}
if (merge_map) {
std::vector<int32_t> num_elems_out(num_srcs);
for (int32_t i = 0; i < num_srcs; ++i)
num_elems_out[i] = src[i]->NumElements();
*merge_map = SizesToMergeMap(c, num_elems_out);
}
return ans;
}
RaggedShape Append(int32_t axis, int32_t num_srcs, RaggedShape **src,
Array1<uint32_t> *merge_map /* == nullptr*/) {
K2_CHECK(num_srcs > 0);
if (axis == 0)
return AppendAxis0(num_srcs, src, merge_map);
K2_CHECK_LT(static_cast<uint32_t>(axis),
static_cast<uint32_t>(src[0]->NumAxes()));
int32_t num_axes = src[0]->NumAxes();
std::vector<RaggedShapeLayer> ans_layers(num_axes - 1);
// If axis >= 2, some layers of `src` will pass through unchanged (we should
// check that they are identical across all sources).
for (int32_t l = 0; l + 1 < axis; l++) {
CheckLayerEqual(l, num_srcs, src);
ans_layers[l] = src[0]->Layers()[l];
}
Array1<uint32_t> merge_map_local;
Array1<uint32_t> *this_m = (axis + 1 == num_axes ? merge_map :
&merge_map_local);
RaggedShape s = IntersperseRaggedLayer(axis - 1, num_srcs, src, this_m),
t = SubsampleRaggedLayer(s, 0, num_srcs);
ans_layers[axis - 1] = t.Layers()[0];
for (int32_t l = axis; l + 1 < num_axes; l++) {
Array1<uint32_t> merge_map_next;
Array1<uint32_t> *this_m = (l + 2 == num_axes ? merge_map :
&merge_map_next);
RaggedShape r = MergeRaggedLayer(l, num_srcs, src,
merge_map_local, this_m);
ans_layers[l] = r.Layers()[0];
merge_map_local = merge_map_next;
}
// TODO(dan) after this is debugged: add ", false".
return RaggedShape(ans_layers);
}
RaggedShape RemoveAxis(RaggedShape &src, int32_t axis) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(src.NumAxes(), 2);
K2_CHECK(axis >= 0 && axis < src.NumAxes());
// note, `axes_in` is of dim src.NumAxes() - 1.
// Also note: axes_in[i] pertains to the relationship between
// axes i and i+1 in the source.
src.Populate();
const std::vector<RaggedShapeLayer> &axes_in = src.Layers();
std::vector<RaggedShapeLayer> axes_out(axes_in.size() - 1);
int32_t axes_out_size = static_cast<int32_t>(axes_out.size());
for (int32_t i = 0; i < axis - 1; ++i) axes_out[i] = axes_in[i];
if (axis > 0 && axis + 1 < src.NumAxes()) {
axes_out[axis - 1].row_ids =
axes_in[axis - 1].row_ids[axes_in[axis].row_ids];
axes_out[axis - 1].row_splits =
axes_in[axis].row_splits[axes_in[axis - 1].row_splits];
axes_out[axis - 1].cached_tot_size = axes_out[axis - 1].row_ids.Dim();
}
for (int32_t i = axis; i < axes_out_size; ++i) axes_out[i] = axes_in[i + 1];
return RaggedShape(axes_out);
}
RaggedShape MakeTransposable(RaggedShape &src) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(src.NumAxes(), 2);
int32_t src_dim0 = src.Dim0(), src_tot_size1 = src.TotSize(1);
if (src_dim0 <= 1) return src;
ContextPtr c = src.Context();
int32_t num_axes = src.NumAxes();
int32_t max_size = src.MaxSize(1);
if (max_size <= 0) return src;
int32_t ans_tot_size1 = max_size * src_dim0;
src.Populate();
const std::vector<RaggedShapeLayer> &axes_in = src.Layers();
std::vector<RaggedShapeLayer> axes_out(num_axes - 1);
const int32_t *src_row_splits1_data = src.RowSplits(1).Data();
const int32_t *src_row_ids1_data = src.RowIds(1).Data();
{
ParallelRunner pr(c);
RaggedShapeLayer &axis1_shape = axes_out[0];
{
// set ans.RowSplits(1);
With w(pr.NewStream());
axis1_shape.row_splits = Range(c, src_dim0 + 1, 0, max_size);
}
{
// set ans.RowIds(1);
With w(pr.NewStream());
axis1_shape.row_ids = Array1<int32_t>(c, ans_tot_size1);
int32_t *row_ids1_data = axis1_shape.row_ids.Data();
axis1_shape.cached_tot_size = ans_tot_size1;
K2_EVAL(
c, ans_tot_size1, lambda_set_row_ids1,
(int32_t i)->void { row_ids1_data[i] = i / max_size; });
}
if (num_axes > 2) {
RaggedShapeLayer &axis2_shape = axes_out[1];
const int32_t *src_row_splits2_data = src.RowSplits(2).Data();
{
// set ans.RowSplits(2);
With w(pr.NewStream());
axis2_shape.cached_tot_size = src.TotSize(2);
axis2_shape.row_splits = Array1<int32_t>(c, ans_tot_size1 + 1);
int32_t *ans_row_splits2_data = axis2_shape.row_splits.Data();
K2_EVAL(
c, ans_tot_size1 + 1, lambda_set_row_splits2,
(int32_t idx01)->void {
if (idx01 == ans_tot_size1) {
ans_row_splits2_data[idx01] =
src_row_splits2_data[src_tot_size1];
return;
}
int32_t idx0 = idx01 / max_size, idx1 = idx01 % max_size;
int32_t idx0x = src_row_splits1_data[idx0],
idx0x_next = src_row_splits1_data[idx0 + 1];
int32_t num_elems_this_row = idx0x_next - idx0x;
if (idx1 < num_elems_this_row)
ans_row_splits2_data[idx01] =
src_row_splits2_data[idx0x + idx1];
else
ans_row_splits2_data[idx01] =
src_row_splits2_data[idx0x_next]; // append empty row
});
}
{
// set ans.RowIds(2);
With w(pr.NewStream());
int32_t tot_size2 = src.TotSize(2);
axis2_shape.row_ids = Array1<int32_t>(c, tot_size2);
int32_t *ans_row_ids2_data = axis2_shape.row_ids.Data();
const int32_t *src_row_ids2_data = src.RowIds(2).Data();
K2_EVAL(
c, tot_size2, lambda_set_row_ids2, (int32_t idx012)->void {
int32_t src_idx01 = src_row_ids2_data[idx012];
int32_t src_idx0 = src_row_ids1_data[src_idx01];
int32_t src_idx1 = src_idx01 - src_row_splits1_data[src_idx0];
ans_row_ids2_data[idx012] = (src_idx0 * max_size) + src_idx1;
});
}
}
}
  // copy the remaining row_splits and row_ids.
for (int32_t i = 2; i < num_axes - 1; ++i) axes_out[i] = axes_in[i];
return RaggedShape(axes_out);
}
// transpose axes 0 and 1.
RaggedShape Transpose(RaggedShape &src, Array1<int32_t> *value_indexes) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(src.NumAxes(), 2);
ContextPtr c = src.Context();
int32_t src_dim0 = src.Dim0(), src_tot_size1 = src.TotSize(1);
if (src_dim0 <= 0) return src;
int32_t src_dim1 = src_tot_size1 / src_dim0;
K2_CHECK_EQ(src_tot_size1 % src_dim0, 0)
<< "Transpose(): all dims on axis 0 must be the same.\n"
<< "src_tot_size1: " << src_tot_size1 << "\n"
<< "src_dim0: " << src_dim0 << ", array is: " << src;
K2_DCHECK(
Equal(src.RowSplits(1), Range(c, src.RowSplits(1).Dim(), 0, src_dim1)))
<< " Expected row-splits to be evenly spaced: " << src.RowSplits(1);
RaggedShape src_no_axis0 = RemoveAxis(src, 0);
K2_CHECK_EQ(src_no_axis0.Dim0(), src_tot_size1);
  // `renumbering` is a `new2old` map that maps from the first index into
  // src_no_axis0_renumbered to the first index into src_no_axis0.
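  // For instance (hypothetical values): with src_dim0 == 2 and src_dim1 == 3
  // the lambda below produces renumbering == [ 0 3 1 4 2 5 ].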
Array1<int32_t> renumbering(c, src_tot_size1);
int32_t *renumbering_data = renumbering.Data();
K2_EVAL(
c, src_tot_size1, lambda_set_renumbering, (int32_t i)->void {
int32_t j = i % src_dim0, k = i / src_dim0, i_old = j * src_dim1 + k;
renumbering_data[i] = i_old;
});
RaggedShape src_no_axis0_renumbered =
Index(src_no_axis0, renumbering, value_indexes);
int32_t num_rows = src_dim1, row_splits_dim = num_rows + 1,
row_ids_dim = src_tot_size1;
std::vector<RaggedShapeLayer> ans_axis0(1);
Array1<int32_t> mem(c, row_splits_dim + row_ids_dim);
int32_t *mem_data = mem.Data();
K2_EVAL(
c, row_splits_dim + row_ids_dim, lambda_set_row_info, (int32_t i)->void {
int32_t val;
if (i >= row_splits_dim) {
// row_ids
int32_t elem_idx = i - row_splits_dim;
val = elem_idx / src_dim0;
} else {
// row_splits
int32_t row_idx = i;
val = row_idx * src_dim0;
}
mem_data[i] = val;
});
ans_axis0[0].row_splits = mem.Range(0, row_splits_dim);
ans_axis0[0].row_ids = mem.Range(row_splits_dim, row_ids_dim);
ans_axis0[0].cached_tot_size = row_ids_dim;
RaggedShape temp(ans_axis0);
return ComposeRaggedShapes(temp, src_no_axis0_renumbered);
}
RaggedShape Stack(int32_t axis, int32_t num_srcs, RaggedShape **src,
Array1<uint32_t> *merge_map /* = nullptr*/) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(num_srcs, 0);
K2_CHECK_LT(static_cast<uint32_t>(axis),
static_cast<uint32_t>(src[0]->NumAxes()));
ContextPtr c = src[0]->Context();
if (axis == 0) {
RaggedShape ans_appended = AppendAxis0(num_srcs, src, merge_map);
ContextPtr cpu = GetCpuContext();
Array1<int32_t> row_splits(cpu, num_srcs + 1);
int32_t *row_splits_data = row_splits.Data();
for (int32_t i = 0; i < num_srcs; i++)
row_splits_data[i] = src[i]->Dim0();
int32_t cutoff = 32;
if (num_srcs < cutoff) row_splits = row_splits.To(c);
ExclusiveSum(row_splits, &row_splits);
if (num_srcs >= cutoff) row_splits = row_splits.To(c);
int32_t num_elems = ans_appended.Dim0();
Array1<int32_t> row_ids(c, num_elems);
RowSplitsToRowIds(row_splits, &row_ids);
RaggedShape ans_layer0 = RaggedShape2(&row_splits, &row_ids, num_elems);
return ComposeRaggedShapes(ans_layer0, ans_appended);
}
K2_CHECK_LT(static_cast<uint32_t>(axis),
static_cast<uint32_t>(src[0]->NumAxes()));
int32_t num_axes = src[0]->NumAxes();
std::vector<RaggedShapeLayer> ans_layers(num_axes);
// If axis >= 2, some layers of `src` will pass through unchanged (we should
// check that they are identical across all sources).
for (int32_t l = 0; l + 1 < axis; l++) {
CheckLayerEqual(l, num_srcs, src);
ans_layers[l] = src[0]->Layers()[l];
}
Array1<uint32_t> merge_map_local;
Array1<uint32_t> *this_m = (axis + 1 == num_axes ? merge_map :
&merge_map_local);
RaggedShape s = IntersperseRaggedLayer(axis - 1, num_srcs, src, this_m);
// note: s.Dim0() will be a multiple of num_srcs.
ans_layers[axis - 1] = RegularRaggedShape(c, s.Dim0() / num_srcs,
num_srcs).Layers()[0];
ans_layers[axis] = s.Layers()[0];
for (int32_t l = axis; l + 1 < num_axes; l++) {
Array1<uint32_t> merge_map_next;
Array1<uint32_t> *this_m = (l + 2 == num_axes ? merge_map :
&merge_map_next);
RaggedShape r = MergeRaggedLayer(l, num_srcs, src,
merge_map_local, this_m);
ans_layers[l + 1] = r.Layers()[0];
merge_map_local = merge_map_next;
}
// TODO(dan) after this is debugged: add ", false".
return RaggedShape(ans_layers);
}
RaggedShape Merge(int32_t num_srcs,
RaggedShape **src,
const Array1<uint32_t> &merge_map,
Array1<uint32_t> *merge_map_out) {
K2_CHECK(num_srcs > 0);
int32_t num_layers = src[0]->NumAxes() - 1;
std::vector<RaggedShapeLayer> ans_layers(num_layers);
// Note: this is a shallow copy.
Array1<uint32_t> merge_map_local = merge_map;
for (int32_t l = 0; l < num_layers; l++) {
Array1<uint32_t> merge_map_next;
Array1<uint32_t> *this_m = (l + 1 == num_layers ? merge_map_out :
&merge_map_next);
RaggedShape r = MergeRaggedLayer(l, num_srcs, src, merge_map_local, this_m);
ans_layers[l] = r.Layers()[0];
merge_map_local = merge_map_next;
}
// TODO(dan) after this is debugged: add ", false".
return RaggedShape(ans_layers);
}
RaggedShape TrivialShape(ContextPtr &c, int32_t num_elems) {
NVTX_RANGE(K2_FUNC);
  // row_splits = [ 0, num_elems ], row_ids = [ 0, 0, .., 0 ] (num_elems zeros)
Array1<int32_t> row_splits = Range<int32_t>(c, 2, 0, num_elems);
Array1<int32_t> row_ids(c, num_elems, 0);
return RaggedShape2(&row_splits, &row_ids, num_elems);
}
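// Usage sketch for TrivialShape() (hypothetical, assuming a context `c`):
//   RaggedShape s = TrivialShape(c, 3);
//   // s.NumAxes() == 2, s.Dim0() == 1, s.NumElements() == 3,
//   // i.e. the shape of the ragged array [ [ x x x ] ].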
RaggedShape RegularRaggedShape(ContextPtr &c, int32_t dim0, int32_t dim1) {
NVTX_RANGE(K2_FUNC);
Array1<int32_t> row_splits = Range<int32_t>(c, dim0 + 1, 0, dim1);
int32_t *row_splits_data = row_splits.Data();
Array1<int32_t> row_ids(c, dim0 * dim1);
int32_t *row_ids_data = row_ids.Data();
K2_EVAL2(
c, dim0, dim1, lambda_set_row_ids,
(int32_t i, int32_t j)->void { row_ids_data[i * dim1 + j] = i; });
return RaggedShape2(&row_splits, &row_ids, dim0 * dim1);
}
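// For example (hypothetical values): RegularRaggedShape(c, 2, 3) has
// row_splits1 == [ 0 3 6 ] and row_ids1 == [ 0 0 0 1 1 1 ], i.e. the shape
// of [ [ x x x ] [ x x x ] ].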
Ragged<int32_t> GetCountsPartitioned(Ragged<int32_t> &src,
RaggedShape &ans_ragged_shape) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(src.NumAxes(), 2);
K2_CHECK_EQ(ans_ragged_shape.NumAxes(), 2);
K2_CHECK(IsCompatible(src, ans_ragged_shape));
K2_CHECK_EQ(src.Dim0(), ans_ragged_shape.Dim0());
const Array1<int32_t> &values = src.values;
const Array1<int32_t> &row_splits = ans_ragged_shape.RowSplits(1);
int32_t n = ans_ragged_shape.NumElements();
Array1<int32_t> counts = GetCounts(values, n);
return Ragged<int32_t>(ans_ragged_shape, counts);
}
static Array1<int32_t> GetTransposeReorderingCpu(Ragged<int32_t> &src,
int32_t num_cols) {
NVTX_RANGE(K2_FUNC);
std::vector<std::vector<int32_t>> column_indexes(num_cols); // [column][row]
const int32_t *values_data = src.values.Data();
int32_t n = src.values.Dim();
for (int32_t i = 0; i != n; ++i) {
int32_t bucket = values_data[i];
column_indexes[bucket].push_back(i);
}
Array1<int32_t> ans(src.Context(), n);
int32_t *ans_data = ans.Data();
for (int32_t i = 0; i != num_cols; ++i) {
std::copy(column_indexes[i].begin(), column_indexes[i].end(), ans_data);
ans_data += column_indexes[i].size();
}
return ans;
}
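// Worked example for GetTransposeReorderingCpu() (hypothetical values): if
// src.values == [ 1 0 0 2 ] and num_cols == 3, the buckets are
// column 0 -> {1, 2}, column 1 -> {0}, column 2 -> {3}, so the returned
// order is [ 1 2 0 3 ].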
static Array1<int32_t> GetTransposeReorderingThreeAxesCuda(Ragged<int32_t> &src,
int32_t num_cols) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(src.NumAxes(), 3);
ContextPtr &context = src.Context();
K2_CHECK_EQ(context->GetDeviceType(), kCuda);
const Array1<int32_t> &row_splits1 = src.RowSplits(1);
const int32_t *row_ids2_data = src.RowIds(2).Data();
const int32_t *value_data = src.values.Data();
Array1<int32_t> segments = src.RowSplits(2)[row_splits1];
auto lambda_comp = [=] __device__(int32_t a_idx012,
int32_t b_idx012) -> bool {
int32_t a_col_index = value_data[a_idx012];
int32_t b_col_index = value_data[b_idx012];
if (a_col_index < b_col_index) return true; // sort by column indexes
if (a_col_index > b_col_index) return false;
    // at this point, a_idx012 and b_idx012 belong to the same column,
    // so we sort by their row indexes
int32_t a_idx01 = row_ids2_data[a_idx012];
int32_t b_idx01 = row_ids2_data[b_idx012];
if (a_idx01 < b_idx01) return true;
if (a_idx01 > b_idx01) return false;
// at this point, a_idx012 and b_idx012 are duplicate elements
return false; // either true or false is fine
};
mgpu::context_t *mgpu_context = GetModernGpuAllocator(context);
int32_t n = src.values.Dim();
Array1<int32_t> ans = Range(context, n, 0);
if (n == 0) return ans;
K2_CUDA_SAFE_CALL(mgpu::segmented_sort(ans.Data(), // keys
ans.Dim(), // count
segments.Data(), // segments
segments.Dim() - 1, // num_segments
lambda_comp, *mgpu_context));
return ans;
}
Array1<int32_t> GetTransposeReordering(Ragged<int32_t> &src, int32_t num_cols) {
NVTX_RANGE(K2_FUNC);
ContextPtr &context = src.Context();
if (src.NumAxes() < 2) {
// src is empty
return Array1<int32_t>(context, 0);
}
DeviceType device_type = context->GetDeviceType();
if (device_type == kCpu) return GetTransposeReorderingCpu(src, num_cols);
K2_CHECK_EQ(device_type, kCuda);
if (src.NumAxes() == 3)
return GetTransposeReorderingThreeAxesCuda(src, num_cols);
const int32_t *row_splits1_data = src.RowSplits(src.NumAxes() - 1).Data();
const int32_t *row_ids1_data = src.RowIds(src.NumAxes() - 1).Data();
const int32_t *value_data = src.values.Data();
int32_t n = src.values.Dim();
Array1<int32_t> ans = Range(context, n, 0);
if (n == 0) return ans;
auto lambda_comp = [=] __device__(int32_t a_idx01, int32_t b_idx01) -> bool {
int32_t a_idx0 = row_ids1_data[a_idx01];
int32_t b_idx0 = row_ids1_data[b_idx01];
int32_t a_col_index = value_data[a_idx01];
int32_t b_col_index = value_data[b_idx01];
if (a_col_index < b_col_index) return true; // sort by column indexes
if (a_col_index > b_col_index) return false;
// now we have a_col_index == b_col_index
if (a_idx0 < b_idx0) return true; // sort by row indexes
if (a_idx0 > b_idx0) return false;
// now we have a_idx0 == b_idx0 && a_col_index == b_col_index
// this entry is duplicated in the sparse matrix.
return false; // we can return either true or false here.
};
mgpu::context_t *mgpu_context = GetModernGpuAllocator(context);
K2_CUDA_SAFE_CALL(mgpu::mergesort(ans.Data(), n, lambda_comp, *mgpu_context));
return ans;
}
RaggedShape ChangeSublistSize(RaggedShape &src, int32_t size_delta) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(src.NumAxes(), 2);
// the result will have the same num-axes as `src` (the NumAxes() of the
// object is not the same as the number of RaggedShapeLayer axes).
std::vector<RaggedShapeLayer> ans_axes(src.NumAxes() - 1);
int32_t last_axis = src.NumAxes() - 1;
// The following will only do something if src.NumAxes() > 2.
for (int32_t i = 0; i + 1 < last_axis; ++i) ans_axes[i] = src.Layers()[i];
ContextPtr &c = src.Context();
int32_t num_rows = src.TotSize(last_axis - 1),
src_num_elems = src.TotSize(last_axis),
num_elems = src_num_elems + size_delta * num_rows;
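  // Illustrative sketch (hypothetical values): with last-axis row_splits
  // [ 0 2 5 ] and size_delta == 1, the kernels below produce row_splits
  // [ 0 3 7 ], i.e. every sublist grows by one element.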
ans_axes.back().row_splits = Array1<int32_t>(c, num_rows + 1);
ans_axes.back().row_ids = Array1<int32_t>(c, num_elems);
ans_axes.back().cached_tot_size = num_elems;
const int32_t *src_row_splits_data = src.RowSplits(last_axis).Data(),
*src_row_ids_data = src.RowIds(last_axis).Data();
int32_t *row_splits_data = ans_axes.back().row_splits.Data(),
*row_ids_data = ans_axes.back().row_ids.Data();
{
ParallelRunner pr(c);
{
With w(pr.NewStream());
K2_EVAL(
c, num_rows + 1, lambda_set_row_splits, (int32_t idx0)->void {
row_splits_data[idx0] =
src_row_splits_data[idx0] + size_delta * idx0;
});
}
{
With w(pr.NewStream());
K2_EVAL(
c, src_num_elems, lambda_set_row_ids1, (int32_t src_idx01)->void {
int32_t src_idx0 = src_row_ids_data[src_idx01],
src_idx0x = src_row_splits_data[src_idx0],
src_idx1 = src_idx01 - src_idx0x,
new_idx0x = row_splits_data[src_idx0],
new_idx0x_next = row_splits_data[src_idx0 + 1],
new_idx01 = new_idx0x + src_idx1;
            // it's only necessary to guard the next statement with an 'if'
            // because size_delta might be negative.
if (new_idx01 < new_idx0x_next) row_ids_data[new_idx01] = src_idx0;
});
}
if (size_delta > 0) {
// This sets the row-ids that are not set by lambda_set_row_ids1.
With w(pr.NewStream());
K2_EVAL(
c, num_rows * size_delta, lambda_set_row_ids2, (int32_t i)->void {
int32_t idx0 = i / size_delta, n = i % size_delta,
next_idx0 = idx0 + 1;
// The following formula is the same as the one in
// lambda_set_row_splits; we want to compute the new value of
// row_splits_data[next_idx0] without waiting for that kernel to
// terminate.
int32_t next_idx0x =
src_row_splits_data[next_idx0] + size_delta * next_idx0;
row_ids_data[next_idx0x - 1 - n] = idx0;
});
}
// make the ParallelRunner go out of scope (should do this before any
// validation code that gets invoked by the constructor of RaggedShape
// below).
}
return RaggedShape(ans_axes);
}
// TODO(dan): this could definitely be made more efficient.
RaggedShape ChangeSublistSizePinned(RaggedShape &src, int32_t size_delta) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(src.NumAxes(), 2);
// the result will have the same num-axes as `src` (the NumAxes() of the
// object is not the same as the number of RaggedShapeLayer axes).
std::vector<RaggedShapeLayer> ans_axes(src.NumAxes() - 1);
int32_t last_axis = src.NumAxes() - 1;
// The following will only do something if src.NumAxes() > 2.
for (int32_t i = 0; i + 1 < last_axis; ++i) ans_axes[i] = src.Layers()[i];
ContextPtr &c = src.Context();
int32_t num_rows = src.TotSize(last_axis - 1);
ans_axes.back().row_splits = Array1<int32_t>(c, num_rows + 1);
const int32_t *src_row_splits_data = src.RowSplits(last_axis).Data();
int32_t *row_splits_data = ans_axes.back().row_splits.Data();
K2_EVAL(
c, num_rows, lambda_set_row_sizes, (int32_t idx0)->void {
int32_t orig_size =
src_row_splits_data[idx0 + 1] - src_row_splits_data[idx0],
size;
if (orig_size == 0 || orig_size + size_delta <= 0)
size = 0;
else
size = orig_size + size_delta;
row_splits_data[idx0] = size;
});
ExclusiveSum(ans_axes.back().row_splits, &ans_axes.back().row_splits);
ans_axes.back().row_ids =
Array1<int32_t>(c, ans_axes.back().row_splits.Back());
RowSplitsToRowIds(ans_axes.back().row_splits, &ans_axes.back().row_ids);
ans_axes.back().cached_tot_size = ans_axes.back().row_ids.Dim();
return RaggedShape(ans_axes);
}
RaggedShape Prefix(RaggedShape &src, int32_t n) {
NVTX_RANGE(K2_FUNC);
int32_t dim0 = src.Dim0();
K2_CHECK(n >= 0 && n <= dim0);
src.Populate();
int32_t num_axes = src.NumAxes();
K2_CHECK_GE(num_axes, 2);
const std::vector<RaggedShapeLayer> &axes_in = src.Layers();
std::vector<RaggedShapeLayer> axes_out(axes_in.size());
int32_t row_end = n;
for (int32_t axis = 0; axis < num_axes - 1; ++axis) {
axes_out[axis].row_splits = axes_in[axis].row_splits.Arange(0, row_end + 1);
// notice here we may do a memory copy from GPU to CPU.
row_end = axes_in[axis].row_splits[row_end];
axes_out[axis].row_ids = axes_in[axis].row_ids.Arange(0, row_end);
axes_out[axis].cached_tot_size = row_end;
}
return RaggedShape(axes_out);
}
std::vector<RaggedShape> GetPrefixes(RaggedShape &src,
const std::vector<int32_t> &sizes) {
NVTX_RANGE(K2_FUNC);
src.Populate();
int32_t dim0 = src.Dim0();
int32_t num_axes = src.NumAxes();
K2_CHECK_GE(num_axes, 2);
ContextPtr &c = src.Context();
const std::vector<RaggedShapeLayer> &axes_in = src.Layers();
// get those row_end elements at each axis.
int32_t ans_size = static_cast<int32_t>(sizes.size());
Array1<int32_t> row_ends(c, num_axes * ans_size);
Array1<int32_t> sizes_array(GetCpuContext(), sizes);
Array1<int32_t> indexes = row_ends.Arange(0, ans_size);
indexes.CopyFrom(sizes_array);
for (int32_t axis = 1; axis < num_axes; ++axis) {
Array1<int32_t> curr_axis_row_ends =
row_ends.Arange(axis * ans_size, (axis + 1) * ans_size);
axes_in[axis - 1].row_splits.Index(indexes, &curr_axis_row_ends);
indexes = curr_axis_row_ends;
}
row_ends = row_ends.To(GetCpuContext());
std::vector<RaggedShape> ans(ans_size);
for (int32_t i = 0; i != ans_size; ++i) {
std::vector<RaggedShapeLayer> axes_out(axes_in.size());
int32_t row_end = row_ends[i];
K2_CHECK(row_end >= 0 && row_end <= dim0);
for (int32_t axis = 0; axis < num_axes - 1; ++axis) {
axes_out[axis].row_splits =
axes_in[axis].row_splits.Arange(0, row_end + 1);
row_end = row_ends[i + (axis + 1) * ans_size];
axes_out[axis].row_ids = axes_in[axis].row_ids.Arange(0, row_end);
axes_out[axis].cached_tot_size = row_end;
}
ans[i] = RaggedShape(axes_out, false);
}
return ans;
}
Ragged<int32_t> AddSuffixToRagged(Ragged<int32_t> &src,
const Array1<int32_t> &suffix) {
NVTX_RANGE(K2_FUNC);
int32_t num_axes = src.NumAxes();
K2_CHECK_GE(num_axes, 2);
K2_CHECK_EQ(suffix.Dim(), src.TotSize(num_axes - 2));
ContextPtr &c = src.Context();
Array1<int32_t> dst_values(c, src.NumElements() + suffix.Dim());
RaggedShape dst_shape = ChangeSublistSize(src.shape, 1);
// "row_splits1" and "row_ids1" below are actually on the last axis. We name
// them with "1" so that we can use "idx01" and "idx0" for those indexes in
// lambda, following the naming convention explained in k2/csrc/utils.h
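  // Illustrative example (hypothetical values):
  //   src == [ [ 1 2 ] [ 3 ] ], suffix == [ 10 20 ]
  //   => ans == [ [ 1 2 10 ] [ 3 20 ] ]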
const int32_t *dst_row_splits1_data =
dst_shape.RowSplits(num_axes - 1).Data(),
*dst_row_ids1_data = dst_shape.RowIds(num_axes - 1).Data(),
*src_values_data = src.values.Data(),
*suffix_data = suffix.Data();
int32_t *dst_values_data = dst_values.Data();
K2_EVAL(
c, dst_shape.NumElements(), lambda_copy_values, (int32_t idx01)->void {
int32_t idx0 = dst_row_ids1_data[idx01];
if (idx01 == dst_row_splits1_data[idx0 + 1] - 1) {
// idx01 points to the last element of this row; copy from suffix
dst_values_data[idx01] = suffix_data[idx0];
} else {
// copy from src
int32_t src_idx01 = idx01 - dst_row_ids1_data[idx01];
dst_values_data[idx01] = src_values_data[src_idx01];
}
});
return Ragged<int32_t>(dst_shape, dst_values);
}
Ragged<int32_t> AddPrefixToRagged(Ragged<int32_t> &src,
const Array1<int32_t> &prefix) {
NVTX_RANGE(K2_FUNC);
int32_t num_axes = src.NumAxes();
K2_CHECK_GE(num_axes, 2);
K2_CHECK_EQ(prefix.Dim(), src.TotSize(num_axes - 2));
ContextPtr &c = src.Context();
Array1<int32_t> dst_values(c, src.NumElements() + prefix.Dim());
RaggedShape dst_shape = ChangeSublistSize(src.shape, 1);
// "row_splits1" and "row_ids1" below are actually on the last axis. We name
// them with "1" so that we can use "idx01" and "idx0" for those indexes in
// lambda, following the naming convention explained in k2/csrc/utils.h
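  // Illustrative example (hypothetical values):
  //   src == [ [ 1 2 ] [ 3 ] ], prefix == [ 10 20 ]
  //   => ans == [ [ 10 1 2 ] [ 20 3 ] ]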
const int32_t *dst_row_splits1_data =
dst_shape.RowSplits(num_axes - 1).Data(),
*dst_row_ids1_data = dst_shape.RowIds(num_axes - 1).Data(),
*src_values_data = src.values.Data(),
*prefix_data = prefix.Data();
int32_t *dst_values_data = dst_values.Data();
K2_EVAL(
c, dst_shape.NumElements(), lambda_copy_values, (int32_t idx01)->void {
int32_t idx0 = dst_row_ids1_data[idx01];
if (idx01 == dst_row_splits1_data[idx0]) {
// idx01 points to the first element of this row; copy from prefix
dst_values_data[idx01] = prefix_data[idx0];
} else {
// copy from src
int32_t src_idx01 = idx01 - dst_row_ids1_data[idx01] - 1;
dst_values_data[idx01] = src_values_data[src_idx01];
}
});
return Ragged<int32_t>(dst_shape, dst_values);
}
RaggedShape SubsampleRaggedShape(RaggedShape &src, Renumbering &renumbering) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(renumbering.NumOldElems(), src.NumElements());
// Make sure final row-ids are populated.
src.RowIds(src.NumAxes() - 1);
std::vector<RaggedShapeLayer> axes = src.Layers();
axes.back().row_ids = axes.back().row_ids[renumbering.New2Old()];
axes.back().row_splits = renumbering.Old2New()[axes.back().row_splits];
axes.back().cached_tot_size = axes.back().row_ids.Dim();
return RaggedShape(axes);
}
RaggedShape SubsampleRaggedShape(RaggedShape &src, Renumbering &r_before_last,
Renumbering &r_last) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(r_before_last.NumOldElems(), src.TotSize(src.NumAxes() - 2));
K2_CHECK_EQ(r_last.NumOldElems(), src.NumElements());
// Make sure final and before-final row-ids are populated.
src.RowIds(src.NumAxes() - 2);
src.RowIds(src.NumAxes() - 1);
std::vector<RaggedShapeLayer> axes = src.Layers();
// Suppose this shape has 3 axes (0,1,2). Its NumAxes()==3;
// axes.size()==2.
// r_before_last deals with the numbering on axis 1.
// r_last deals with the numbering on axis 2.
RaggedShapeLayer &before_last = axes[axes.size() - 2],
&last = axes[axes.size() - 1];
int32_t new_tot_size1 = r_before_last.NumNewElems(),
new_tot_size2 = r_last.NumNewElems();
ContextPtr c = src.Context();
Array1<int32_t> before_last_row_ids(c, new_tot_size1),
last_row_splits(c, new_tot_size1 + 1), last_row_ids(c, new_tot_size2);
  // The variable names below use this 3-axis assumption but the
  // code will work for a greater number of axes.
int32_t *new_row_ids1_data = before_last_row_ids.Data(),
*new_row_splits2_data = last_row_splits.Data(),
*new_row_ids2_data = last_row_ids.Data();
const int32_t *old_row_ids1_data = before_last.row_ids.Data(),
*old_row_splits2_data = last.row_splits.Data(),
*old_row_ids2_data = last.row_ids.Data();
const int32_t *idx01_new2old_data = r_before_last.New2Old().Data(),
*idx01_old2new_data = r_before_last.Old2New().Data(),
*idx012_new2old_data = r_last.New2Old().Data(),
*idx012_old2new_data = r_last.Old2New().Data();
ParallelRunner pr(c);
{
With w(pr.NewStream());
// before_last.row_splits maps from idx0 -> idx01 (contains idx01's). Map
// the idx01's; the idx0s stay the same.
before_last.row_splits = r_before_last.Old2New()[before_last.row_splits];
}
{
With w(pr.NewStream());
K2_EVAL(
c, new_tot_size1 + 1, lambda_set_row_ids1_and_row_splits2,
(int32_t new_idx01)->void {
// row_ids1 maps from idx01 -> idx0. Select subset of
// idx01's; the idx0 stays the same.
int32_t old_idx01 = idx01_new2old_data[new_idx01];
if (new_idx01 < new_tot_size1)
new_row_ids1_data[new_idx01] = old_row_ids1_data[old_idx01];
          // row_splits2 maps from idx01 -> idx012.  Map both indexes:
          // the idx01 (position) via r_before_last and the idx012
          // (value) via r_last.
new_row_splits2_data[new_idx01] =
idx012_old2new_data[old_row_splits2_data[old_idx01]];
});
}
{
With w(pr.NewStream());
K2_EVAL(
c, new_tot_size2, lambda_set_row_ids2, (int32_t new_idx012)->void {
// row_ids2 maps from idx012 -> idx01. Both must be mapped.
int32_t old_idx012 = idx012_new2old_data[new_idx012];
int32_t old_idx01 = old_row_ids2_data[old_idx012],
new_idx01 = idx01_old2new_data[old_idx01];
new_row_ids2_data[new_idx012] = new_idx01;
});
}
before_last.row_ids = before_last_row_ids;
before_last.cached_tot_size = new_tot_size1;
last.row_splits = last_row_splits;
last.row_ids = last_row_ids;
last.cached_tot_size = new_tot_size2;
return RaggedShape(axes);
}
RaggedShape EmptyRaggedShape(ContextPtr &c, int32_t num_axes) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(num_axes, 2);
std::vector<RaggedShapeLayer> axes(num_axes - 1);
axes[0].row_splits = Array1<int32_t>(c, 1, 0);
// row_ids will be the empty vector, with context `c`.
axes[0].row_ids = axes[0].row_splits.Range(0, 0);
axes[0].cached_tot_size = 0;
for (int32_t a = 1; a + 1 < num_axes; a++) axes[a] = axes[0];
return RaggedShape(axes);
}
Array1<int32_t> GetDecreasingSizeOrder(RaggedShape &shape) {
ContextPtr c = shape.Context();
Array1<int32_t> sizes = RowSplitsToSizes(shape.RowSplits(1));
Array1<int32_t> index_map;
Sort<int32_t, GreaterThan<int32_t>>(&sizes, &index_map);
return index_map;
}
RaggedShape GetLayer(const RaggedShape &src, int32_t layer) {
K2_CHECK_GE(layer, 0);
K2_CHECK_LT(layer, src.NumAxes() - 1);
std::vector<RaggedShapeLayer> layers;
layers.push_back(src.Layers()[layer]);
bool check = false;
return RaggedShape(layers, check);
}
void DecomposeRaggedShape(const RaggedShape &src,
int32_t axis,
RaggedShape *top, RaggedShape *bottom) {
K2_CHECK_GT(axis, 0);
K2_CHECK_LT(axis, src.NumAxes() - 1);
const std::vector<RaggedShapeLayer> &src_layers = src.Layers();
std::vector<RaggedShapeLayer> top_layers(axis),
bottom_layers(src_layers.size() - axis);
int32_t src_size = src_layers.size();
for (int32_t i = 0; i < axis; ++i)
top_layers[i] = src_layers[i];
for (int32_t i = axis; i < src_size; ++i)
bottom_layers[i - axis] = src_layers[i];
*top = RaggedShape(top_layers);
*bottom = RaggedShape(bottom_layers);
}
RaggedShape RemoveEmptyLists(RaggedShape &src_shape,
int32_t axis,
Renumbering *renumbering_out) {
if (axis == 0) {
return RemoveEmptyListsAxis0(src_shape, renumbering_out);
}
RaggedShape top_shape, bottom_shape;
DecomposeRaggedShape(src_shape, axis, &top_shape, &bottom_shape);
Renumbering r_temp;
if (!renumbering_out)
renumbering_out = &r_temp;
bottom_shape = RemoveEmptyListsAxis0(bottom_shape, renumbering_out);
top_shape = SubsampleRaggedShape(top_shape, *renumbering_out);
return ComposeRaggedShapes(top_shape, bottom_shape);
}
RaggedShape RemoveSomeEmptyLists(RaggedShape &src_shape,
int32_t axis,
Renumbering &renumbering) {
if (axis == 0) {
return RenumberAxis0Simple(src_shape, renumbering);
}
RaggedShape top_shape, bottom_shape;
DecomposeRaggedShape(src_shape, axis, &top_shape, &bottom_shape);
bottom_shape = RenumberAxis0Simple(bottom_shape, renumbering);
top_shape = SubsampleRaggedShape(top_shape, renumbering);
return ComposeRaggedShapes(top_shape, bottom_shape);
}
RaggedShape RemoveEmptyListsAxis0(RaggedShape &src_shape,
Renumbering *renumbering_out) {
Renumbering r_temp;
if (!renumbering_out)
renumbering_out = &r_temp;
ContextPtr c = src_shape.Context();
int32_t num_lists = src_shape.Dim0();
*renumbering_out = Renumbering(c, num_lists);
int32_t *row_splits_data = src_shape.RowSplits(1).Data();
char *keep_data = renumbering_out->Keep().Data();
  // Note: `keep` has exactly num_lists entries (one per list on axis 0).
  K2_EVAL(c, num_lists, lambda_set_keep, (int32_t i) -> void {
keep_data[i] = (row_splits_data[i+1] != row_splits_data[i]);
});
return RenumberAxis0Simple(src_shape, *renumbering_out);
}
RaggedShape RenumberAxis0Simple(RaggedShape &src_shape,
Renumbering &renumbering) {
K2_CHECK_EQ(renumbering.NumOldElems(), src_shape.Dim0());
ContextPtr c = src_shape.Context();
src_shape.RowIds(1); // make sure RowIds(1) is populated.
std::vector<RaggedShapeLayer> layers = src_shape.Layers();
int32_t num_layers = layers.size();
int32_t new_num_lists = renumbering.NumNewElems(),
num_elems = src_shape.TotSize(1); // unchanged old vs. new.
Array1<int32_t> new_row_splits(c, new_num_lists + 1),
new_row_ids = renumbering.Old2New()[src_shape.RowIds(1)];
int32_t *new_row_splits_data = new_row_splits.Data();
const int32_t *old_row_splits_data = src_shape.RowSplits(1).Data(),
*new2old_data = renumbering.New2Old().Data();
// set `new_row_splits_data`.
#ifndef NDEBUG
{
Array1<int32_t> is_ok(c, 1, 1);
int32_t *is_ok_data = is_ok.Data();
int32_t old_num_lists = src_shape.Dim0();
const int32_t *old2new_data = renumbering.Old2New().Data();
K2_EVAL(c, old_num_lists, lambda_check_preconditions, (int32_t i) -> void {
if (old2new_data[i+1] == old2new_data[i]) { // This list not kept
if (old_row_splits_data[i+1] != old_row_splits_data[i]) {
// this list was nonempty...
is_ok_data[0] = 0;
}
}
});
K2_CHECK_NE(is_ok[0], 0) << "RenumberAxis0Simple(): preconditions not met; "
"renumbering removes nonempty lists.";
}
#endif
K2_EVAL(c, new_num_lists + 1, lambda_set_new_row_splits, (int32_t new_i) -> void {
int32_t j;
if (new_i == new_num_lists) {
j = num_elems;
} else {
int32_t old_i = new2old_data[new_i];
j = old_row_splits_data[old_i];
}
new_row_splits_data[new_i] = j;
});
layers[0].row_splits = new_row_splits;
layers[0].row_ids = new_row_ids;
// no need to set its cached_tot_size; that didn't change.
return RaggedShape(layers);
}
} // namespace k2
| edda540bfbbae9ea128fb81dddd535ef41bc47bd.cu | /**
* @brief
* ragged_ops
*
* @copyright
* Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu)
* Mobvoi Inc. (authors: Fangjun Kuang)
* Yiming Wang
*
* @copyright
* See LICENSE for clarification regarding multiple authors
*/
#include <algorithm>
#include <memory>
#include <vector>
#include "cub/cub.cuh"
#include "k2/csrc/array_ops.h"
#include "k2/csrc/macros.h"
#include "k2/csrc/math.h"
#include "k2/csrc/moderngpu_allocator.h"
#include "k2/csrc/ragged.h"
#include "k2/csrc/ragged_ops.h"
#include "k2/csrc/ragged_utils.h"
#include "moderngpu/kernel_mergesort.hxx"
namespace {
/*
A helper function used in RaggedShape3;
  if both first and second are non-NULL, it will check whether their contexts
  are compatible and return that context if they are;
if one of them is NULL, returns the other one's context.
*/
static k2::ContextPtr GetContext(const k2::Array1<int32_t> *first,
const k2::Array1<int32_t> *second) {
K2_CHECK(first != nullptr || second != nullptr)
<< "At least one of first and second must be non-NULL";
if (first == nullptr)
return second->Context();
else if (second == nullptr)
return first->Context();
else
return k2::GetContext(*first, *second);
}
} // namespace
namespace k2 {
RaggedShape RandomRaggedShape(bool set_row_ids, int32_t min_num_axes,
int32_t max_num_axes, int32_t min_num_elements,
int32_t max_num_elements) {
NVTX_RANGE(K2_FUNC);
ContextPtr c = GetCpuContext();
K2_CHECK(min_num_axes >= 2 && max_num_axes >= min_num_axes &&
min_num_elements >= 0 && max_num_elements >= min_num_elements);
int32_t num_axes = RandInt(min_num_axes, max_num_axes);
int32_t num_elements = RandIntGeometric(min_num_elements, max_num_elements);
bool done_repeats = false;
std::vector<RaggedShapeLayer> axes(num_axes - 1);
for (int32_t axis = num_axes - 2; axis >= 0; axis--) {
// this axis will have row_ids of length num_elements and
// row_splits of length to be determined.
int32_t cur_row_split = 0;
std::vector<int32_t> row_splits_vec;
std::vector<int32_t> row_ids_vec;
row_splits_vec.push_back(cur_row_split);
    // The reason for "|| RandInt(0, 2) == 0" is so that even if there
// are no elements we can still potentially generate empty row-splits.
while (cur_row_split < num_elements || RandInt(0, 2) == 0) {
int32_t split_size = RandIntGeometric(0, num_elements - cur_row_split);
cur_row_split += split_size;
// sometimes we have a bunch of empty rows in a row (this will test out
// more of the code), so here we generate a bunch of empty rows, but we
// just do this only once (that's why we declare `done_repeats` here).
if (split_size == 0 && RandInt(0, 30) == 0 && !done_repeats) {
int32_t num_repeats = RandIntGeometric(1, 128);
row_splits_vec.insert(row_splits_vec.end(), num_repeats, cur_row_split);
// don't need to set `row_ids_vec` as there's no element.
done_repeats = true;
}
row_splits_vec.push_back(cur_row_split);
if (set_row_ids) {
int32_t cur_row = static_cast<int32_t>(row_splits_vec.size()) - 2;
row_ids_vec.insert(row_ids_vec.end(), split_size, cur_row);
}
}
axes[axis].row_splits = Array1<int32_t>(c, row_splits_vec);
if (set_row_ids) axes[axis].row_ids = Array1<int32_t>(c, row_ids_vec);
axes[axis].cached_tot_size = num_elements;
num_elements = axes[axis].row_splits.Dim() - 1;
}
// RaggedShape(axes, true) will check the returned RaggedShape for
// consistency.
return RaggedShape(axes, true);
}
RaggedShape RaggedShape2(Array1<int32_t> *row_splits, Array1<int32_t> *row_ids,
int32_t cached_tot_size) {
NVTX_RANGE(K2_FUNC);
K2_CHECK(row_splits != nullptr || row_ids != nullptr)
<< "At least one of row_splits and row_ids must be defined";
ContextPtr ctx = ::GetContext(row_splits, row_ids);
if (cached_tot_size != -1) {
if (row_ids != nullptr) K2_CHECK_EQ(cached_tot_size, row_ids->Dim());
if (row_splits != nullptr) {
// may be slow as it may copy memory from device to host
K2_DCHECK_EQ(cached_tot_size, row_splits->Back())
<< "Bad row splits is: " << *row_splits;
}
}
std::vector<RaggedShapeLayer> axes(1);
if (row_splits != nullptr) {
axes[0].row_splits = *row_splits;
} else {
    // we need to work out row_splits as we always require that row_splits is
    // not empty for RaggedShape. Note that here we assume the last element in
    // row_ids is num_rows - 1, i.e. there are no empty rows after row
    // `row_ids[-1]`.
int32_t num_rows = row_ids->Dim() == 0 ? 0 : row_ids->Back() + 1;
Array1<int32_t> row_splits_array(ctx, num_rows + 1);
RowIdsToRowSplits(*row_ids, &row_splits_array);
axes[0].row_splits = row_splits_array;
}
if (row_ids != nullptr) axes[0].row_ids = *row_ids;
if (cached_tot_size == -1) {
cached_tot_size =
row_ids != nullptr ? row_ids->Dim() : axes[0].row_splits.Back();
}
axes[0].cached_tot_size = cached_tot_size;
// note below line will check if row_splits and row_ids are valid and agree
// with each other.
return RaggedShape(axes);
}
RaggedShape ComposeRaggedShapes(const RaggedShape &a, const RaggedShape &b) {
NVTX_RANGE(K2_FUNC);
if (a.NumElements() != b.Dim0()) {
K2_LOG(FATAL) << "ComposeRaggedShapes: shape mismatch: " << a.NumElements()
<< " vs. " << b.Dim0();
}
const auto &a_axes = a.Layers();
const auto &b_axes = b.Layers();
std::vector<RaggedShapeLayer> axes(a_axes.size() + b_axes.size());
std::size_t a_size = a_axes.size(), b_size = b_axes.size();
for (std::size_t i = 0; i < a_size; ++i) axes[i] = a_axes[i];
for (std::size_t i = 0; i < b_size; ++i) axes[i + a_size] = b_axes[i];
bool validate = false;
return RaggedShape(axes, validate);
}
RaggedShape RaggedShape3(Array1<int32_t> *row_splits1,
Array1<int32_t> *row_ids1, int32_t cached_tot_size1,
Array1<int32_t> *row_splits2,
Array1<int32_t> *row_ids2, int32_t cached_tot_size2) {
NVTX_RANGE(K2_FUNC);
RaggedShape shape1 = RaggedShape2(row_splits1, row_ids1, cached_tot_size1);
Array1<int32_t> temp_array;
if (row_splits2 == nullptr) {
K2_CHECK_NE(row_ids2, nullptr)
<< "Either row-splits or row-ids must be defined";
temp_array = Array1<int32_t>(row_ids2->Context(), shape1.NumElements() + 1);
row_splits2 = &temp_array;
RowIdsToRowSplits(*row_ids2, row_splits2);
}
return ComposeRaggedShapes(
shape1, RaggedShape2(row_splits2, row_ids2, cached_tot_size2));
}
RaggedShape RaggedShape4(Array1<int32_t> *row_splits1,
Array1<int32_t> *row_ids1, int32_t cached_tot_size1,
Array1<int32_t> *row_splits2,
Array1<int32_t> *row_ids2, int32_t cached_tot_size2,
Array1<int32_t> *row_splits3,
Array1<int32_t> *row_ids3, int32_t cached_tot_size3) {
NVTX_RANGE(K2_FUNC);
RaggedShape shape12 = RaggedShape3(row_splits1, row_ids1, cached_tot_size1,
row_splits2, row_ids2, cached_tot_size2);
Array1<int32_t> temp_array;
if (row_splits3 == nullptr) {
K2_CHECK_NE(row_ids3, nullptr)
<< "Either row-splits or row-ids must be defined";
temp_array =
Array1<int32_t>(row_ids3->Context(), shape12.NumElements() + 1);
row_splits3 = &temp_array;
RowIdsToRowSplits(*row_ids3, row_splits3);
}
return ComposeRaggedShapes(
shape12, RaggedShape2(row_splits3, row_ids3, cached_tot_size3));
}
RaggedShape RaggedShapeFromTotSizes(ContextPtr c, int32_t num_axes,
int32_t *tot_sizes) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(num_axes, 2);
std::vector<RaggedShapeLayer> axes(num_axes - 1);
// In future we might choose to allocate everything in one big array, to avoid
// multiple allocations, but for now just do it the simple way.
for (int32_t axis = 1; axis < num_axes; ++axis) {
axes[axis - 1].row_splits = Array1<int32_t>(c, tot_sizes[axis - 1] + 1);
axes[axis - 1].row_ids = Array1<int32_t>(c, tot_sizes[axis]);
axes[axis - 1].cached_tot_size = tot_sizes[axis];
}
// Not check here as we did not set the values of row_splits and row_ids
return RaggedShape(axes, false);
}
Array1<int32_t *> GetRowSplitsPtr(RaggedShape &src) {
NVTX_RANGE(K2_FUNC);
int32_t axes = src.NumAxes();
K2_CHECK_GE(axes, 2);
std::vector<int32_t *> row_splits_start(axes - 1);
for (int32_t i = 1; i != axes; ++i) {
Array1<int32_t> &cur_splits = src.RowSplits(i);
row_splits_start[i - 1] = cur_splits.Data();
}
return Array1<int32_t *>(src.Context(), row_splits_start);
}
// See declaration in ragged.h for documentation of its purpose and interface.
RaggedShape Unsqueeze(const RaggedShape &src, int32_t axis) {
// If axis == 0, initial row_splits and row_ids will look like the following,
// if for example src.Dim0() was 5: [ 0 5 ], [ 0 0 0 0 0 ]. The other axes
// would be pushed forward.
//
// If 0 < axis <= src.NumAxes(), the inserted row_splits and row_ids would
// look like the following, if for instance the src.TotSize(axis) = 8:
// [ 0 1 2 3 4 5 6 7 8 ], [ 0 1 2 3 4 5 6 7 ].
//
  // The reason why the code is different for axis == 0 is that in that case we
// are really making visible an "implicit" axis of the input `src`; we could
// call it axis 0 of the original RaggedShape. Imagine that "implicit" axis's
// row_splits and row_ids map respectively from an idx_minus1 -> idx0 and from
// an idx_0 to idx_minus1, where idx_minus1 is always 0 and 0 <= idx0 <
// Dim0().
NVTX_RANGE(K2_FUNC);
ContextPtr c = src.Context();
K2_CHECK(axis >= 0 && axis <= src.NumAxes());
const std::vector<RaggedShapeLayer> &axes_in = src.Layers();
int32_t num_axes_in = src.NumAxes();
// Note: in RaggedShape, the vector of RaggedShapeLayer is of length
// num_axes - 1, so the output will have one more axis than the input.
std::vector<RaggedShapeLayer> axes_out(num_axes_in);
int32_t row_splits_dim, row_ids_dim;
Array1<int32_t> mem;
if (axis == 0) {
row_splits_dim = 2; // e.g. [ 0 5 ]
row_ids_dim = src.Dim0(); // e.g. [ 0 0 0 0 0 ]
mem = Array1<int32_t>(c, row_splits_dim + row_ids_dim);
int32_t *mem_data = mem.Data();
K2_EVAL(
c, mem.Dim(), lambda_set_mem, (int32_t i)->void {
if (i == 1)
mem_data[i] = row_ids_dim;
else
mem_data[i] = 0;
});
} else {
int32_t tot_size = src.TotSize(axis);
row_splits_dim = tot_size + 1;
row_ids_dim = tot_size;
mem = Array1<int32_t>(c, row_splits_dim + row_ids_dim);
int32_t *mem_data = mem.Data();
K2_EVAL(
c, mem.Dim(), lambda_set_mem2,
(int32_t i)->void { mem_data[i] = i % (tot_size + 1); });
}
axes_out[axis].row_splits = mem.Range(0, row_splits_dim);
axes_out[axis].row_ids = mem.Range(row_splits_dim, row_ids_dim);
axes_out[axis].cached_tot_size = row_ids_dim;
for (int32_t i = 0; i < axis; ++i) axes_out[i] = axes_in[i];
// Note: the returned array has `num_axes_in + 1` axes, so its
// array of RaggedShapeLayer is of length `num_axes_in`.
for (int32_t i = axis + 1; i < num_axes_in; ++i) axes_out[i] = axes_in[i - 1];
return RaggedShape(axes_out);
}
std::vector<RaggedShape> UnsqueezeParallel(int32_t num_srcs, RaggedShape **src,
int32_t axis) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(axis, 0);
std::vector<RaggedShape> ans;
if (num_srcs == 0) return ans;
ans.reserve(num_srcs);
ContextPtr c = src[0]->Context();
std::vector<int32_t> all_row_splits_vec(num_srcs * 2);
int32_t max_dim = 0;
// all_row_splits_vec will contain [ 0 d0 0 d1 0 d2 .. ]
// where d0 == src[0]->Dim0(), d1 == src[1]->Dim0()..
for (int32_t i = 0; i < num_srcs; i++) {
int32_t this_dim0 = src[i]->Dim0();
if (this_dim0 > max_dim) max_dim = this_dim0;
all_row_splits_vec[i * 2] = 0;
all_row_splits_vec[i * 2 + 1] = this_dim0;
}
Array1<int32_t> all_row_splits(c, all_row_splits_vec);
Array1<int32_t> all_row_ids(c, max_dim, 0);
for (int32_t i = 0; i < num_srcs; i++) {
int32_t num_axes = src[i]->NumAxes();
std::vector<RaggedShapeLayer> axes;
axes.reserve(num_axes); // note, the size of the `layers` of a RaggedShape
// is its NumAxes() - 1.
axes.resize(1);
int32_t this_old_dim0 = all_row_splits_vec[i * 2 + 1];
axes[0].row_splits = all_row_splits.Range(i * 2, 2);
axes[0].row_ids = all_row_ids.Range(0, this_old_dim0);
axes[0].cached_tot_size = this_old_dim0;
axes.insert(axes.end(), src[i]->Layers().begin(), src[i]->Layers().end());
ans.emplace_back(axes);
}
return ans;
}
/*
Internal function used in Index(), which gets certain arrays used internally.
@param [in] src Source shape to be indexed
@param [in] src_row_splits_ptrs Result of calling GetRowSplitsPtr(src)
@param [in] new2old Array of indexes into axis 0 of src
@param [out] old_offsets Will be set to new Array2 with dimension
(src.NumAxes(), new2old.Dim()), whose (i,j)'th
element contains the offset into axis i of `src`
where the slice of `src` with index0 (i.e. index
into 0'th-axis of `src`) equal to `new2old[j]`
begins.
@param [out] new_offsets Will be set to new Array2 with dimension
(src.NumAxes(), new2old.Dim()+1), whose (i,j)'th
element contains the offset into axis i of `ans`
where the data in `ans` corresponding to
index j (i.e. index j into axis 0 of `ans`) begins.
Note: `ans` is the result of Index(), with
ans.Dim0() == new2old.Dim().
*/
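// A worked example with hypothetical values: if `src` has 2 axes with
// row_splits1 == [ 0 2 3 ] and new2old == [ 1 0 ], then
// old_offsets == [ [ 1 0 ], [ 2 0 ] ] (one row per axis) and, after the
// exclusive sum, new_offsets == [ [ 0 1 2 ], [ 0 1 3 ] ].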
inline void GetOldAndNewOffsets(RaggedShape &src,
const Array1<int32_t *> &src_row_splits_ptrs,
const Array1<int32_t> &new2old,
Array2<int32_t> *old_offsets,
Array2<int32_t> *new_offsets) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(src.NumAxes(), 1);
ContextPtr &c = src.Context();
int32_t num_axes = src.NumAxes(), ans_dim0 = new2old.Dim();
int32_t *const *src_row_splits_ptrs_data = src_row_splits_ptrs.Data();
const int32_t *new2old_data = new2old.Data();
*old_offsets = Array2<int32_t>(c, num_axes, ans_dim0);
*new_offsets = Array2<int32_t>(c, num_axes, ans_dim0 + 1);
auto old_offsets_acc = old_offsets->Accessor(),
new_offsets_acc = new_offsets->Accessor();
// Set old_offsets; and for now, set new_offsets to the corresponding
// sizes of the output slices.
K2_EVAL(
c, ans_dim0, lambda_set_offsets, (int32_t i)->void {
// 0 <= i < ans_dim0
int32_t old_offset = new2old_data[i], old_offset_next = old_offset + 1;
for (int32_t axis = 0;; axis++) {
old_offsets_acc(axis, i) = old_offset;
// Below, 'new_offsets_acc' currently contains the size rather
// than the offset; we need to do exclusive-sum.
new_offsets_acc(axis, i) = old_offset_next - old_offset;
if (axis + 1 == num_axes) return;
old_offset = src_row_splits_ptrs_data[axis][old_offset];
old_offset_next = src_row_splits_ptrs_data[axis][old_offset_next];
}
});
ExclusiveSum(*new_offsets, new_offsets);
}
RaggedShape Index(RaggedShape &src, const Array1<int32_t> &new2old,
Array1<int32_t> *elem_indexes /*=nullptr*/) {
NVTX_RANGE(K2_FUNC);
ContextPtr &c = src.Context();
bool is_cpu = (c->GetDeviceType() == kCpu);
K2_CHECK(IsCompatible(src, new2old));
int32_t num_axes = src.NumAxes(), src_dim0 = src.Dim0(),
ans_dim0 = new2old.Dim();
if (ans_dim0 == 0) {
if (elem_indexes) *elem_indexes = Array1<int32_t>(c, 0);
return EmptyRaggedShape(c, num_axes);
}
Array1<int32_t *> src_row_splits_ptrs = GetRowSplitsPtr(src);
Array2<int32_t> old_offsets, // num_axes by ans_dim0
new_offsets; // num_axes by (ans_dim0 + 1).
GetOldAndNewOffsets(src, src_row_splits_ptrs, new2old, &old_offsets,
&new_offsets);
Array1<int32_t> tot_sizes_out =
Array1<int32_t>(new_offsets.Col(ans_dim0)).To(GetCpuContext());
if (elem_indexes) *elem_indexes = Array1<int32_t>(c, tot_sizes_out.Back());
RaggedShape ans = RaggedShapeFromTotSizes(c, num_axes, tot_sizes_out.Data());
auto old_offsets_acc = old_offsets.Accessor(),
new_offsets_acc = new_offsets.Accessor();
ParallelRunner pr(c);
std::vector<cudaStream_t> streams(num_axes);
int32_t num_jobs = ans_dim0 * 2; // note: this formula is not a heuristic;
  // it's how TaskRedirect works.
Array2<TaskRedirect> task_redirects(c, num_axes, num_jobs);
auto task_redirects_acc = task_redirects.Accessor();
for (int32_t axis = 0; axis < num_axes; ++axis) {
streams[axis] = pr.NewStream();
With w(streams[axis]);
const int32_t *new_offsets_ptr = new_offsets_acc.Row(axis);
TaskRedirect *task_redirect_ptr = task_redirects_acc.Row(axis);
GetTaskRedirect(c, ans_dim0, new_offsets_ptr, task_redirect_ptr);
}
for (int32_t axis = 0; axis < num_axes - 1; ++axis) {
{
int32_t *this_new_row_splits = ans.RowSplits(axis + 1).Data();
const int32_t *this_old_row_splits = src.RowSplits(axis + 1).Data();
auto lambda_set_row_splits = [=] __host__ __device__(
int32_t ans_idx0, int32_t num_threads,
int32_t thread_idx) -> void {
// 0 <= ans_idx0 < ans_dim0; and 0 <= thread_idx < num_threads,
// num_threads may have any value > 0 as far as this code is concerned.
//
// Reminder of how row_splits work dimensionally: they are a map
// from, e.g. an idx0 to an idx0x. An offsets_acc(0,n) is
// dimensionally an idx0; an offsets_acc(1,n) an idx01, and so on.
// The locations in the row_splits array are as given by
// the `axis`'th row of `offsets`; the values in the array
// are related to those in the `axis+1`'th row.
int32_t this_new_offset = new_offsets_acc(axis, ans_idx0),
next_new_offset = new_offsets_acc(axis, ans_idx0 + 1),
num_rows = next_new_offset - this_new_offset,
this_old_offset = old_offsets_acc(axis, ans_idx0),
value_offset = new_offsets_acc(axis + 1, ans_idx0) -
old_offsets_acc(axis + 1, ans_idx0);
// Using <= instead of < below causes threads for different ans_idx0 to
// write a single overlapping value, but also ensures that the
// terminating value is written. This only works because row_splits
// vectors always start with 0, which is not necessarily the case
// for row-ids.
for (; thread_idx <= num_rows; thread_idx += num_threads) {
this_new_row_splits[this_new_offset + thread_idx] =
value_offset + this_old_row_splits[this_old_offset + thread_idx];
}
};
int32_t min_threads_per_job = 2, tot_work = tot_sizes_out[axis],
target_num_loops = (is_cpu || tot_work > 1000000 ? 8 : 2);
EvalWithRedirect(streams[axis], num_jobs, task_redirects_acc.Row(axis),
min_threads_per_job, tot_work, target_num_loops,
lambda_set_row_splits);
}
{
int32_t *this_new_row_ids = ans.RowIds(axis + 1).Data();
const int32_t *this_old_row_ids = src.RowIds(axis + 1).Data();
int32_t min_threads_per_job = 2, tot_work = tot_sizes_out[axis],
target_num_loops = (is_cpu || tot_work > 1000000 ? 8 : 2);
if (elem_indexes == nullptr || axis != num_axes - 2) {
// If we don't need to write to `elem_indexes`... [caution: the next
// code block differs from this only by a statement that sets
// `elem_indexes` and they should be kept in sync.]
auto lambda_set_row_ids = [=] __host__ __device__(
int32_t ans_idx0, int32_t num_threads,
int32_t thread_idx) -> void {
// Reminder of how row_ids work dimensionally: they are a map
// from, e.g. an idx01 to an idx0. An offsets_acc(0,n) is
// dimensionally an idx0; an offsets_acc(1,n) an idx01, and so on.
// The locations in the row_ids array are as given by
// the `axis+1`'th row of `offsets`; the values in the array
// are related to those in the `axis`'th row.
int32_t this_new_offset = new_offsets_acc(axis + 1, ans_idx0),
next_new_offset = new_offsets_acc(axis + 1, ans_idx0 + 1),
num_elems = next_new_offset - this_new_offset,
this_old_offset = old_offsets_acc(axis + 1, ans_idx0),
value_offset = new_offsets_acc(axis, ans_idx0) -
old_offsets_acc(axis, ans_idx0);
for (; thread_idx < num_elems; thread_idx += num_threads) {
this_new_row_ids[this_new_offset + thread_idx] =
value_offset + this_old_row_ids[this_old_offset + thread_idx];
}
};
EvalWithRedirect(streams[axis + 1], num_jobs,
task_redirects_acc.Row(axis + 1), min_threads_per_job,
tot_work, target_num_loops, lambda_set_row_ids);
} else {
int32_t *elem_indexes_data = elem_indexes->Data();
// We need to write to `elem_indexes`. Note: this code block only
// differs from the above by an extra statement regarding
// `elem_indexes`. Comments have been removed.
auto lambda_set_row_ids_and_elem_indexes =
[=] __host__ __device__(int32_t ans_idx0, int32_t num_threads,
int32_t thread_idx) -> void {
int32_t this_new_offset = new_offsets_acc(axis + 1, ans_idx0),
next_new_offset = new_offsets_acc(axis + 1, ans_idx0 + 1),
num_elems = next_new_offset - this_new_offset,
this_old_offset = old_offsets_acc(axis + 1, ans_idx0),
value_offset = new_offsets_acc(axis, ans_idx0) -
old_offsets_acc(axis, ans_idx0);
for (; thread_idx < num_elems; thread_idx += num_threads) {
this_new_row_ids[this_new_offset + thread_idx] =
value_offset + this_old_row_ids[this_old_offset + thread_idx];
elem_indexes_data[this_new_offset + thread_idx] =
this_old_offset + thread_idx;
}
};
EvalWithRedirect(streams[axis + 1], num_jobs,
task_redirects_acc.Row(axis + 1), min_threads_per_job,
tot_work, target_num_loops,
lambda_set_row_ids_and_elem_indexes);
}
}
}
#if !defined(NDEBUG)
ans.Check();
#endif
return ans;
}
Array2<int32_t> GetOffsets(int32_t num_srcs, RaggedShape **src) {
K2_CHECK_GT(num_srcs, 0);
int32_t num_axes_in = src[0]->NumAxes();
ContextPtr ctx = src[0]->Context();
Array2<int32_t> src_offsets(GetCpuContext(), num_axes_in + 1, num_srcs + 1);
int32_t *src_offsets_data = src_offsets.Data();
int32_t src_offsets_stride0 = src_offsets.ElemStride0();
// Check if they have same num-axes and compatible context
for (int32_t i = 1; i < num_srcs; ++i) {
K2_CHECK_EQ(src[i]->NumAxes(), num_axes_in);
K2_CHECK(ctx->IsCompatible(*src[i]->Context()));
}
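  // For example (hypothetical values): with two 2-axis sources whose Dim0()
  // are 2 and 3 and whose TotSize(1) are 5 and 4, the rows of src_offsets
  // will be [ 0 1 2 ], [ 0 2 5 ] and [ 0 5 9 ].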
for (int32_t axis = 0; axis <= num_axes_in; ++axis) {
int32_t sum = 0;
for (int32_t i = 0; i <= num_srcs; ++i) { // i is the column
src_offsets_data[axis * src_offsets_stride0 + i] = sum;
if (i < num_srcs) {
sum += (axis == 0 ? 1 : src[i]->TotSize(axis - 1));
}
}
}
return src_offsets;
}
void GetRowInfo(RaggedShape &src, Array1<int32_t *> *row_splits,
Array1<int32_t *> *row_ids) {
NVTX_RANGE(K2_FUNC);
int32_t axes = src.NumAxes();
K2_CHECK_GE(axes, 2);
src.Populate();
std::vector<int32_t *> row_splits_ptrs(axes - 1);
std::vector<int32_t *> row_ids_ptrs(axes - 1);
for (int32_t i = 1; i != axes; ++i) {
row_splits_ptrs[i - 1] = src.RowSplits(i).Data();
row_ids_ptrs[i - 1] = src.RowIds(i).Data();
}
ContextPtr ctx = src.Context();
*row_splits = Array1<int32_t *>(ctx, row_splits_ptrs);
*row_ids = Array1<int32_t *>(ctx, row_ids_ptrs);
}
void GetRowInfoMulti(int32_t num_srcs, RaggedShape **src,
Array2<int32_t *> *row_splits,
Array2<int32_t *> *row_ids) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(num_srcs, 0);
int32_t num_axes_in = src[0]->NumAxes();
K2_CHECK_GE(num_axes_in, 2);
ContextPtr ctx = src[0]->Context();
// check if they have same num-axes and compatible context
for (int32_t i = 1; i < num_srcs; ++i) {
K2_CHECK_EQ(src[i]->NumAxes(), num_axes_in);
K2_CHECK(ctx->IsCompatible(*src[i]->Context()));
}
Array2<int32_t *> row_splits_ptrs(GetCpuContext(), num_axes_in - 1, num_srcs);
Array2<int32_t *> row_ids_ptrs(GetCpuContext(), num_axes_in - 1, num_srcs);
int32_t **splits_ptr_data = row_splits_ptrs.Data();
int32_t **ids_ptr_data = row_ids_ptrs.Data();
int32_t stride0 = row_splits_ptrs.ElemStride0();
K2_CHECK_EQ(stride0, row_ids_ptrs.ElemStride0());
for (int32_t axis = 0; axis != num_axes_in - 1; ++axis) {
for (int32_t i = 0; i != num_srcs; ++i) {
splits_ptr_data[axis * stride0 + i] = src[i]->RowSplits(axis + 1).Data();
ids_ptr_data[axis * stride0 + i] = src[i]->RowIds(axis + 1).Data();
}
}
*row_splits = row_splits_ptrs.To(ctx);
*row_ids = row_ids_ptrs.To(ctx);
}
static RaggedShape AppendAxis0(int32_t num_srcs, RaggedShape **src,
Array1<uint32_t> *merge_map /* == nullptr*/) {
NVTX_RANGE(K2_FUNC);
if (num_srcs == 1) {
if (merge_map)
*merge_map = Arange<uint32_t>(src[0]->Context(), 0,
src[0]->NumElements());
return **src;
}
K2_CHECK_GT(num_srcs, 1);
int32_t num_axes = src[0]->NumAxes();
ContextPtr c = src[0]->Context();
bool is_cpu = (c->GetDeviceType() == kCpu);
// Check if they have same num-axes and compatible context
for (int32_t i = 1; i < num_srcs; ++i) {
K2_CHECK_EQ(num_axes, src[i]->NumAxes());
K2_CHECK(IsCompatible(*src[0], *src[i]));
}
// `offsets` will be on CPU for now.
Array2<int32_t> offsets = GetOffsets(num_srcs, src);
auto offsets_acc = offsets.Accessor();
std::vector<int32_t> tot_sizes_out(num_axes);
for (int32_t axis = 0; axis < num_axes; ++axis)
tot_sizes_out[axis] = offsets_acc(axis + 1, num_srcs);
RaggedShape ans = RaggedShapeFromTotSizes(c, num_axes,
tot_sizes_out.data());
Array2<int32_t *> src_row_splits, src_row_ids;
GetRowInfoMulti(num_srcs, src, &src_row_splits, &src_row_ids);
auto src_row_splits_acc = src_row_splits.Accessor(),
src_row_ids_acc = src_row_ids.Accessor();
offsets = offsets.To(c);
offsets_acc = offsets.Accessor(); // on GPU now (if we're using one)
ParallelRunner pr(c);
std::vector<cudaStream_t> streams(num_axes);
int32_t num_jobs = num_srcs * 2;
// task_redirects is a device array (if using GPU).
// We have `num_axes - 1` different sets of row_splits/row_ids to
// populate but they have different sizes; the total number of distinct
// sizes is `num_axes`.
Array2<TaskRedirect> task_redirects(c, num_axes, num_jobs);
auto task_redirects_acc = task_redirects.Accessor();
// populate task_redirects (these allocate blocks of threads roughly
// proportionally to the amount of data to process from each source).
for (int32_t axis = 0; axis < num_axes; ++axis) {
streams[axis] = pr.NewStream();
With w(streams[axis]);
const int32_t *offsets = offsets_acc.Row(axis + 1);
// c->GetCudaStream() == stream[axis] as it has been overridden by With
GetTaskRedirect(c, num_srcs, offsets, task_redirects_acc.Row(axis));
}
for (int32_t axis = 0; axis < num_axes - 1; axis++) {
// first set the row-splits.
int32_t **this_src_row_splits = src_row_splits_acc.Row(axis),
**this_src_row_ids = src_row_ids_acc.Row(axis);
int32_t *this_dest_row_splits = ans.RowSplits(axis + 1).Data(),
*this_dest_row_ids = ans.RowIds(axis + 1).Data();
const int32_t *offsets_this_axis = offsets_acc.Row(axis + 1),
*offsets_next_axis = offsets_acc.Row(axis + 2);
auto lambda_set_row_splits = [=] __host__ __device__(
int32_t src_idx, int32_t num_threads,
int32_t thread_idx) -> void {
// Reminder of how row_splits work dimensionally: they are a map
// from, e.g. an idx0 to an idx0x. An offsets_acc(0,n) is
// dimensionally an idx0; an offsets_acc(1,n) an idx01, and so on.
int32_t this_offset = offsets_this_axis[src_idx],
next_offset = offsets_this_axis[src_idx + 1],
this_value_offset = offsets_next_axis[src_idx],
num_rows = next_offset - this_offset;
int32_t *src_row_splits_ptr = this_src_row_splits[src_idx];
// Using <= instead of < below causes threads for different src_idx to
// write a single overlapping value, but also ensures that the
// terminating value is written. This only works because row_splits
// vectors always start with 0, which is not necessarily the case
// for row-ids.
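// E.g. appending row_splits [0 2 3] and [0 1 4]: the second source's values
// are shifted by its value offset 3 and written after the first source's
// rows, giving [0 2 3 4 7].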
for (; thread_idx <= num_rows; thread_idx += num_threads) {
this_dest_row_splits[this_offset + thread_idx] =
this_value_offset + src_row_splits_ptr[thread_idx];
}
};
int32_t min_threads_per_job = 2, tot_work = tot_sizes_out[axis],
target_num_loops = (is_cpu || tot_work > 1000000 ? 8 : 2);
EvalWithRedirect(streams[axis], num_jobs, task_redirects_acc.Row(axis),
min_threads_per_job, tot_work, target_num_loops,
lambda_set_row_splits);
{ // set the row-ids
auto lambda_set_row_ids = [=] __host__ __device__(
int32_t src_idx, int32_t num_threads,
int32_t thread_idx) -> void {
// Reminder of how row_ids work dimensionally: they are a map
// from, e.g. an idx01 to an idx0. An offsets_acc(0,n) is
// dimensionally an idx0; an offsets_acc(1,n) an idx01, and so on.
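// E.g. appending row_ids [0 0 1] and [0 1 1 1]: the second source's values
// are shifted by its value offset 2 (the number of rows written so far),
// giving [0 0 1 2 3 3 3].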
int32_t this_offset = offsets_next_axis[src_idx],
next_offset = offsets_next_axis[src_idx + 1],
this_value_offset = offsets_this_axis[src_idx],
num_elems = next_offset - this_offset;
int32_t *src_row_ids_ptr = this_src_row_ids[src_idx];
for (; thread_idx < num_elems; thread_idx += num_threads) {
this_dest_row_ids[this_offset + thread_idx] =
this_value_offset + src_row_ids_ptr[thread_idx];
}
};
int32_t min_threads_per_job = 2, tot_work = tot_sizes_out[axis + 1],
target_num_loops = (tot_work > 1000000 ? 4 : 2);
// TODO(haowen): maybe we should launch kernels for row_splits and row_ids
// in different streams
EvalWithRedirect(streams[axis + 1], num_jobs,
task_redirects_acc.Row(axis + 1), min_threads_per_job,
tot_work, target_num_loops, lambda_set_row_ids);
}
}
if (merge_map) {
std::vector<int32_t> num_elems_out(num_srcs);
for (int32_t i = 0; i < num_srcs; ++i)
num_elems_out[i] = src[i]->NumElements();
*merge_map = SizesToMergeMap(c, num_elems_out);
}
return ans;
}
RaggedShape Append(int32_t axis, int32_t num_srcs, RaggedShape **src,
Array1<uint32_t> *merge_map /* == nullptr*/) {
K2_CHECK(num_srcs > 0);
if (axis == 0)
return AppendAxis0(num_srcs, src, merge_map);
K2_CHECK_LT(static_cast<uint32_t>(axis),
static_cast<uint32_t>(src[0]->NumAxes()));
int32_t num_axes = src[0]->NumAxes();
std::vector<RaggedShapeLayer> ans_layers(num_axes - 1);
// If axis >= 2, some layers of `src` will pass through unchanged (we should
// check that they are identical across all sources).
for (int32_t l = 0; l + 1 < axis; l++) {
CheckLayerEqual(l, num_srcs, src);
ans_layers[l] = src[0]->Layers()[l];
}
Array1<uint32_t> merge_map_local;
Array1<uint32_t> *this_m = (axis + 1 == num_axes ? merge_map :
&merge_map_local);
RaggedShape s = IntersperseRaggedLayer(axis - 1, num_srcs, src, this_m),
t = SubsampleRaggedLayer(s, 0, num_srcs);
ans_layers[axis - 1] = t.Layers()[0];
for (int32_t l = axis; l + 1 < num_axes; l++) {
Array1<uint32_t> merge_map_next;
Array1<uint32_t> *this_m = (l + 2 == num_axes ? merge_map :
&merge_map_next);
RaggedShape r = MergeRaggedLayer(l, num_srcs, src,
merge_map_local, this_m);
ans_layers[l] = r.Layers()[0];
merge_map_local = merge_map_next;
}
// TODO(dan) after this is debugged: add ", false".
return RaggedShape(ans_layers);
}
RaggedShape RemoveAxis(RaggedShape &src, int32_t axis) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(src.NumAxes(), 2);
K2_CHECK(axis >= 0 && axis < src.NumAxes());
// note, `axes_in` is of dim src.NumAxes() - 1.
// Also note: axes_in[i] pertains to the relationship between
// axes i and i+1 in the source.
src.Populate();
const std::vector<RaggedShapeLayer> &axes_in = src.Layers();
std::vector<RaggedShapeLayer> axes_out(axes_in.size() - 1);
int32_t axes_out_size = static_cast<int32_t>(axes_out.size());
for (int32_t i = 0; i < axis - 1; ++i) axes_out[i] = axes_in[i];
if (axis > 0 && axis + 1 < src.NumAxes()) {
axes_out[axis - 1].row_ids =
axes_in[axis - 1].row_ids[axes_in[axis].row_ids];
axes_out[axis - 1].row_splits =
axes_in[axis].row_splits[axes_in[axis - 1].row_splits];
axes_out[axis - 1].cached_tot_size = axes_out[axis - 1].row_ids.Dim();
}
for (int32_t i = axis; i < axes_out_size; ++i) axes_out[i] = axes_in[i + 1];
return RaggedShape(axes_out);
}
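// MakeTransposable() returns a shape like `src` in which every axis-0 row has
// the same length MaxSize(1), so that Transpose() can be applied; when
// NumAxes() > 2 the padding positions refer to empty sublists on the next
// axis.  E.g. [ [ [x] [x x] ] [ [x] ] ] becomes [ [ [x] [x x] ] [ [x] [ ] ] ].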
RaggedShape MakeTransposable(RaggedShape &src) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(src.NumAxes(), 2);
int32_t src_dim0 = src.Dim0(), src_tot_size1 = src.TotSize(1);
if (src_dim0 <= 1) return src;
ContextPtr c = src.Context();
int32_t num_axes = src.NumAxes();
int32_t max_size = src.MaxSize(1);
if (max_size <= 0) return src;
int32_t ans_tot_size1 = max_size * src_dim0;
src.Populate();
const std::vector<RaggedShapeLayer> &axes_in = src.Layers();
std::vector<RaggedShapeLayer> axes_out(num_axes - 1);
const int32_t *src_row_splits1_data = src.RowSplits(1).Data();
const int32_t *src_row_ids1_data = src.RowIds(1).Data();
{
ParallelRunner pr(c);
RaggedShapeLayer &axis1_shape = axes_out[0];
{
// set ans.RowSplits(1);
With w(pr.NewStream());
axis1_shape.row_splits = Range(c, src_dim0 + 1, 0, max_size);
}
{
// set ans.RowIds(1);
With w(pr.NewStream());
axis1_shape.row_ids = Array1<int32_t>(c, ans_tot_size1);
int32_t *row_ids1_data = axis1_shape.row_ids.Data();
axis1_shape.cached_tot_size = ans_tot_size1;
K2_EVAL(
c, ans_tot_size1, lambda_set_row_ids1,
(int32_t i)->void { row_ids1_data[i] = i / max_size; });
}
if (num_axes > 2) {
RaggedShapeLayer &axis2_shape = axes_out[1];
const int32_t *src_row_splits2_data = src.RowSplits(2).Data();
{
// set ans.RowSplits(2);
With w(pr.NewStream());
axis2_shape.cached_tot_size = src.TotSize(2);
axis2_shape.row_splits = Array1<int32_t>(c, ans_tot_size1 + 1);
int32_t *ans_row_splits2_data = axis2_shape.row_splits.Data();
K2_EVAL(
c, ans_tot_size1 + 1, lambda_set_row_splits2,
(int32_t idx01)->void {
if (idx01 == ans_tot_size1) {
ans_row_splits2_data[idx01] =
src_row_splits2_data[src_tot_size1];
return;
}
int32_t idx0 = idx01 / max_size, idx1 = idx01 % max_size;
int32_t idx0x = src_row_splits1_data[idx0],
idx0x_next = src_row_splits1_data[idx0 + 1];
int32_t num_elems_this_row = idx0x_next - idx0x;
if (idx1 < num_elems_this_row)
ans_row_splits2_data[idx01] =
src_row_splits2_data[idx0x + idx1];
else
ans_row_splits2_data[idx01] =
src_row_splits2_data[idx0x_next]; // append empty row
});
}
{
// set ans.RowIds(2);
With w(pr.NewStream());
int32_t tot_size2 = src.TotSize(2);
axis2_shape.row_ids = Array1<int32_t>(c, tot_size2);
int32_t *ans_row_ids2_data = axis2_shape.row_ids.Data();
const int32_t *src_row_ids2_data = src.RowIds(2).Data();
K2_EVAL(
c, tot_size2, lambda_set_row_ids2, (int32_t idx012)->void {
int32_t src_idx01 = src_row_ids2_data[idx012];
int32_t src_idx0 = src_row_ids1_data[src_idx01];
int32_t src_idx1 = src_idx01 - src_row_splits1_data[src_idx0];
ans_row_ids2_data[idx012] = (src_idx0 * max_size) + src_idx1;
});
}
}
}
// copy the remaining row_splits and row_ids;
for (int32_t i = 2; i < num_axes - 1; ++i) axes_out[i] = axes_in[i];
return RaggedShape(axes_out);
}
// transpose axes 0 and 1.
RaggedShape Transpose(RaggedShape &src, Array1<int32_t> *value_indexes) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(src.NumAxes(), 2);
ContextPtr c = src.Context();
int32_t src_dim0 = src.Dim0(), src_tot_size1 = src.TotSize(1);
if (src_dim0 <= 0) return src;
int32_t src_dim1 = src_tot_size1 / src_dim0;
K2_CHECK_EQ(src_tot_size1 % src_dim0, 0)
<< "Transpose(): all dims on axis 0 must be the same.\n"
<< "src_tot_size1: " << src_tot_size1 << "\n"
<< "src_dim0: " << src_dim0 << ", array is: " << src;
K2_DCHECK(
Equal(src.RowSplits(1), Range(c, src.RowSplits(1).Dim(), 0, src_dim1)))
<< " Expected row-splits to be evenly spaced: " << src.RowSplits(1);
RaggedShape src_no_axis0 = RemoveAxis(src, 0);
K2_CHECK_EQ(src_no_axis0.Dim0(), src_tot_size1);
// `renumbering` is a `new2old` map, that maps from the first index in
// src_no_axis0_renumbered
// to the first index into src_no_axis0.
Array1<int32_t> renumbering(c, src_tot_size1);
int32_t *renumbering_data = renumbering.Data();
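// E.g. with src_dim0 = 2 and src_dim1 = 3 the new2old map set below is
// [ 0 3 1 4 2 5 ]: new position (row k, col j) reads from old position
// (row j, col k).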
K2_EVAL(
c, src_tot_size1, lambda_set_renumbering, (int32_t i)->void {
int32_t j = i % src_dim0, k = i / src_dim0, i_old = j * src_dim1 + k;
renumbering_data[i] = i_old;
});
RaggedShape src_no_axis0_renumbered =
Index(src_no_axis0, renumbering, value_indexes);
int32_t num_rows = src_dim1, row_splits_dim = num_rows + 1,
row_ids_dim = src_tot_size1;
std::vector<RaggedShapeLayer> ans_axis0(1);
Array1<int32_t> mem(c, row_splits_dim + row_ids_dim);
int32_t *mem_data = mem.Data();
K2_EVAL(
c, row_splits_dim + row_ids_dim, lambda_set_row_info, (int32_t i)->void {
int32_t val;
if (i >= row_splits_dim) {
// row_ids
int32_t elem_idx = i - row_splits_dim;
val = elem_idx / src_dim0;
} else {
// row_splits
int32_t row_idx = i;
val = row_idx * src_dim0;
}
mem_data[i] = val;
});
ans_axis0[0].row_splits = mem.Range(0, row_splits_dim);
ans_axis0[0].row_ids = mem.Range(row_splits_dim, row_ids_dim);
ans_axis0[0].cached_tot_size = row_ids_dim;
RaggedShape temp(ans_axis0);
return ComposeRaggedShapes(temp, src_no_axis0_renumbered);
}
RaggedShape Stack(int32_t axis, int32_t num_srcs, RaggedShape **src,
Array1<uint32_t> *merge_map /* = nullptr*/) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GT(num_srcs, 0);
K2_CHECK_LT(static_cast<uint32_t>(axis),
static_cast<uint32_t>(src[0]->NumAxes()));
ContextPtr c = src[0]->Context();
if (axis == 0) {
RaggedShape ans_appended = AppendAxis0(num_srcs, src, merge_map);
ContextPtr cpu = GetCpuContext();
Array1<int32_t> row_splits(cpu, num_srcs + 1);
int32_t *row_splits_data = row_splits.Data();
for (int32_t i = 0; i < num_srcs; i++)
row_splits_data[i] = src[i]->Dim0();
int32_t cutoff = 32;
if (num_srcs < cutoff) row_splits = row_splits.To(c);
ExclusiveSum(row_splits, &row_splits);
if (num_srcs >= cutoff) row_splits = row_splits.To(c);
int32_t num_elems = ans_appended.Dim0();
Array1<int32_t> row_ids(c, num_elems);
RowSplitsToRowIds(row_splits, &row_ids);
RaggedShape ans_layer0 = RaggedShape2(&row_splits, &row_ids, num_elems);
return ComposeRaggedShapes(ans_layer0, ans_appended);
}
K2_CHECK_LT(static_cast<uint32_t>(axis),
static_cast<uint32_t>(src[0]->NumAxes()));
int32_t num_axes = src[0]->NumAxes();
std::vector<RaggedShapeLayer> ans_layers(num_axes);
// If axis >= 2, some layers of `src` will pass through unchanged (we should
// check that they are identical across all sources).
for (int32_t l = 0; l + 1 < axis; l++) {
CheckLayerEqual(l, num_srcs, src);
ans_layers[l] = src[0]->Layers()[l];
}
Array1<uint32_t> merge_map_local;
Array1<uint32_t> *this_m = (axis + 1 == num_axes ? merge_map :
&merge_map_local);
RaggedShape s = IntersperseRaggedLayer(axis - 1, num_srcs, src, this_m);
// note: s.Dim0() will be a multiple of num_srcs.
ans_layers[axis - 1] = RegularRaggedShape(c, s.Dim0() / num_srcs,
num_srcs).Layers()[0];
ans_layers[axis] = s.Layers()[0];
for (int32_t l = axis; l + 1 < num_axes; l++) {
Array1<uint32_t> merge_map_next;
Array1<uint32_t> *this_m = (l + 2 == num_axes ? merge_map :
&merge_map_next);
RaggedShape r = MergeRaggedLayer(l, num_srcs, src,
merge_map_local, this_m);
ans_layers[l + 1] = r.Layers()[0];
merge_map_local = merge_map_next;
}
// TODO(dan) after this is debugged: add ", false".
return RaggedShape(ans_layers);
}
RaggedShape Merge(int32_t num_srcs,
RaggedShape **src,
const Array1<uint32_t> &merge_map,
Array1<uint32_t> *merge_map_out) {
K2_CHECK(num_srcs > 0);
int32_t num_layers = src[0]->NumAxes() - 1;
std::vector<RaggedShapeLayer> ans_layers(num_layers);
// Note: this is a shallow copy.
Array1<uint32_t> merge_map_local = merge_map;
for (int32_t l = 0; l < num_layers; l++) {
Array1<uint32_t> merge_map_next;
Array1<uint32_t> *this_m = (l + 1 == num_layers ? merge_map_out :
&merge_map_next);
RaggedShape r = MergeRaggedLayer(l, num_srcs, src, merge_map_local, this_m);
ans_layers[l] = r.Layers()[0];
merge_map_local = merge_map_next;
}
// TODO(dan) after this is debugged: add ", false".
return RaggedShape(ans_layers);
}
RaggedShape TrivialShape(ContextPtr &c, int32_t num_elems) {
NVTX_RANGE(K2_FUNC);
// row_splits = [0, num_elems]; row_ids = [0, 0, ..., 0] (num_elems zeros).
Array1<int32_t> row_splits = Range<int32_t>(c, 2, 0, num_elems);
Array1<int32_t> row_ids(c, num_elems, 0);
return RaggedShape2(&row_splits, &row_ids, num_elems);
}
RaggedShape RegularRaggedShape(ContextPtr &c, int32_t dim0, int32_t dim1) {
NVTX_RANGE(K2_FUNC);
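// E.g. dim0 = 2, dim1 = 3 gives row_splits = [0 3 6] and
// row_ids = [0 0 0 1 1 1].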
Array1<int32_t> row_splits = Range<int32_t>(c, dim0 + 1, 0, dim1);
int32_t *row_splits_data = row_splits.Data();
Array1<int32_t> row_ids(c, dim0 * dim1);
int32_t *row_ids_data = row_ids.Data();
K2_EVAL2(
c, dim0, dim1, lambda_set_row_ids,
(int32_t i, int32_t j)->void { row_ids_data[i * dim1 + j] = i; });
return RaggedShape2(&row_splits, &row_ids, dim0 * dim1);
}
Ragged<int32_t> GetCountsPartitioned(Ragged<int32_t> &src,
RaggedShape &ans_ragged_shape) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(src.NumAxes(), 2);
K2_CHECK_EQ(ans_ragged_shape.NumAxes(), 2);
K2_CHECK(IsCompatible(src, ans_ragged_shape));
K2_CHECK_EQ(src.Dim0(), ans_ragged_shape.Dim0());
const Array1<int32_t> &values = src.values;
const Array1<int32_t> &row_splits = ans_ragged_shape.RowSplits(1);
int32_t n = ans_ragged_shape.NumElements();
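// counts[i] is the number of occurrences of value i in src.values; wrapping
// it in ans_ragged_shape partitions those n counts into its rows.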
Array1<int32_t> counts = GetCounts(values, n);
return Ragged<int32_t>(ans_ragged_shape, counts);
}
static Array1<int32_t> GetTransposeReorderingCpu(Ragged<int32_t> &src,
int32_t num_cols) {
NVTX_RANGE(K2_FUNC);
std::vector<std::vector<int32_t>> column_indexes(num_cols); // [column][row]
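// `src.values` holds the column index of each element; the result lists the
// element positions column by column.  E.g. values = [3 0 2 0] with
// num_cols = 4 gives ans = [1 3 2 0].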
const int32_t *values_data = src.values.Data();
int32_t n = src.values.Dim();
for (int32_t i = 0; i != n; ++i) {
int32_t bucket = values_data[i];
column_indexes[bucket].push_back(i);
}
Array1<int32_t> ans(src.Context(), n);
int32_t *ans_data = ans.Data();
for (int32_t i = 0; i != num_cols; ++i) {
std::copy(column_indexes[i].begin(), column_indexes[i].end(), ans_data);
ans_data += column_indexes[i].size();
}
return ans;
}
static Array1<int32_t> GetTransposeReorderingThreeAxesCuda(Ragged<int32_t> &src,
int32_t num_cols) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(src.NumAxes(), 3);
ContextPtr &context = src.Context();
K2_CHECK_EQ(context->GetDeviceType(), kCuda);
const Array1<int32_t> &row_splits1 = src.RowSplits(1);
const int32_t *row_ids2_data = src.RowIds(2).Data();
const int32_t *value_data = src.values.Data();
Array1<int32_t> segments = src.RowSplits(2)[row_splits1];
auto lambda_comp = [=] __device__(int32_t a_idx012,
int32_t b_idx012) -> bool {
int32_t a_col_index = value_data[a_idx012];
int32_t b_col_index = value_data[b_idx012];
if (a_col_index < b_col_index) return true; // sort by column indexes
if (a_col_index > b_col_index) return false;
// at this point, a_idx012 and b_idx012 belong to the same column;
// then we sort by its row indexes
int32_t a_idx01 = row_ids2_data[a_idx012];
int32_t b_idx01 = row_ids2_data[b_idx012];
if (a_idx01 < b_idx01) return true;
if (a_idx01 > b_idx01) return false;
// at this point, a_idx012 and b_idx012 are duplicate elements
return false; // either true or false is fine
};
mgpu::context_t *mgpu_context = GetModernGpuAllocator(context);
int32_t n = src.values.Dim();
Array1<int32_t> ans = Range(context, n, 0);
if (n == 0) return ans;
K2_CUDA_SAFE_CALL(mgpu::segmented_sort(ans.Data(), // keys
ans.Dim(), // count
segments.Data(), // segments
segments.Dim() - 1, // num_segments
lambda_comp, *mgpu_context));
return ans;
}
Array1<int32_t> GetTransposeReordering(Ragged<int32_t> &src, int32_t num_cols) {
NVTX_RANGE(K2_FUNC);
ContextPtr &context = src.Context();
if (src.NumAxes() < 2) {
// src is empty
return Array1<int32_t>(context, 0);
}
DeviceType device_type = context->GetDeviceType();
if (device_type == kCpu) return GetTransposeReorderingCpu(src, num_cols);
K2_CHECK_EQ(device_type, kCuda);
if (src.NumAxes() == 3)
return GetTransposeReorderingThreeAxesCuda(src, num_cols);
const int32_t *row_splits1_data = src.RowSplits(src.NumAxes() - 1).Data();
const int32_t *row_ids1_data = src.RowIds(src.NumAxes() - 1).Data();
const int32_t *value_data = src.values.Data();
int32_t n = src.values.Dim();
Array1<int32_t> ans = Range(context, n, 0);
if (n == 0) return ans;
auto lambda_comp = [=] __device__(int32_t a_idx01, int32_t b_idx01) -> bool {
int32_t a_idx0 = row_ids1_data[a_idx01];
int32_t b_idx0 = row_ids1_data[b_idx01];
int32_t a_col_index = value_data[a_idx01];
int32_t b_col_index = value_data[b_idx01];
if (a_col_index < b_col_index) return true; // sort by column indexes
if (a_col_index > b_col_index) return false;
// now we have a_col_index == b_col_index
if (a_idx0 < b_idx0) return true; // sort by row indexes
if (a_idx0 > b_idx0) return false;
// now we have a_idx0 == b_idx0 && a_col_index == b_col_index
// this entry is duplicated in the sparse matrix.
return false; // we can return either true or false here.
};
mgpu::context_t *mgpu_context = GetModernGpuAllocator(context);
K2_CUDA_SAFE_CALL(mgpu::mergesort(ans.Data(), n, lambda_comp, *mgpu_context));
return ans;
}
RaggedShape ChangeSublistSize(RaggedShape &src, int32_t size_delta) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(src.NumAxes(), 2);
// the result will have the same num-axes as `src` (the NumAxes() of the
// object is not the same as the number of RaggedShapeLayer axes).
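// Every sublist on the last axis grows (or shrinks) by size_delta elements,
// e.g. last-axis row_splits [0 2 5] with size_delta = 1 become [0 3 7].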
std::vector<RaggedShapeLayer> ans_axes(src.NumAxes() - 1);
int32_t last_axis = src.NumAxes() - 1;
// The following will only do something if src.NumAxes() > 2.
for (int32_t i = 0; i + 1 < last_axis; ++i) ans_axes[i] = src.Layers()[i];
ContextPtr &c = src.Context();
int32_t num_rows = src.TotSize(last_axis - 1),
src_num_elems = src.TotSize(last_axis),
num_elems = src_num_elems + size_delta * num_rows;
ans_axes.back().row_splits = Array1<int32_t>(c, num_rows + 1);
ans_axes.back().row_ids = Array1<int32_t>(c, num_elems);
ans_axes.back().cached_tot_size = num_elems;
const int32_t *src_row_splits_data = src.RowSplits(last_axis).Data(),
*src_row_ids_data = src.RowIds(last_axis).Data();
int32_t *row_splits_data = ans_axes.back().row_splits.Data(),
*row_ids_data = ans_axes.back().row_ids.Data();
{
ParallelRunner pr(c);
{
With w(pr.NewStream());
K2_EVAL(
c, num_rows + 1, lambda_set_row_splits, (int32_t idx0)->void {
row_splits_data[idx0] =
src_row_splits_data[idx0] + size_delta * idx0;
});
}
{
With w(pr.NewStream());
K2_EVAL(
c, src_num_elems, lambda_set_row_ids1, (int32_t src_idx01)->void {
int32_t src_idx0 = src_row_ids_data[src_idx01],
src_idx0x = src_row_splits_data[src_idx0],
src_idx1 = src_idx01 - src_idx0x,
new_idx0x = row_splits_data[src_idx0],
new_idx0x_next = row_splits_data[src_idx0 + 1],
new_idx01 = new_idx0x + src_idx1;
// it's only necessary to guard the next statement with an 'if'
// because size_delta might be negative.
if (new_idx01 < new_idx0x_next) row_ids_data[new_idx01] = src_idx0;
});
}
if (size_delta > 0) {
// This sets the row-ids that are not set by lambda_set_row_ids1.
With w(pr.NewStream());
K2_EVAL(
c, num_rows * size_delta, lambda_set_row_ids2, (int32_t i)->void {
int32_t idx0 = i / size_delta, n = i % size_delta,
next_idx0 = idx0 + 1;
// The following formula is the same as the one in
// lambda_set_row_splits; we want to compute the new value of
// row_splits_data[next_idx0] without waiting for that kernel to
// terminate.
int32_t next_idx0x =
src_row_splits_data[next_idx0] + size_delta * next_idx0;
row_ids_data[next_idx0x - 1 - n] = idx0;
});
}
// make the ParallelRunner go out of scope (should do this before any
// validation code that gets invoked by the constructor of RaggedShape
// below).
}
return RaggedShape(ans_axes);
}
// TODO(dan): this could definitely be made more efficient.
RaggedShape ChangeSublistSizePinned(RaggedShape &src, int32_t size_delta) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(src.NumAxes(), 2);
// the result will have the same num-axes as `src` (the NumAxes() of the
// object is not the same as the number of RaggedShapeLayer axes).
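// Unlike ChangeSublistSize(), rows that are already empty stay empty and row
// sizes are clamped at zero, so size_delta may be negative without
// underflowing any row.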
std::vector<RaggedShapeLayer> ans_axes(src.NumAxes() - 1);
int32_t last_axis = src.NumAxes() - 1;
// The following will only do something if src.NumAxes() > 2.
for (int32_t i = 0; i + 1 < last_axis; ++i) ans_axes[i] = src.Layers()[i];
ContextPtr &c = src.Context();
int32_t num_rows = src.TotSize(last_axis - 1);
ans_axes.back().row_splits = Array1<int32_t>(c, num_rows + 1);
const int32_t *src_row_splits_data = src.RowSplits(last_axis).Data();
int32_t *row_splits_data = ans_axes.back().row_splits.Data();
K2_EVAL(
c, num_rows, lambda_set_row_sizes, (int32_t idx0)->void {
int32_t orig_size =
src_row_splits_data[idx0 + 1] - src_row_splits_data[idx0],
size;
if (orig_size == 0 || orig_size + size_delta <= 0)
size = 0;
else
size = orig_size + size_delta;
row_splits_data[idx0] = size;
});
ExclusiveSum(ans_axes.back().row_splits, &ans_axes.back().row_splits);
ans_axes.back().row_ids =
Array1<int32_t>(c, ans_axes.back().row_splits.Back());
RowSplitsToRowIds(ans_axes.back().row_splits, &ans_axes.back().row_ids);
ans_axes.back().cached_tot_size = ans_axes.back().row_ids.Dim();
return RaggedShape(ans_axes);
}
RaggedShape Prefix(RaggedShape &src, int32_t n) {
NVTX_RANGE(K2_FUNC);
int32_t dim0 = src.Dim0();
K2_CHECK(n >= 0 && n <= dim0);
src.Populate();
int32_t num_axes = src.NumAxes();
K2_CHECK_GE(num_axes, 2);
const std::vector<RaggedShapeLayer> &axes_in = src.Layers();
std::vector<RaggedShapeLayer> axes_out(axes_in.size());
int32_t row_end = n;
for (int32_t axis = 0; axis < num_axes - 1; ++axis) {
axes_out[axis].row_splits = axes_in[axis].row_splits.Arange(0, row_end + 1);
// notice here we may do a memory copy from GPU to CPU.
row_end = axes_in[axis].row_splits[row_end];
axes_out[axis].row_ids = axes_in[axis].row_ids.Arange(0, row_end);
axes_out[axis].cached_tot_size = row_end;
}
return RaggedShape(axes_out);
}
std::vector<RaggedShape> GetPrefixes(RaggedShape &src,
const std::vector<int32_t> &sizes) {
NVTX_RANGE(K2_FUNC);
src.Populate();
int32_t dim0 = src.Dim0();
int32_t num_axes = src.NumAxes();
K2_CHECK_GE(num_axes, 2);
ContextPtr &c = src.Context();
const std::vector<RaggedShapeLayer> &axes_in = src.Layers();
// get those row_end elements at each axis.
int32_t ans_size = static_cast<int32_t>(sizes.size());
Array1<int32_t> row_ends(c, num_axes * ans_size);
Array1<int32_t> sizes_array(GetCpuContext(), sizes);
Array1<int32_t> indexes = row_ends.Arange(0, ans_size);
indexes.CopyFrom(sizes_array);
for (int32_t axis = 1; axis < num_axes; ++axis) {
Array1<int32_t> curr_axis_row_ends =
row_ends.Arange(axis * ans_size, (axis + 1) * ans_size);
axes_in[axis - 1].row_splits.Index(indexes, &curr_axis_row_ends);
indexes = curr_axis_row_ends;
}
row_ends = row_ends.To(GetCpuContext());
std::vector<RaggedShape> ans(ans_size);
for (int32_t i = 0; i != ans_size; ++i) {
std::vector<RaggedShapeLayer> axes_out(axes_in.size());
int32_t row_end = row_ends[i];
K2_CHECK(row_end >= 0 && row_end <= dim0);
for (int32_t axis = 0; axis < num_axes - 1; ++axis) {
axes_out[axis].row_splits =
axes_in[axis].row_splits.Arange(0, row_end + 1);
row_end = row_ends[i + (axis + 1) * ans_size];
axes_out[axis].row_ids = axes_in[axis].row_ids.Arange(0, row_end);
axes_out[axis].cached_tot_size = row_end;
}
ans[i] = RaggedShape(axes_out, false);
}
return ans;
}
Ragged<int32_t> AddSuffixToRagged(Ragged<int32_t> &src,
const Array1<int32_t> &suffix) {
NVTX_RANGE(K2_FUNC);
int32_t num_axes = src.NumAxes();
K2_CHECK_GE(num_axes, 2);
K2_CHECK_EQ(suffix.Dim(), src.TotSize(num_axes - 2));
ContextPtr &c = src.Context();
Array1<int32_t> dst_values(c, src.NumElements() + suffix.Dim());
RaggedShape dst_shape = ChangeSublistSize(src.shape, 1);
// "row_splits1" and "row_ids1" below are actually on the last axis. We name
// them with "1" so that we can use "idx01" and "idx0" for those indexes in
// lambda, following the naming convention explained in k2/csrc/utils.h
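// E.g. src = [ [1 2] [3] ] with suffix = [10 20] gives [ [1 2 10] [3 20] ].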
const int32_t *dst_row_splits1_data =
dst_shape.RowSplits(num_axes - 1).Data(),
*dst_row_ids1_data = dst_shape.RowIds(num_axes - 1).Data(),
*src_values_data = src.values.Data(),
*suffix_data = suffix.Data();
int32_t *dst_values_data = dst_values.Data();
K2_EVAL(
c, dst_shape.NumElements(), lambda_copy_values, (int32_t idx01)->void {
int32_t idx0 = dst_row_ids1_data[idx01];
if (idx01 == dst_row_splits1_data[idx0 + 1] - 1) {
// idx01 points to the last element of this row; copy from suffix
dst_values_data[idx01] = suffix_data[idx0];
} else {
// copy from src
int32_t src_idx01 = idx01 - dst_row_ids1_data[idx01];
dst_values_data[idx01] = src_values_data[src_idx01];
}
});
return Ragged<int32_t>(dst_shape, dst_values);
}
Ragged<int32_t> AddPrefixToRagged(Ragged<int32_t> &src,
const Array1<int32_t> &prefix) {
NVTX_RANGE(K2_FUNC);
int32_t num_axes = src.NumAxes();
K2_CHECK_GE(num_axes, 2);
K2_CHECK_EQ(prefix.Dim(), src.TotSize(num_axes - 2));
ContextPtr &c = src.Context();
Array1<int32_t> dst_values(c, src.NumElements() + prefix.Dim());
RaggedShape dst_shape = ChangeSublistSize(src.shape, 1);
// "row_splits1" and "row_ids1" below are actually on the last axis. We name
// them with "1" so that we can use "idx01" and "idx0" for those indexes in
// lambda, following the naming convention explained in k2/csrc/utils.h
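// E.g. src = [ [1 2] [3] ] with prefix = [10 20] gives [ [10 1 2] [20 3] ].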
const int32_t *dst_row_splits1_data =
dst_shape.RowSplits(num_axes - 1).Data(),
*dst_row_ids1_data = dst_shape.RowIds(num_axes - 1).Data(),
*src_values_data = src.values.Data(),
*prefix_data = prefix.Data();
int32_t *dst_values_data = dst_values.Data();
K2_EVAL(
c, dst_shape.NumElements(), lambda_copy_values, (int32_t idx01)->void {
int32_t idx0 = dst_row_ids1_data[idx01];
if (idx01 == dst_row_splits1_data[idx0]) {
// idx01 points to the first element of this row; copy from prefix
dst_values_data[idx01] = prefix_data[idx0];
} else {
// copy from src
int32_t src_idx01 = idx01 - dst_row_ids1_data[idx01] - 1;
dst_values_data[idx01] = src_values_data[src_idx01];
}
});
return Ragged<int32_t>(dst_shape, dst_values);
}
RaggedShape SubsampleRaggedShape(RaggedShape &src, Renumbering &renumbering) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(renumbering.NumOldElems(), src.NumElements());
// Make sure final row-ids are populated.
src.RowIds(src.NumAxes() - 1);
std::vector<RaggedShapeLayer> axes = src.Layers();
axes.back().row_ids = axes.back().row_ids[renumbering.New2Old()];
axes.back().row_splits = renumbering.Old2New()[axes.back().row_splits];
axes.back().cached_tot_size = axes.back().row_ids.Dim();
return RaggedShape(axes);
}
RaggedShape SubsampleRaggedShape(RaggedShape &src, Renumbering &r_before_last,
Renumbering &r_last) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(r_before_last.NumOldElems(), src.TotSize(src.NumAxes() - 2));
K2_CHECK_EQ(r_last.NumOldElems(), src.NumElements());
// Make sure final and before-final row-ids are populated.
src.RowIds(src.NumAxes() - 2);
src.RowIds(src.NumAxes() - 1);
std::vector<RaggedShapeLayer> axes = src.Layers();
// Suppose this shape has 3 axes (0,1,2). Its NumAxes()==3;
// axes.size()==2.
// r_before_last deals with the numbering on axis 1.
// r_last deals with the numbering on axis 2.
RaggedShapeLayer &before_last = axes[axes.size() - 2],
&last = axes[axes.size() - 1];
int32_t new_tot_size1 = r_before_last.NumNewElems(),
new_tot_size2 = r_last.NumNewElems();
ContextPtr c = src.Context();
Array1<int32_t> before_last_row_ids(c, new_tot_size1),
last_row_splits(c, new_tot_size1 + 1), last_row_ids(c, new_tot_size2);
// The variable names below use this 3-axis assumption but the
// code will work for greater number of axes.
int32_t *new_row_ids1_data = before_last_row_ids.Data(),
*new_row_splits2_data = last_row_splits.Data(),
*new_row_ids2_data = last_row_ids.Data();
const int32_t *old_row_ids1_data = before_last.row_ids.Data(),
*old_row_splits2_data = last.row_splits.Data(),
*old_row_ids2_data = last.row_ids.Data();
const int32_t *idx01_new2old_data = r_before_last.New2Old().Data(),
*idx01_old2new_data = r_before_last.Old2New().Data(),
*idx012_new2old_data = r_last.New2Old().Data(),
*idx012_old2new_data = r_last.Old2New().Data();
ParallelRunner pr(c);
{
With w(pr.NewStream());
// before_last.row_splits maps from idx0 -> idx01 (contains idx01's). Map
// the idx01's; the idx0s stay the same.
before_last.row_splits = r_before_last.Old2New()[before_last.row_splits];
}
{
With w(pr.NewStream());
K2_EVAL(
c, new_tot_size1 + 1, lambda_set_row_ids1_and_row_splits2,
(int32_t new_idx01)->void {
// row_ids1 maps from idx01 -> idx0. Select subset of
// idx01's; the idx0 stays the same.
int32_t old_idx01 = idx01_new2old_data[new_idx01];
if (new_idx01 < new_tot_size1)
new_row_ids1_data[new_idx01] = old_row_ids1_data[old_idx01];
// row_splits2 maps from idx01 -> idx012.  Select the subset of idx01
// positions and map the stored idx012 values to their new numbering.
new_row_splits2_data[new_idx01] =
idx012_old2new_data[old_row_splits2_data[old_idx01]];
});
}
{
With w(pr.NewStream());
K2_EVAL(
c, new_tot_size2, lambda_set_row_ids2, (int32_t new_idx012)->void {
// row_ids2 maps from idx012 -> idx01. Both must be mapped.
int32_t old_idx012 = idx012_new2old_data[new_idx012];
int32_t old_idx01 = old_row_ids2_data[old_idx012],
new_idx01 = idx01_old2new_data[old_idx01];
new_row_ids2_data[new_idx012] = new_idx01;
});
}
before_last.row_ids = before_last_row_ids;
before_last.cached_tot_size = new_tot_size1;
last.row_splits = last_row_splits;
last.row_ids = last_row_ids;
last.cached_tot_size = new_tot_size2;
return RaggedShape(axes);
}
RaggedShape EmptyRaggedShape(ContextPtr &c, int32_t num_axes) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_GE(num_axes, 2);
std::vector<RaggedShapeLayer> axes(num_axes - 1);
axes[0].row_splits = Array1<int32_t>(c, 1, 0);
// row_ids will be the empty vector, with context `c`.
axes[0].row_ids = axes[0].row_splits.Range(0, 0);
axes[0].cached_tot_size = 0;
for (int32_t a = 1; a + 1 < num_axes; a++) axes[a] = axes[0];
return RaggedShape(axes);
}
Array1<int32_t> GetDecreasingSizeOrder(RaggedShape &shape) {
ContextPtr c = shape.Context();
Array1<int32_t> sizes = RowSplitsToSizes(shape.RowSplits(1));
Array1<int32_t> index_map;
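// Sort the row sizes in decreasing order; index_map then gives, for each
// position in the sorted order, the original row index.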
Sort<int32_t, GreaterThan<int32_t>>(&sizes, &index_map);
return index_map;
}
RaggedShape GetLayer(const RaggedShape &src, int32_t layer) {
K2_CHECK_GE(layer, 0);
K2_CHECK_LT(layer, src.NumAxes() - 1);
std::vector<RaggedShapeLayer> layers;
layers.push_back(src.Layers()[layer]);
bool check = false;
return RaggedShape(layers, check);
}
void DecomposeRaggedShape(const RaggedShape &src,
int32_t axis,
RaggedShape *top, RaggedShape *bottom) {
K2_CHECK_GT(axis, 0);
K2_CHECK_LT(axis, src.NumAxes() - 1);
const std::vector<RaggedShapeLayer> &src_layers = src.Layers();
std::vector<RaggedShapeLayer> top_layers(axis),
bottom_layers(src_layers.size() - axis);
int32_t src_size = src_layers.size();
for (int32_t i = 0; i < axis; ++i)
top_layers[i] = src_layers[i];
for (int32_t i = axis; i < src_size; ++i)
bottom_layers[i - axis] = src_layers[i];
*top = RaggedShape(top_layers);
*bottom = RaggedShape(bottom_layers);
}
RaggedShape RemoveEmptyLists(RaggedShape &src_shape,
int32_t axis,
Renumbering *renumbering_out) {
if (axis == 0) {
return RemoveEmptyListsAxis0(src_shape, renumbering_out);
}
RaggedShape top_shape, bottom_shape;
DecomposeRaggedShape(src_shape, axis, &top_shape, &bottom_shape);
Renumbering r_temp;
if (!renumbering_out)
renumbering_out = &r_temp;
bottom_shape = RemoveEmptyListsAxis0(bottom_shape, renumbering_out);
top_shape = SubsampleRaggedShape(top_shape, *renumbering_out);
return ComposeRaggedShapes(top_shape, bottom_shape);
}
RaggedShape RemoveSomeEmptyLists(RaggedShape &src_shape,
int32_t axis,
Renumbering &renumbering) {
if (axis == 0) {
return RenumberAxis0Simple(src_shape, renumbering);
}
RaggedShape top_shape, bottom_shape;
DecomposeRaggedShape(src_shape, axis, &top_shape, &bottom_shape);
bottom_shape = RenumberAxis0Simple(bottom_shape, renumbering);
top_shape = SubsampleRaggedShape(top_shape, renumbering);
return ComposeRaggedShapes(top_shape, bottom_shape);
}
RaggedShape RemoveEmptyListsAxis0(RaggedShape &src_shape,
Renumbering *renumbering_out) {
Renumbering r_temp;
if (!renumbering_out)
renumbering_out = &r_temp;
ContextPtr c = src_shape.Context();
int32_t num_lists = src_shape.Dim0();
*renumbering_out = Renumbering(c, num_lists);
int32_t *row_splits_data = src_shape.RowSplits(1).Data();
char *keep_data = renumbering_out->Keep().Data();
K2_EVAL(c, num_lists, lambda_set_keep, (int32_t i) -> void {
keep_data[i] = (row_splits_data[i+1] != row_splits_data[i]);
});
return RenumberAxis0Simple(src_shape, *renumbering_out);
}
RaggedShape RenumberAxis0Simple(RaggedShape &src_shape,
Renumbering &renumbering) {
K2_CHECK_EQ(renumbering.NumOldElems(), src_shape.Dim0());
ContextPtr c = src_shape.Context();
src_shape.RowIds(1); // make sure RowIds(1) is populated.
std::vector<RaggedShapeLayer> layers = src_shape.Layers();
int32_t num_layers = layers.size();
int32_t new_num_lists = renumbering.NumNewElems(),
num_elems = src_shape.TotSize(1); // unchanged old vs. new.
Array1<int32_t> new_row_splits(c, new_num_lists + 1),
new_row_ids = renumbering.Old2New()[src_shape.RowIds(1)];
int32_t *new_row_splits_data = new_row_splits.Data();
const int32_t *old_row_splits_data = src_shape.RowSplits(1).Data(),
*new2old_data = renumbering.New2Old().Data();
// set `new_row_splits_data`.
#ifndef NDEBUG
{
Array1<int32_t> is_ok(c, 1, 1);
int32_t *is_ok_data = is_ok.Data();
int32_t old_num_lists = src_shape.Dim0();
const int32_t *old2new_data = renumbering.Old2New().Data();
K2_EVAL(c, old_num_lists, lambda_check_preconditions, (int32_t i) -> void {
if (old2new_data[i+1] == old2new_data[i]) { // This list not kept
if (old_row_splits_data[i+1] != old_row_splits_data[i]) {
// this list was nonempty...
is_ok_data[0] = 0;
}
}
});
K2_CHECK_NE(is_ok[0], 0) << "RenumberAxis0Simple(): preconditions not met; "
"renumbering removes nonempty lists.";
}
#endif
K2_EVAL(c, new_num_lists + 1, lambda_set_new_row_splits, (int32_t new_i) -> void {
int32_t j;
if (new_i == new_num_lists) {
j = num_elems;
} else {
int32_t old_i = new2old_data[new_i];
j = old_row_splits_data[old_i];
}
new_row_splits_data[new_i] = j;
});
layers[0].row_splits = new_row_splits;
layers[0].row_ids = new_row_ids;
// no need to set its cached_tot_size; that didn't change.
return RaggedShape(layers);
}
} // namespace k2
|
1f560f649285131757077934861bfa790d8e8139.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <io/parquet/parquet_gpu.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/table/row_operators.cuh>
#include <rmm/exec_policy.hpp>
namespace cudf {
namespace io {
namespace parquet {
namespace gpu {
template <int block_size>
__global__ void __launch_bounds__(block_size, 1)
initialize_chunk_hash_maps_kernel(device_span<EncColumnChunk> chunks)
{
auto chunk = chunks[blockIdx.x];
auto t = threadIdx.x;
// fut: Now that per-chunk dict is same size as ck.num_values, try to not use one block per chunk
for (size_t i = 0; i < chunk.dict_map_size; i += block_size) {
if (t + i < chunk.dict_map_size) {
new (&chunk.dict_map_slots[t + i].first) map_type::atomic_key_type{KEY_SENTINEL};
new (&chunk.dict_map_slots[t + i].second) map_type::atomic_mapped_type{VALUE_SENTINEL};
}
}
}
template <typename T>
struct equality_functor {
column_device_view const& col;
__device__ bool operator()(size_type lhs_idx, size_type rhs_idx)
{
// We don't call this for nulls so this is fine
return equality_compare(col.element<T>(lhs_idx), col.element<T>(rhs_idx));
}
};
template <typename T>
struct hash_functor {
column_device_view const& col;
__device__ auto operator()(size_type idx) { return MurmurHash3_32<T>{}(col.element<T>(idx)); }
};
struct map_insert_fn {
map_type::device_mutable_view& map;
template <typename T>
__device__ bool operator()(column_device_view const& col, size_type i)
{
if constexpr (column_device_view::has_element_accessor<T>()) {
auto hash_fn = hash_functor<T>{col};
auto equality_fn = equality_functor<T>{col};
return map.insert(std::make_pair(i, i), hash_fn, equality_fn);
} else {
cudf_assert(false && "Unsupported type to insert in map");
}
return false;
}
};
struct map_find_fn {
map_type::device_view& map;
template <typename T>
__device__ auto operator()(column_device_view const& col, size_type i)
{
if constexpr (column_device_view::has_element_accessor<T>()) {
auto hash_fn = hash_functor<T>{col};
auto equality_fn = equality_functor<T>{col};
return map.find(i, hash_fn, equality_fn);
} else {
cudf_assert(false && "Unsupported type to insert in map");
}
return map.end();
}
};
template <int block_size>
__global__ void __launch_bounds__(block_size, 1)
populate_chunk_hash_maps_kernel(cudf::detail::device_2dspan<EncColumnChunk> chunks,
cudf::detail::device_2dspan<gpu::PageFragment const> frags)
{
auto col_idx = blockIdx.y;
auto block_x = blockIdx.x;
auto t = threadIdx.x;
auto frag = frags[col_idx][block_x];
auto chunk = frag.chunk;
auto col = chunk->col_desc;
size_type start_row = frag.start_row;
size_type end_row = frag.start_row + frag.num_rows;
__shared__ size_type s_start_value_idx;
__shared__ size_type s_num_values;
if (not chunk->use_dictionary) { return; }
if (t == 0) {
// Find the bounds of values in leaf column to be inserted into the map for current chunk
auto cudf_col = *(col->parent_column);
s_start_value_idx = row_to_value_idx(start_row, cudf_col);
auto end_value_idx = row_to_value_idx(end_row, cudf_col);
s_num_values = end_value_idx - s_start_value_idx;
}
__syncthreads();
column_device_view const& data_col = *col->leaf_column;
using block_reduce = hipcub::BlockReduce<size_type, block_size>;
__shared__ typename block_reduce::TempStorage reduce_storage;
// Make a view of the hash map
auto hash_map_mutable = map_type::device_mutable_view(
chunk->dict_map_slots, chunk->dict_map_size, KEY_SENTINEL, VALUE_SENTINEL);
auto hash_map = map_type::device_view(
chunk->dict_map_slots, chunk->dict_map_size, KEY_SENTINEL, VALUE_SENTINEL);
__shared__ int total_num_dict_entries;
for (size_type i = 0; i < s_num_values; i += block_size) {
// add the value to hash map
size_type val_idx = i + t + s_start_value_idx;
bool is_valid =
(i + t < s_num_values && val_idx < data_col.size()) and data_col.is_valid(val_idx);
// insert element at val_idx to hash map and count successful insertions
size_type is_unique = 0;
size_type uniq_elem_size = 0;
if (is_valid) {
auto found_slot = type_dispatcher(data_col.type(), map_find_fn{hash_map}, data_col, val_idx);
if (found_slot == hash_map.end()) {
is_unique =
type_dispatcher(data_col.type(), map_insert_fn{hash_map_mutable}, data_col, val_idx);
uniq_elem_size = [&]() -> size_type {
if (not is_unique) { return 0; }
switch (col->physical_type) {
case Type::INT32: return 4;
case Type::INT64: return 8;
case Type::INT96: return 12;
case Type::FLOAT: return 4;
case Type::DOUBLE: return 8;
case Type::BYTE_ARRAY:
if (data_col.type().id() == type_id::STRING) {
// Strings are stored as 4 byte length + string bytes
return 4 + data_col.element<string_view>(val_idx).size_bytes();
}
case Type::FIXED_LEN_BYTE_ARRAY:
if (data_col.type().id() == type_id::DECIMAL128) { return sizeof(__int128_t); }
default: cudf_assert(false && "Unsupported type for dictionary encoding"); return 0;
}
}();
}
}
__syncthreads();
auto num_unique = block_reduce(reduce_storage).Sum(is_unique);
__syncthreads();
auto uniq_data_size = block_reduce(reduce_storage).Sum(uniq_elem_size);
if (t == 0) {
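// atomicAdd returns the chunk's previous total, so add num_unique to get the
// total that includes this block's contribution.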
total_num_dict_entries = atomicAdd(&chunk->num_dict_entries, num_unique);
total_num_dict_entries += num_unique;
atomicAdd(&chunk->uniq_data_size, uniq_data_size);
}
__syncthreads();
// Check if the num unique values in chunk has already exceeded max dict size and early exit
if (total_num_dict_entries > MAX_DICT_SIZE) { return; }
}
}
template <int block_size>
__global__ void __launch_bounds__(block_size, 1)
collect_map_entries_kernel(device_span<EncColumnChunk> chunks)
{
auto& chunk = chunks[blockIdx.x];
if (not chunk.use_dictionary) { return; }
auto t = threadIdx.x;
auto map =
map_type::device_view(chunk.dict_map_slots, chunk.dict_map_size, KEY_SENTINEL, VALUE_SENTINEL);
__shared__ size_type counter;
if (t == 0) counter = 0;
__syncthreads();
for (size_t i = 0; i < chunk.dict_map_size; i += block_size) {
if (t + i < chunk.dict_map_size) {
auto slot = map.begin_slot() + t + i;
auto key = static_cast<map_type::key_type>(slot->first);
if (key != KEY_SENTINEL) {
auto loc = atomicAdd(&counter, 1);
cudf_assert(loc < MAX_DICT_SIZE && "Number of filled slots exceeds max dict size");
chunk.dict_data[loc] = key;
// If sorting dict page ever becomes a hard requirement, enable the following statement and
// add a dict sorting step before storing into the slot's second field.
// chunk.dict_data_idx[loc] = t + i;
slot->second.store(loc);
// TODO: ^ This doesn't need to be atomic. Try casting to value_type ptr and just writing.
}
}
}
}
template <int block_size>
__global__ void __launch_bounds__(block_size, 1)
get_dictionary_indices_kernel(cudf::detail::device_2dspan<EncColumnChunk> chunks,
cudf::detail::device_2dspan<gpu::PageFragment const> frags)
{
auto col_idx = blockIdx.y;
auto block_x = blockIdx.x;
auto t = threadIdx.x;
auto frag = frags[col_idx][block_x];
auto chunk = frag.chunk;
auto col = chunk->col_desc;
size_type start_row = frag.start_row;
size_type end_row = frag.start_row + frag.num_rows;
__shared__ size_type s_start_value_idx;
__shared__ size_type s_ck_start_val_idx;
__shared__ size_type s_num_values;
if (t == 0) {
// Find the bounds of values in leaf column to be searched in the map for current chunk
auto cudf_col = *(col->parent_column);
s_start_value_idx = row_to_value_idx(start_row, cudf_col);
s_ck_start_val_idx = row_to_value_idx(chunk->start_row, cudf_col);
auto end_value_idx = row_to_value_idx(end_row, cudf_col);
s_num_values = end_value_idx - s_start_value_idx;
}
__syncthreads();
if (not chunk->use_dictionary) { return; }
column_device_view const& data_col = *col->leaf_column;
auto map = map_type::device_view(
chunk->dict_map_slots, chunk->dict_map_size, KEY_SENTINEL, VALUE_SENTINEL);
for (size_t i = 0; i < s_num_values; i += block_size) {
if (t + i < s_num_values) {
auto val_idx = s_start_value_idx + t + i;
bool is_valid =
(i + t < s_num_values && val_idx < data_col.size()) ? data_col.is_valid(val_idx) : false;
if (is_valid) {
auto found_slot = type_dispatcher(data_col.type(), map_find_fn{map}, data_col, val_idx);
cudf_assert(found_slot != map.end() &&
"Unable to find value in map in dictionary index construction");
if (found_slot != map.end()) {
// No need for atomic as this is not going to be modified by any other thread
auto* val_ptr = reinterpret_cast<map_type::mapped_type*>(&found_slot->second);
chunk->dict_index[val_idx - s_ck_start_val_idx] = *val_ptr;
}
}
}
}
}
void initialize_chunk_hash_maps(device_span<EncColumnChunk> chunks, rmm::cuda_stream_view stream)
{
constexpr int block_size = 1024;
hipLaunchKernelGGL(( initialize_chunk_hash_maps_kernel<block_size>)
, dim3(chunks.size()), dim3(block_size), 0, stream.value(), chunks);
}
void populate_chunk_hash_maps(cudf::detail::device_2dspan<EncColumnChunk> chunks,
cudf::detail::device_2dspan<gpu::PageFragment const> frags,
rmm::cuda_stream_view stream)
{
constexpr int block_size = 256;
dim3 const dim_grid(frags.size().second, frags.size().first);
hipLaunchKernelGGL(( populate_chunk_hash_maps_kernel<block_size>)
, dim3(dim_grid), dim3(block_size), 0, stream.value(), chunks, frags);
}
void collect_map_entries(device_span<EncColumnChunk> chunks, rmm::cuda_stream_view stream)
{
constexpr int block_size = 1024;
hipLaunchKernelGGL(( collect_map_entries_kernel<block_size>), dim3(chunks.size()), dim3(block_size), 0, stream.value(), chunks);
}
void get_dictionary_indices(cudf::detail::device_2dspan<EncColumnChunk> chunks,
cudf::detail::device_2dspan<gpu::PageFragment const> frags,
rmm::cuda_stream_view stream)
{
constexpr int block_size = 256;
dim3 const dim_grid(frags.size().second, frags.size().first);
hipLaunchKernelGGL(( get_dictionary_indices_kernel<block_size>)
, dim3(dim_grid), dim3(block_size), 0, stream.value(), chunks, frags);
}
} // namespace gpu
} // namespace parquet
} // namespace io
} // namespace cudf
| 1f560f649285131757077934861bfa790d8e8139.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <io/parquet/parquet_gpu.hpp>
#include <cudf/detail/iterator.cuh>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/table/row_operators.cuh>
#include <rmm/exec_policy.hpp>
namespace cudf {
namespace io {
namespace parquet {
namespace gpu {
template <int block_size>
__global__ void __launch_bounds__(block_size, 1)
initialize_chunk_hash_maps_kernel(device_span<EncColumnChunk> chunks)
{
auto chunk = chunks[blockIdx.x];
auto t = threadIdx.x;
// fut: Now that per-chunk dict is same size as ck.num_values, try to not use one block per chunk
for (size_t i = 0; i < chunk.dict_map_size; i += block_size) {
if (t + i < chunk.dict_map_size) {
new (&chunk.dict_map_slots[t + i].first) map_type::atomic_key_type{KEY_SENTINEL};
new (&chunk.dict_map_slots[t + i].second) map_type::atomic_mapped_type{VALUE_SENTINEL};
}
}
}
template <typename T>
struct equality_functor {
column_device_view const& col;
__device__ bool operator()(size_type lhs_idx, size_type rhs_idx)
{
// We don't call this for nulls so this is fine
return equality_compare(col.element<T>(lhs_idx), col.element<T>(rhs_idx));
}
};
template <typename T>
struct hash_functor {
column_device_view const& col;
__device__ auto operator()(size_type idx) { return MurmurHash3_32<T>{}(col.element<T>(idx)); }
};
struct map_insert_fn {
map_type::device_mutable_view& map;
template <typename T>
__device__ bool operator()(column_device_view const& col, size_type i)
{
if constexpr (column_device_view::has_element_accessor<T>()) {
auto hash_fn = hash_functor<T>{col};
auto equality_fn = equality_functor<T>{col};
return map.insert(std::make_pair(i, i), hash_fn, equality_fn);
} else {
cudf_assert(false && "Unsupported type to insert in map");
}
return false;
}
};
struct map_find_fn {
map_type::device_view& map;
template <typename T>
__device__ auto operator()(column_device_view const& col, size_type i)
{
if constexpr (column_device_view::has_element_accessor<T>()) {
auto hash_fn = hash_functor<T>{col};
auto equality_fn = equality_functor<T>{col};
return map.find(i, hash_fn, equality_fn);
} else {
cudf_assert(false && "Unsupported type to insert in map");
}
return map.end();
}
};
template <int block_size>
__global__ void __launch_bounds__(block_size, 1)
populate_chunk_hash_maps_kernel(cudf::detail::device_2dspan<EncColumnChunk> chunks,
cudf::detail::device_2dspan<gpu::PageFragment const> frags)
{
auto col_idx = blockIdx.y;
auto block_x = blockIdx.x;
auto t = threadIdx.x;
auto frag = frags[col_idx][block_x];
auto chunk = frag.chunk;
auto col = chunk->col_desc;
size_type start_row = frag.start_row;
size_type end_row = frag.start_row + frag.num_rows;
__shared__ size_type s_start_value_idx;
__shared__ size_type s_num_values;
if (not chunk->use_dictionary) { return; }
if (t == 0) {
// Find the bounds of values in leaf column to be inserted into the map for current chunk
auto cudf_col = *(col->parent_column);
s_start_value_idx = row_to_value_idx(start_row, cudf_col);
auto end_value_idx = row_to_value_idx(end_row, cudf_col);
s_num_values = end_value_idx - s_start_value_idx;
}
__syncthreads();
column_device_view const& data_col = *col->leaf_column;
using block_reduce = cub::BlockReduce<size_type, block_size>;
__shared__ typename block_reduce::TempStorage reduce_storage;
// Make a view of the hash map
auto hash_map_mutable = map_type::device_mutable_view(
chunk->dict_map_slots, chunk->dict_map_size, KEY_SENTINEL, VALUE_SENTINEL);
auto hash_map = map_type::device_view(
chunk->dict_map_slots, chunk->dict_map_size, KEY_SENTINEL, VALUE_SENTINEL);
__shared__ int total_num_dict_entries;
for (size_type i = 0; i < s_num_values; i += block_size) {
// add the value to hash map
size_type val_idx = i + t + s_start_value_idx;
bool is_valid =
(i + t < s_num_values && val_idx < data_col.size()) and data_col.is_valid(val_idx);
// insert element at val_idx to hash map and count successful insertions
size_type is_unique = 0;
size_type uniq_elem_size = 0;
if (is_valid) {
auto found_slot = type_dispatcher(data_col.type(), map_find_fn{hash_map}, data_col, val_idx);
if (found_slot == hash_map.end()) {
is_unique =
type_dispatcher(data_col.type(), map_insert_fn{hash_map_mutable}, data_col, val_idx);
uniq_elem_size = [&]() -> size_type {
if (not is_unique) { return 0; }
switch (col->physical_type) {
case Type::INT32: return 4;
case Type::INT64: return 8;
case Type::INT96: return 12;
case Type::FLOAT: return 4;
case Type::DOUBLE: return 8;
case Type::BYTE_ARRAY:
if (data_col.type().id() == type_id::STRING) {
// Strings are stored as 4 byte length + string bytes
return 4 + data_col.element<string_view>(val_idx).size_bytes();
}
case Type::FIXED_LEN_BYTE_ARRAY:
if (data_col.type().id() == type_id::DECIMAL128) { return sizeof(__int128_t); }
default: cudf_assert(false && "Unsupported type for dictionary encoding"); return 0;
}
}();
}
}
__syncthreads();
auto num_unique = block_reduce(reduce_storage).Sum(is_unique);
__syncthreads();
auto uniq_data_size = block_reduce(reduce_storage).Sum(uniq_elem_size);
if (t == 0) {
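// atomicAdd returns the chunk's previous total, so add num_unique to get the
// total that includes this block's contribution.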
total_num_dict_entries = atomicAdd(&chunk->num_dict_entries, num_unique);
total_num_dict_entries += num_unique;
atomicAdd(&chunk->uniq_data_size, uniq_data_size);
}
__syncthreads();
// Check if the num unique values in chunk has already exceeded max dict size and early exit
if (total_num_dict_entries > MAX_DICT_SIZE) { return; }
}
}
template <int block_size>
__global__ void __launch_bounds__(block_size, 1)
collect_map_entries_kernel(device_span<EncColumnChunk> chunks)
{
auto& chunk = chunks[blockIdx.x];
if (not chunk.use_dictionary) { return; }
auto t = threadIdx.x;
auto map =
map_type::device_view(chunk.dict_map_slots, chunk.dict_map_size, KEY_SENTINEL, VALUE_SENTINEL);
__shared__ size_type counter;
if (t == 0) counter = 0;
__syncthreads();
for (size_t i = 0; i < chunk.dict_map_size; i += block_size) {
if (t + i < chunk.dict_map_size) {
auto slot = map.begin_slot() + t + i;
auto key = static_cast<map_type::key_type>(slot->first);
if (key != KEY_SENTINEL) {
auto loc = atomicAdd(&counter, 1);
cudf_assert(loc < MAX_DICT_SIZE && "Number of filled slots exceeds max dict size");
chunk.dict_data[loc] = key;
// If sorting dict page ever becomes a hard requirement, enable the following statement and
// add a dict sorting step before storing into the slot's second field.
// chunk.dict_data_idx[loc] = t + i;
slot->second.store(loc);
// TODO: ^ This doesn't need to be atomic. Try casting to value_type ptr and just writing.
}
}
}
}
template <int block_size>
__global__ void __launch_bounds__(block_size, 1)
get_dictionary_indices_kernel(cudf::detail::device_2dspan<EncColumnChunk> chunks,
cudf::detail::device_2dspan<gpu::PageFragment const> frags)
{
auto col_idx = blockIdx.y;
auto block_x = blockIdx.x;
auto t = threadIdx.x;
auto frag = frags[col_idx][block_x];
auto chunk = frag.chunk;
auto col = chunk->col_desc;
size_type start_row = frag.start_row;
size_type end_row = frag.start_row + frag.num_rows;
__shared__ size_type s_start_value_idx;
__shared__ size_type s_ck_start_val_idx;
__shared__ size_type s_num_values;
if (t == 0) {
// Find the bounds of values in leaf column to be searched in the map for current chunk
auto cudf_col = *(col->parent_column);
s_start_value_idx = row_to_value_idx(start_row, cudf_col);
s_ck_start_val_idx = row_to_value_idx(chunk->start_row, cudf_col);
auto end_value_idx = row_to_value_idx(end_row, cudf_col);
s_num_values = end_value_idx - s_start_value_idx;
}
__syncthreads();
if (not chunk->use_dictionary) { return; }
column_device_view const& data_col = *col->leaf_column;
auto map = map_type::device_view(
chunk->dict_map_slots, chunk->dict_map_size, KEY_SENTINEL, VALUE_SENTINEL);
for (size_t i = 0; i < s_num_values; i += block_size) {
if (t + i < s_num_values) {
auto val_idx = s_start_value_idx + t + i;
bool is_valid =
(i + t < s_num_values && val_idx < data_col.size()) ? data_col.is_valid(val_idx) : false;
if (is_valid) {
auto found_slot = type_dispatcher(data_col.type(), map_find_fn{map}, data_col, val_idx);
cudf_assert(found_slot != map.end() &&
"Unable to find value in map in dictionary index construction");
if (found_slot != map.end()) {
// No need for atomic as this is not going to be modified by any other thread
auto* val_ptr = reinterpret_cast<map_type::mapped_type*>(&found_slot->second);
chunk->dict_index[val_idx - s_ck_start_val_idx] = *val_ptr;
}
}
}
}
}
void initialize_chunk_hash_maps(device_span<EncColumnChunk> chunks, rmm::cuda_stream_view stream)
{
constexpr int block_size = 1024;
initialize_chunk_hash_maps_kernel<block_size>
<<<chunks.size(), block_size, 0, stream.value()>>>(chunks);
}
void populate_chunk_hash_maps(cudf::detail::device_2dspan<EncColumnChunk> chunks,
cudf::detail::device_2dspan<gpu::PageFragment const> frags,
rmm::cuda_stream_view stream)
{
constexpr int block_size = 256;
dim3 const dim_grid(frags.size().second, frags.size().first);
populate_chunk_hash_maps_kernel<block_size>
<<<dim_grid, block_size, 0, stream.value()>>>(chunks, frags);
}
void collect_map_entries(device_span<EncColumnChunk> chunks, rmm::cuda_stream_view stream)
{
constexpr int block_size = 1024;
collect_map_entries_kernel<block_size><<<chunks.size(), block_size, 0, stream.value()>>>(chunks);
}
void get_dictionary_indices(cudf::detail::device_2dspan<EncColumnChunk> chunks,
cudf::detail::device_2dspan<gpu::PageFragment const> frags,
rmm::cuda_stream_view stream)
{
constexpr int block_size = 256;
dim3 const dim_grid(frags.size().second, frags.size().first);
get_dictionary_indices_kernel<block_size>
<<<dim_grid, block_size, 0, stream.value()>>>(chunks, frags);
}
} // namespace gpu
} // namespace parquet
} // namespace io
} // namespace cudf
|
f2f62c1958410d7e27ef977418362905d4ec8ec8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include <stdio.h>
__global__ void
solver_kernel_naive(){
}
__global__ void
solver_kernel_optimized(){
}
#endif /* _MATRIXMUL_KERNEL_H_ */
| f2f62c1958410d7e27ef977418362905d4ec8ec8.cu | #ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include <stdio.h>
__global__ void
solver_kernel_naive(){
}
__global__ void
solver_kernel_optimized(){
}
#endif /* _MATRIXMUL_KERNEL_H_ */
|
dba07da427711eec87d12ae893f8e601d3dcc6bf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_runtime.h"
__global__ void transpose_kernel(float *odata, const float *idata,
const uint *buf, const uint ndims,
size_t size) {
const uint *in_strides = buf;
const uint *out_strides = buf + ndims;
const uint *perm = buf + ndims * 2;
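    // Decompose the output linear index into per-dimension coordinates with out_strides, then rebuild the input linear index by applying the permutation to in_strides.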
    size_t o_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (o_idx >= size) return; // the last block may have extra threads; they must not touch out-of-range elements
uint i_idx = 0;
uint t = o_idx;
for (int i = 0; i < ndims; ++i) {
const uint ratio = t / out_strides[i];
t -= ratio * out_strides[i];
i_idx += ratio * in_strides[perm[i]];
}
odata[o_idx] = idata[i_idx];
}
int DLGpuTranspose(const DLArrayHandle input, DLArrayHandle output, int *perm,
DLStreamHandle stream_handle = NULL) {
uint ndim = uint(input->ndim);
uint ndim_ = uint(output->ndim);
assert(ndim == ndim_);
int64_t *in_dims = input->shape;
int64_t *out_dims = output->shape;
float *input_data = (float *)input->data;
float *output_data = (float *)output->data;
uint *buf = (uint *)malloc(3 * ndim * sizeof(uint));
uint *gpu_buf = NULL;
uint in_stride = 1;
uint out_stride = 1;
for (int i = ndim - 1; i >= 0; --i) {
buf[i] = uint(in_stride);
buf[ndim + i] = uint(out_stride);
buf[ndim * 2 + i] = uint(perm[i]);
in_stride *= uint(in_dims[i]);
out_stride *= uint(out_dims[i]);
}
assert(in_stride == out_stride);
size_t size = in_stride;
int dev_id = (input->ctx).device_id;
if (is_chunk_init(dev_id) == false) {
chunk_init(dev_id);
}
size_t buf_size = 3 * ndim * sizeof(uint);
gpu_buf = (uint *)find_chunk(buf_size, dev_id);
dim3 blocks;
dim3 threads;
if (size <= 1024) {
threads.x = size;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (size + 1023) / 1024;
}
hipStream_t cu_stream = static_cast<hipStream_t>(
stream_handle ? *(hipStream_t *)(stream_handle->handle) : NULL);
if (cu_stream != NULL) {
CUDA_CALL(hipMemcpyAsync(gpu_buf, (void *)buf, buf_size,
hipMemcpyHostToDevice, cu_stream));
hipLaunchKernelGGL(( transpose_kernel), dim3(blocks), dim3(threads), 0, cu_stream,
output_data, input_data, gpu_buf, ndim, size);
} else {
CUDA_CALL(
hipMemcpy(gpu_buf, (void *)buf, buf_size, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( transpose_kernel), dim3(blocks), dim3(threads), 0, 0, output_data, input_data, gpu_buf,
ndim, size);
}
del_chunk(gpu_buf, dev_id);
free(buf);
return 0;
}
int DLGpuTransposeSimple(const DLArrayHandle input, DLArrayHandle output,
const DLArrayHandle gpu_buffer,
DLStreamHandle stream_handle = NULL) {
const float *input_data = (const float *)input->data;
float *output_data = (float *)output->data;
const uint *gpu_buf = (const uint *)gpu_buffer->data;
const uint ndim = output->ndim;
size_t size = 1;
for (uint i = 0; i < ndim; ++i) {
size *= output->shape[i];
}
dim3 blocks;
dim3 threads;
if (size <= 1024) {
threads.x = size;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (size + 1023) / 1024;
}
if (stream_handle) {
hipStream_t cu_stream = (*(hipStream_t *)(stream_handle->handle));
hipLaunchKernelGGL(( transpose_kernel), dim3(blocks), dim3(threads), 0, cu_stream,
output_data, input_data, gpu_buf, ndim, size);
} else {
hipLaunchKernelGGL(( transpose_kernel), dim3(blocks), dim3(threads), 0, 0, output_data, input_data, gpu_buf,
ndim, size);
}
return 0;
} | dba07da427711eec87d12ae893f8e601d3dcc6bf.cu | #include "gpu_runtime.h"
__global__ void transpose_kernel(float *odata, const float *idata,
const uint *buf, const uint ndims,
size_t size) {
const uint *in_strides = buf;
const uint *out_strides = buf + ndims;
const uint *perm = buf + ndims * 2;
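    // Decompose the output linear index into per-dimension coordinates with out_strides, then rebuild the input linear index by applying the permutation to in_strides.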
    size_t o_idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (o_idx >= size) return; // the last block may have extra threads; they must not touch out-of-range elements
uint i_idx = 0;
uint t = o_idx;
for (int i = 0; i < ndims; ++i) {
const uint ratio = t / out_strides[i];
t -= ratio * out_strides[i];
i_idx += ratio * in_strides[perm[i]];
}
odata[o_idx] = idata[i_idx];
}
int DLGpuTranspose(const DLArrayHandle input, DLArrayHandle output, int *perm,
DLStreamHandle stream_handle = NULL) {
uint ndim = uint(input->ndim);
uint ndim_ = uint(output->ndim);
assert(ndim == ndim_);
int64_t *in_dims = input->shape;
int64_t *out_dims = output->shape;
float *input_data = (float *)input->data;
float *output_data = (float *)output->data;
uint *buf = (uint *)malloc(3 * ndim * sizeof(uint));
uint *gpu_buf = NULL;
uint in_stride = 1;
uint out_stride = 1;
for (int i = ndim - 1; i >= 0; --i) {
buf[i] = uint(in_stride);
buf[ndim + i] = uint(out_stride);
buf[ndim * 2 + i] = uint(perm[i]);
in_stride *= uint(in_dims[i]);
out_stride *= uint(out_dims[i]);
}
assert(in_stride == out_stride);
size_t size = in_stride;
int dev_id = (input->ctx).device_id;
if (is_chunk_init(dev_id) == false) {
chunk_init(dev_id);
}
size_t buf_size = 3 * ndim * sizeof(uint);
gpu_buf = (uint *)find_chunk(buf_size, dev_id);
dim3 blocks;
dim3 threads;
if (size <= 1024) {
threads.x = size;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (size + 1023) / 1024;
}
cudaStream_t cu_stream = static_cast<cudaStream_t>(
stream_handle ? *(cudaStream_t *)(stream_handle->handle) : NULL);
if (cu_stream != NULL) {
CUDA_CALL(cudaMemcpyAsync(gpu_buf, (void *)buf, buf_size,
cudaMemcpyHostToDevice, cu_stream));
transpose_kernel<<<blocks, threads, 0, cu_stream>>>(
output_data, input_data, gpu_buf, ndim, size);
} else {
CUDA_CALL(
cudaMemcpy(gpu_buf, (void *)buf, buf_size, cudaMemcpyHostToDevice));
transpose_kernel<<<blocks, threads>>>(output_data, input_data, gpu_buf,
ndim, size);
}
del_chunk(gpu_buf, dev_id);
free(buf);
return 0;
}
int DLGpuTransposeSimple(const DLArrayHandle input, DLArrayHandle output,
const DLArrayHandle gpu_buffer,
DLStreamHandle stream_handle = NULL) {
const float *input_data = (const float *)input->data;
float *output_data = (float *)output->data;
const uint *gpu_buf = (const uint *)gpu_buffer->data;
const uint ndim = output->ndim;
size_t size = 1;
for (uint i = 0; i < ndim; ++i) {
size *= output->shape[i];
}
dim3 blocks;
dim3 threads;
if (size <= 1024) {
threads.x = size;
blocks.x = 1;
} else {
threads.x = 1024;
blocks.x = (size + 1023) / 1024;
}
if (stream_handle) {
cudaStream_t cu_stream = (*(cudaStream_t *)(stream_handle->handle));
transpose_kernel<<<blocks, threads, 0, cu_stream>>>(
output_data, input_data, gpu_buf, ndim, size);
} else {
transpose_kernel<<<blocks, threads>>>(output_data, input_data, gpu_buf,
ndim, size);
}
return 0;
} |
48b7148da45132d18bbb0a4904c560c25349089b.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file color_app.cu
*
* @brief Graph Coloring Gunrock Application
*/
#include <gunrock/gunrock.h>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph definitions
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>
#include <gunrock/graphio/graphio.cuh>
// Graph Coloring
#include <gunrock/app/color/color_enactor.cuh>
#include <gunrock/app/color/color_test.cuh>
#include <gunrock/util/info_rapidjson.cuh>
// Others
#include <cstdio>
namespace gunrock {
namespace app {
namespace color {
hipError_t UseParameters(util::Parameters ¶meters) {
hipError_t retval = hipSuccess;
GUARD_CU(UseParameters_app(parameters));
GUARD_CU(UseParameters_problem(parameters));
GUARD_CU(UseParameters_enactor(parameters));
GUARD_CU(UseParameters_test(parameters));
/*
GUARD_CU(parameters.Use<unsigned int>(
"num-colors",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::INTERNAL_PARAMETER,
0, "number of output colors", __FILE__, __LINE__));
*/
GUARD_CU(parameters.Use<std::string>(
"tag", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, "",
"tag info for json string", __FILE__, __LINE__));
GUARD_CU(parameters.Use<bool>(
"loop-color", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, true,
"Serially compare rand to all node neighbor, disable to use advance \
neighbor reduce (default=false)",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<bool>(
"min-color", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, true,
"Enable coloring with minimum independent set as well as \
maximum(default=true)",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<bool>(
"test-run", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, true,
"Perform test run to atomically generate max iteration (default=true)",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"user-iter",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
3, "Number of iterations color should run for (default=3).", __FILE__,
__LINE__));
GUARD_CU(parameters.Use<bool>(
"JPL", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, false,
"Use JPL exact coloring method (true=use JPL).", __FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"no-conflict", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, 0,
"Resolve color conflict, 0 to skip check, 1 to check at end of\
every iteration with random,\
2 to check at end of every iteration with degree(default = 0).",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"prohibit-size", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, 0,
"Needed to allocate memory for hash function, if parameter is\
positive,\
hash coloring is used instead of random coloring (default = 0).",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"seed", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, time(NULL),
"seed for random number generator", __FILE__, __LINE__));
GUARD_CU(parameters.Use<bool>(
"LBCOLOR", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, false,
"load balancing enabled for graph coloring (true=neighbor_reduce)",
__FILE__, __LINE__));
return retval;
}
/**
* @brief Run color tests
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
 * @param[in] parameters Execution parameters
* @param[in] graph Input graph
...
* @param[in] target where to perform the app
* \return hipError_t error message(s), if any
*/
template <typename GraphT>
hipError_t RunTests(util::Parameters ¶meters, GraphT &graph,
bool color_balance, typename GraphT::VertexT *ref_colors,
util::Location target) {
hipError_t retval = hipSuccess;
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::ValueT ValueT;
typedef typename GraphT::SizeT SizeT;
typedef Problem<GraphT> ProblemT;
typedef Enactor<ProblemT> EnactorT;
// CLI parameters
bool quiet_mode = parameters.Get<bool>("quiet");
int num_runs = parameters.Get<int>("num-runs");
std::string validation = parameters.Get<std::string>("validation");
util::Info info("color", parameters, graph);
util::CpuTimer cpu_timer, total_timer;
cpu_timer.Start();
total_timer.Start();
VertexT *h_colors = new VertexT[graph.nodes];
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
GUARD_CU(problem.Init(graph, target));
GUARD_CU(enactor.Init(problem, target));
cpu_timer.Stop();
parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());
int num_colors = 0;
for (int run_num = 0; run_num < num_runs; ++run_num) {
GUARD_CU(problem.Reset(target));
GUARD_CU(enactor.Reset(target));
util::PrintMsg("__________________________", !quiet_mode);
cpu_timer.Start();
GUARD_CU(enactor.Enact());
cpu_timer.Stop();
info.CollectSingleRun(cpu_timer.ElapsedMillis());
util::PrintMsg(
"--------------------------\nRun " + std::to_string(run_num) +
" elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) +
", #iterations = " +
std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
!quiet_mode);
if (validation == "each") {
GUARD_CU(problem.Extract(h_colors));
SizeT num_errors = Validate_Results(parameters, graph, h_colors,
ref_colors, &num_colors, false);
}
}
cpu_timer.Start();
GUARD_CU(problem.Extract(h_colors));
if (validation == "last") {
SizeT num_errors = Validate_Results(parameters, graph, h_colors, ref_colors,
&num_colors, false);
}
printf("Number of colors needed: %d\n", num_colors);
UseParameters_test(parameters);
// parameters.Set("num-colors", num_colors);
info.SetVal("num-colors", std::to_string(num_colors));
// compute running statistics
info.ComputeTraversalStats(enactor, (VertexT *)NULL);
// Display_Memory_Usage(problem);
#ifdef ENABLE_PERFORMANCE_PROFILING
// Display_Performance_Profiling(enactor);
#endif
// Clean up
GUARD_CU(enactor.Release(target));
GUARD_CU(problem.Release(target));
delete[] h_colors;
h_colors = NULL;
cpu_timer.Stop();
total_timer.Stop();
info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
return retval;
}
} // namespace color
} // namespace app
} // namespace gunrock
/*
* @brief Entry of gunrock_color function
* @tparam GraphT Type of the graph
* @tparam VertexT Type of the colors
 * @param[in] parameters Execution parameters
* @param[in] graph Input graph
* @param[out] colors Return generated colors for each run
* @param[out] num_colors Return number of colors generated for each run
* \return double Return accumulated elapsed times for all runs
*/
template <typename GraphT, typename VertexT = typename GraphT::VertexT,
typename SizeT = typename GraphT::SizeT>
double gunrock_color(gunrock::util::Parameters ¶meters, GraphT &graph,
VertexT **colors, SizeT *num_colors) {
typedef gunrock::app::color::Problem<GraphT> ProblemT;
typedef gunrock::app::color::Enactor<ProblemT> EnactorT;
gunrock::util::CpuTimer cpu_timer;
gunrock::util::Location target = gunrock::util::DEVICE;
double total_time = 0;
if (parameters.UseDefault("quiet")) parameters.Set("quiet", true);
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
problem.Init(graph, target);
enactor.Init(problem, target);
int num_runs = parameters.Get<int>("num-runs");
for (int run_num = 0; run_num < num_runs; ++run_num) {
problem.Reset(target);
enactor.Reset(target);
cpu_timer.Start();
enactor.Enact();
cpu_timer.Stop();
total_time += cpu_timer.ElapsedMillis();
problem.Extract(colors[run_num]);
// count number of colors
std::unordered_set<int> set;
for (SizeT v = 0; v < graph.nodes; v++) {
int c = colors[run_num][v];
if (set.find(c) == set.end()) {
set.insert(c);
num_colors[run_num] += 1;
}
}
}
enactor.Release(target);
problem.Release(target);
return total_time;
}
/*
 * @brief Entry of gunrock_color function
 * @tparam VertexT Type of the colors
 * @tparam SizeT Type of the num_colors
 * @param[in] num_nodes Number of nodes in the input graph
 * @param[in] num_edges Number of edges in the input graph
 * @param[in] row_offsets CSR row offsets of the input graph
 * @param[in] col_indices CSR column indices of the input graph
 * @param[in] num_runs Number of runs to perform
 * @param[out] colors Return generated colors for each run
 * @param[out] num_colors Return number of colors generated for each run
 * @param[in] edge_values Edge values of the input graph (unused)
 * \return double Return accumulated elapsed times for all runs
 */
template <typename VertexT = int, typename SizeT = int,
typename GValueT = unsigned int>
double color(const SizeT num_nodes, const SizeT num_edges,
const SizeT *row_offsets, const VertexT *col_indices,
const int num_runs, int **colors, int *num_colors,
const GValueT edge_values = NULL) {
typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT,
gunrock::graph::HAS_CSR>
GraphT;
typedef typename GraphT::CsrT CsrT;
// Setup parameters
gunrock::util::Parameters parameters("color");
gunrock::graphio::UseParameters(parameters);
gunrock::app::color::UseParameters(parameters);
gunrock::app::UseParameters_test(parameters);
parameters.Parse_CommandLine(0, NULL);
parameters.Set("graph-type", "by-pass");
parameters.Set("num-runs", num_runs);
bool quiet = parameters.Get<bool>("quiet");
GraphT graph;
// Assign pointers into gunrock graph format
graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
graph.CsrT::row_offsets.SetPointer((SizeT *)row_offsets, num_nodes + 1,
gunrock::util::HOST);
graph.CsrT::column_indices.SetPointer((VertexT *)col_indices, num_edges,
gunrock::util::HOST);
// graph.FromCsr(graph.csr(), true, quiet);
gunrock::graphio::LoadGraph(parameters, graph);
// Run the graph coloring
double elapsed_time = gunrock_color(parameters, graph, colors, num_colors);
// Cleanup
graph.Release();
return elapsed_time;
}
/*
 * @brief Entry of gunrock_color function (simple interface, single run)
 * @param[in] num_nodes Number of nodes in the input graph
 * @param[in] num_edges Number of edges in the input graph
 * @param[in] row_offsets CSR row offsets of the input graph
 * @param[in] col_indices CSR column indices of the input graph
 * @param[out] colors Return generated colors
 * @param[in] num_colors Number of colors (passed by value, so the count is not returned to the caller)
 * \return double Return accumulated elapsed times for all runs
 */
double color(const int num_nodes, const int num_edges, const int *row_offsets,
const int *col_indices, int *colors, int num_colors) {
return color(num_nodes, num_edges, row_offsets, col_indices, 1 /* num_runs */,
&colors, &num_colors);
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
| 48b7148da45132d18bbb0a4904c560c25349089b.cu | // ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------
/**
* @file color_app.cu
*
* @brief Graph Coloring Gunrock Application
*/
#include <gunrock/gunrock.h>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph definitions
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>
#include <gunrock/graphio/graphio.cuh>
// Graph Coloring
#include <gunrock/app/color/color_enactor.cuh>
#include <gunrock/app/color/color_test.cuh>
#include <gunrock/util/info_rapidjson.cuh>
// Others
#include <cstdio>
namespace gunrock {
namespace app {
namespace color {
cudaError_t UseParameters(util::Parameters ¶meters) {
cudaError_t retval = cudaSuccess;
GUARD_CU(UseParameters_app(parameters));
GUARD_CU(UseParameters_problem(parameters));
GUARD_CU(UseParameters_enactor(parameters));
GUARD_CU(UseParameters_test(parameters));
/*
GUARD_CU(parameters.Use<unsigned int>(
"num-colors",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::INTERNAL_PARAMETER,
0, "number of output colors", __FILE__, __LINE__));
*/
GUARD_CU(parameters.Use<std::string>(
"tag", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, "",
"tag info for json string", __FILE__, __LINE__));
GUARD_CU(parameters.Use<bool>(
"loop-color", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, true,
"Serially compare rand to all node neighbor, disable to use advance \
neighbor reduce (default=false)",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<bool>(
"min-color", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, true,
"Enable coloring with minimum independent set as well as \
maximum(default=true)",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<bool>(
"test-run", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, true,
"Perform test run to atomically generate max iteration (default=true)",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"user-iter",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
3, "Number of iterations color should run for (default=3).", __FILE__,
__LINE__));
GUARD_CU(parameters.Use<bool>(
"JPL", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, false,
"Use JPL exact coloring method (true=use JPL).", __FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"no-conflict", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, 0,
"Resolve color conflict, 0 to skip check, 1 to check at end of\
every iteration with random,\
2 to check at end of every iteration with degree(default = 0).",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"prohibit-size", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, 0,
"Needed to allocate memory for hash function, if parameter is\
positive,\
hash coloring is used instead of random coloring (default = 0).",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"seed", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, time(NULL),
"seed for random number generator", __FILE__, __LINE__));
GUARD_CU(parameters.Use<bool>(
"LBCOLOR", util::REQUIRED_ARGUMENT | util::OPTIONAL_PARAMETER, false,
"load balancing enabled for graph coloring (true=neighbor_reduce)",
__FILE__, __LINE__));
return retval;
}
/**
* @brief Run color tests
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
 * @param[in] parameters Execution parameters
* @param[in] graph Input graph
...
* @param[in] target where to perform the app
* \return cudaError_t error message(s), if any
*/
template <typename GraphT>
cudaError_t RunTests(util::Parameters ¶meters, GraphT &graph,
bool color_balance, typename GraphT::VertexT *ref_colors,
util::Location target) {
cudaError_t retval = cudaSuccess;
typedef typename GraphT::VertexT VertexT;
typedef typename GraphT::ValueT ValueT;
typedef typename GraphT::SizeT SizeT;
typedef Problem<GraphT> ProblemT;
typedef Enactor<ProblemT> EnactorT;
// CLI parameters
bool quiet_mode = parameters.Get<bool>("quiet");
int num_runs = parameters.Get<int>("num-runs");
std::string validation = parameters.Get<std::string>("validation");
util::Info info("color", parameters, graph);
util::CpuTimer cpu_timer, total_timer;
cpu_timer.Start();
total_timer.Start();
VertexT *h_colors = new VertexT[graph.nodes];
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
GUARD_CU(problem.Init(graph, target));
GUARD_CU(enactor.Init(problem, target));
cpu_timer.Stop();
parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());
int num_colors = 0;
for (int run_num = 0; run_num < num_runs; ++run_num) {
GUARD_CU(problem.Reset(target));
GUARD_CU(enactor.Reset(target));
util::PrintMsg("__________________________", !quiet_mode);
cpu_timer.Start();
GUARD_CU(enactor.Enact());
cpu_timer.Stop();
info.CollectSingleRun(cpu_timer.ElapsedMillis());
util::PrintMsg(
"--------------------------\nRun " + std::to_string(run_num) +
" elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) +
", #iterations = " +
std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
!quiet_mode);
if (validation == "each") {
GUARD_CU(problem.Extract(h_colors));
SizeT num_errors = Validate_Results(parameters, graph, h_colors,
ref_colors, &num_colors, false);
}
}
cpu_timer.Start();
GUARD_CU(problem.Extract(h_colors));
if (validation == "last") {
SizeT num_errors = Validate_Results(parameters, graph, h_colors, ref_colors,
&num_colors, false);
}
printf("Number of colors needed: %d\n", num_colors);
UseParameters_test(parameters);
// parameters.Set("num-colors", num_colors);
info.SetVal("num-colors", std::to_string(num_colors));
// compute running statistics
info.ComputeTraversalStats(enactor, (VertexT *)NULL);
// Display_Memory_Usage(problem);
#ifdef ENABLE_PERFORMANCE_PROFILING
// Display_Performance_Profiling(enactor);
#endif
// Clean up
GUARD_CU(enactor.Release(target));
GUARD_CU(problem.Release(target));
delete[] h_colors;
h_colors = NULL;
cpu_timer.Stop();
total_timer.Stop();
info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
return retval;
}
} // namespace color
} // namespace app
} // namespace gunrock
/*
* @brief Entry of gunrock_color function
* @tparam GraphT Type of the graph
* @tparam VertexT Type of the colors
 * @param[in] parameters Execution parameters
* @param[in] graph Input graph
* @param[out] colors Return generated colors for each run
* @param[out] num_colors Return number of colors generated for each run
* \return double Return accumulated elapsed times for all runs
*/
template <typename GraphT, typename VertexT = typename GraphT::VertexT,
typename SizeT = typename GraphT::SizeT>
double gunrock_color(gunrock::util::Parameters ¶meters, GraphT &graph,
VertexT **colors, SizeT *num_colors) {
typedef gunrock::app::color::Problem<GraphT> ProblemT;
typedef gunrock::app::color::Enactor<ProblemT> EnactorT;
gunrock::util::CpuTimer cpu_timer;
gunrock::util::Location target = gunrock::util::DEVICE;
double total_time = 0;
if (parameters.UseDefault("quiet")) parameters.Set("quiet", true);
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
problem.Init(graph, target);
enactor.Init(problem, target);
int num_runs = parameters.Get<int>("num-runs");
for (int run_num = 0; run_num < num_runs; ++run_num) {
problem.Reset(target);
enactor.Reset(target);
cpu_timer.Start();
enactor.Enact();
cpu_timer.Stop();
total_time += cpu_timer.ElapsedMillis();
problem.Extract(colors[run_num]);
// count number of colors
std::unordered_set<int> set;
for (SizeT v = 0; v < graph.nodes; v++) {
int c = colors[run_num][v];
if (set.find(c) == set.end()) {
set.insert(c);
num_colors[run_num] += 1;
}
}
}
enactor.Release(target);
problem.Release(target);
return total_time;
}
/*
 * @brief Entry of gunrock_color function
 * @tparam VertexT Type of the colors
 * @tparam SizeT Type of the num_colors
 * @param[in] num_nodes Number of nodes in the input graph
 * @param[in] num_edges Number of edges in the input graph
 * @param[in] row_offsets CSR row offsets of the input graph
 * @param[in] col_indices CSR column indices of the input graph
 * @param[in] num_runs Number of runs to perform
 * @param[out] colors Return generated colors for each run
 * @param[out] num_colors Return number of colors generated for each run
 * @param[in] edge_values Edge values of the input graph (unused)
 * \return double Return accumulated elapsed times for all runs
 */
template <typename VertexT = int, typename SizeT = int,
typename GValueT = unsigned int>
double color(const SizeT num_nodes, const SizeT num_edges,
const SizeT *row_offsets, const VertexT *col_indices,
const int num_runs, int **colors, int *num_colors,
const GValueT edge_values = NULL) {
typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT,
gunrock::graph::HAS_CSR>
GraphT;
typedef typename GraphT::CsrT CsrT;
// Setup parameters
gunrock::util::Parameters parameters("color");
gunrock::graphio::UseParameters(parameters);
gunrock::app::color::UseParameters(parameters);
gunrock::app::UseParameters_test(parameters);
parameters.Parse_CommandLine(0, NULL);
parameters.Set("graph-type", "by-pass");
parameters.Set("num-runs", num_runs);
bool quiet = parameters.Get<bool>("quiet");
GraphT graph;
// Assign pointers into gunrock graph format
graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
graph.CsrT::row_offsets.SetPointer((SizeT *)row_offsets, num_nodes + 1,
gunrock::util::HOST);
graph.CsrT::column_indices.SetPointer((VertexT *)col_indices, num_edges,
gunrock::util::HOST);
// graph.FromCsr(graph.csr(), true, quiet);
gunrock::graphio::LoadGraph(parameters, graph);
// Run the graph coloring
double elapsed_time = gunrock_color(parameters, graph, colors, num_colors);
// Cleanup
graph.Release();
return elapsed_time;
}
/*
 * @brief Entry of gunrock_color function (simple interface, single run)
 * @param[in] num_nodes Number of nodes in the input graph
 * @param[in] num_edges Number of edges in the input graph
 * @param[in] row_offsets CSR row offsets of the input graph
 * @param[in] col_indices CSR column indices of the input graph
 * @param[out] colors Return generated colors
 * @param[in] num_colors Number of colors (passed by value, so the count is not returned to the caller)
 * \return double Return accumulated elapsed times for all runs
 */
double color(const int num_nodes, const int num_edges, const int *row_offsets,
const int *col_indices, int *colors, int num_colors) {
return color(num_nodes, num_edges, row_offsets, col_indices, 1 /* num_runs */,
&colors, &num_colors);
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
bd5541c5720078005fde173e410ed269ef92a714.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<iostream>
#include"error_check.h"
#include"gpu_timer.h"
#define DTYPE double
#define DTYPE_FORMAT "%lf"
#define BLOCK_SIZE 32
float time_cost_gpu = -1, time_cost_cpu = -1;
hipEvent_t gpu_start, gpu_stop, cpu_start, cpu_stop;
/* CPU implementation */
DTYPE partialSum(DTYPE *vector, int n) {
DTYPE temp = 0;
for (int i = 0; i < n; i++) {
temp += vector[i];
}
return temp;
}
/*
* Todo:
* reduction kernel in which the threads are mapped to data with stride 2
*/
__global__ void kernel_reduction_non_consecutive(DTYPE *input, DTYPE *output, int n) {
    int tid = threadIdx.x, offset = blockIdx.x*blockDim.x;
    for(int s = 1; s < blockDim.x; s<<=1){ // stride-2 mapping: the active threads are non-consecutive
        int idx = 2*s*tid;
        if(idx + s < BLOCK_SIZE && offset + idx + s < n) // keep out-of-range threads from adding extra values
            input[offset+idx] += input[offset+idx+s];
        __syncthreads(); // reached by every thread in the block, so the barrier is never divergent
    }
    if(tid == 0)
        output[blockIdx.x] = input[offset];
}
/*
* Todo:
* reduction kernel in which the threads are consecutively mapped to data
*/
__global__ void kernel_reduction_consecutive(DTYPE *input, DTYPE *output, int n) {
    int tid = threadIdx.x, offset = blockIdx.x*blockDim.x;
    for(int s = BLOCK_SIZE/2; s >= 1; s>>=1){
        if(tid < s && offset + tid + s < n) // consecutive mapping: the first s threads stay active
            input[offset+tid] += input[offset+tid+s];
        __syncthreads();
    }
    if(tid == 0)
        output[blockIdx.x] = input[offset];
}
/*
* Todo:
* Wrapper function that utilizes cpu computation to sum the reduced results from blocks
*/
DTYPE gpu_reduction_cpu(DTYPE *input, int n,
void (*kernel)(DTYPE *input, DTYPE *output, int n)) {
int MEM_SIZE = sizeof(DTYPE) * n;
DTYPE *in = nullptr, *out = nullptr, *output = nullptr;
CHECK(hipMalloc((void**)&in, MEM_SIZE));
CHECK(hipMalloc((void**)&out, MEM_SIZE));
output = (DTYPE*)malloc(MEM_SIZE);
CHECK(hipMemcpy(in, input, MEM_SIZE, hipMemcpyHostToDevice));
int grid = ceil((double)n/BLOCK_SIZE);
hipEventRecord(gpu_start);
hipLaunchKernelGGL(( kernel), dim3(grid), dim3(BLOCK_SIZE), 0, 0, in, out, n);
hipEventRecord(gpu_stop);
hipEventSynchronize(gpu_stop);
hipEventElapsedTime(&time_cost_gpu, gpu_start, gpu_stop);
CHECK(hipMemcpy(output, out, MEM_SIZE, hipMemcpyDeviceToHost));
CHECK(hipFree(in));
CHECK(hipFree(out));
DTYPE sum = 0;
for(int i = 0; i < grid; i += 1){
sum += output[i];
}
free(output);
return sum;
}
DTYPE* test_data_gen(int n) {
srand(time(0));
DTYPE *data = (DTYPE *) malloc(n * sizeof(DTYPE));
for (int i = 0; i < n; i++) {
data[i] = 1.0 * (rand() % RAND_MAX) / RAND_MAX;
}
return data;
}
void test(int n,
DTYPE (*reduction)(DTYPE *input, int n,
void (*kernel)(DTYPE *input, DTYPE *output, int n)),
void (*kernel)(DTYPE *input, DTYPE *output, int n))
{
DTYPE computed_result, computed_result_gpu;
DTYPE *vector_input;
vector_input = test_data_gen(n);
printf("---------------------------\n");
hipEventCreate(&gpu_start);
hipEventCreate(&gpu_stop);
hipEventCreate(&cpu_start);
hipEventCreate(&cpu_stop);
///cpu
hipEventRecord(cpu_start);
computed_result = partialSum(vector_input, n);
hipEventRecord(cpu_stop);
hipEventSynchronize(cpu_stop);
hipEventElapsedTime(&time_cost_cpu, cpu_start, cpu_stop);
printf("Time cost (CPU):%f ms \n", time_cost_cpu);
///
///gpu
computed_result_gpu = reduction(vector_input, n, kernel);
printf("Time cost (GPU):%f ms \n", time_cost_gpu);
///
printf("[%d] Computed sum (CPU): ", n);
printf(DTYPE_FORMAT, computed_result);
printf(" GPU result:");
printf(DTYPE_FORMAT, computed_result_gpu);
    if (fabs(computed_result_gpu - computed_result) < 1e-3) {
printf(" PASSED! \n");
} else {
printf(" FAILED! \n");
}
printf("\n");
free(vector_input);
}
int main(int argc, char **argv) {
int n_arr[] = {1, 7, 585, 5000, 300001, 1<<20};
for(int i=0; i<sizeof(n_arr)/sizeof(int); i++)
{
test(n_arr[i], gpu_reduction_cpu, kernel_reduction_non_consecutive);
test(n_arr[i], gpu_reduction_cpu, kernel_reduction_consecutive);
}
return 0;
} | bd5541c5720078005fde173e410ed269ef92a714.cu | #include<stdio.h>
#include<stdlib.h>
#include<iostream>
#include"error_check.h"
#include"gpu_timer.h"
#define DTYPE double
#define DTYPE_FORMAT "%lf"
#define BLOCK_SIZE 32
float time_cost_gpu = -1, time_cost_cpu = -1;
cudaEvent_t gpu_start, gpu_stop, cpu_start, cpu_stop;
/* CPU implementation */
DTYPE partialSum(DTYPE *vector, int n) {
DTYPE temp = 0;
for (int i = 0; i < n; i++) {
temp += vector[i];
}
return temp;
}
/*
* Todo:
* reduction kernel in which the threads are mapped to data with stride 2
*/
__global__ void kernel_reduction_non_consecutive(DTYPE *input, DTYPE *output, int n) {
    int tid = threadIdx.x, offset = blockIdx.x*blockDim.x;
    for(int s = 1; s < blockDim.x; s<<=1){ // stride-2 mapping: the active threads are non-consecutive
        int idx = 2*s*tid;
        if(idx + s < BLOCK_SIZE && offset + idx + s < n) // keep out-of-range threads from adding extra values
            input[offset+idx] += input[offset+idx+s];
        __syncthreads(); // reached by every thread in the block, so the barrier is never divergent
    }
    if(tid == 0)
        output[blockIdx.x] = input[offset];
}
/*
* Todo:
* reduction kernel in which the threads are consecutively mapped to data
*/
__global__ void kernel_reduction_consecutive(DTYPE *input, DTYPE *output, int n) {
    int tid = threadIdx.x, offset = blockIdx.x*blockDim.x;
    for(int s = BLOCK_SIZE/2; s >= 1; s>>=1){
        if(tid < s && offset + tid + s < n) // consecutive mapping: the first s threads stay active
            input[offset+tid] += input[offset+tid+s];
        __syncthreads();
    }
    if(tid == 0)
        output[blockIdx.x] = input[offset];
}
/*
* Todo:
* Wrapper function that utilizes cpu computation to sum the reduced results from blocks
*/
DTYPE gpu_reduction_cpu(DTYPE *input, int n,
void (*kernel)(DTYPE *input, DTYPE *output, int n)) {
int MEM_SIZE = sizeof(DTYPE) * n;
DTYPE *in = nullptr, *out = nullptr, *output = nullptr;
CHECK(cudaMalloc((void**)&in, MEM_SIZE));
CHECK(cudaMalloc((void**)&out, MEM_SIZE));
output = (DTYPE*)malloc(MEM_SIZE);
CHECK(cudaMemcpy(in, input, MEM_SIZE, cudaMemcpyHostToDevice));
int grid = ceil((double)n/BLOCK_SIZE);
cudaEventRecord(gpu_start);
kernel<<<grid, BLOCK_SIZE>>>(in, out, n);
cudaEventRecord(gpu_stop);
cudaEventSynchronize(gpu_stop);
cudaEventElapsedTime(&time_cost_gpu, gpu_start, gpu_stop);
CHECK(cudaMemcpy(output, out, MEM_SIZE, cudaMemcpyDeviceToHost));
CHECK(cudaFree(in));
CHECK(cudaFree(out));
DTYPE sum = 0;
for(int i = 0; i < grid; i += 1){
sum += output[i];
}
free(output);
return sum;
}
DTYPE* test_data_gen(int n) {
srand(time(0));
DTYPE *data = (DTYPE *) malloc(n * sizeof(DTYPE));
for (int i = 0; i < n; i++) {
data[i] = 1.0 * (rand() % RAND_MAX) / RAND_MAX;
}
return data;
}
void test(int n,
DTYPE (*reduction)(DTYPE *input, int n,
void (*kernel)(DTYPE *input, DTYPE *output, int n)),
void (*kernel)(DTYPE *input, DTYPE *output, int n))
{
DTYPE computed_result, computed_result_gpu;
DTYPE *vector_input;
vector_input = test_data_gen(n);
printf("---------------------------\n");
cudaEventCreate(&gpu_start);
cudaEventCreate(&gpu_stop);
cudaEventCreate(&cpu_start);
cudaEventCreate(&cpu_stop);
///cpu
cudaEventRecord(cpu_start);
computed_result = partialSum(vector_input, n);
cudaEventRecord(cpu_stop);
cudaEventSynchronize(cpu_stop);
cudaEventElapsedTime(&time_cost_cpu, cpu_start, cpu_stop);
printf("Time cost (CPU):%f ms \n", time_cost_cpu);
///
///gpu
computed_result_gpu = reduction(vector_input, n, kernel);
printf("Time cost (GPU):%f ms \n", time_cost_gpu);
///
printf("[%d] Computed sum (CPU): ", n);
printf(DTYPE_FORMAT, computed_result);
printf(" GPU result:");
printf(DTYPE_FORMAT, computed_result_gpu);
    if (fabs(computed_result_gpu - computed_result) < 1e-3) {
printf(" PASSED! \n");
} else {
printf(" FAILED! \n");
}
printf("\n");
free(vector_input);
}
int main(int argc, char **argv) {
int n_arr[] = {1, 7, 585, 5000, 300001, 1<<20};
for(int i=0; i<sizeof(n_arr)/sizeof(int); i++)
{
test(n_arr[i], gpu_reduction_cpu, kernel_reduction_non_consecutive);
test(n_arr[i], gpu_reduction_cpu, kernel_reduction_consecutive);
}
return 0;
} |
99f5c69ff275d7c1d5ff65c3960a19cbe108dd46.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--gridDim=1 --blockDim=32
typedef unsigned int clock_t;
#define syncthreads __syncthreads
__global__ void sum(clock_t *d_clocks, int N)
{
__shared__ clock_t s_clocks[32];
clock_t my_sum = 0;
for (int i = threadIdx.x ; i < N ; i+= blockDim.x)
{
my_sum += d_clocks[i];
}
s_clocks[threadIdx.x] = my_sum;
syncthreads();
for (int i=16; i>0; i/=2)
{
if (threadIdx.x < i)
{
s_clocks[threadIdx.x] += s_clocks[threadIdx.x + i];
}
syncthreads();
}
d_clocks[0] = s_clocks[0];
}
| 99f5c69ff275d7c1d5ff65c3960a19cbe108dd46.cu | //pass
//--gridDim=1 --blockDim=32
typedef unsigned int clock_t;
#define syncthreads __syncthreads
__global__ void sum(clock_t *d_clocks, int N)
{
__shared__ clock_t s_clocks[32];
clock_t my_sum = 0;
for (int i = threadIdx.x ; i < N ; i+= blockDim.x)
{
my_sum += d_clocks[i];
}
s_clocks[threadIdx.x] = my_sum;
syncthreads();
for (int i=16; i>0; i/=2)
{
if (threadIdx.x < i)
{
s_clocks[threadIdx.x] += s_clocks[threadIdx.x + i];
}
syncthreads();
}
d_clocks[0] = s_clocks[0];
}
|
a02ecd9ea1d167af0c94d6e2eb77d6fa11d78d5b.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Name : Veerakumar Natarajan
* Student Id: 200208042
*
* 2d convolution program
*/
#include <stdio.h>
#include <fstream>
#include <sstream>
#include <stdlib.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>
/**
* CUDA Kernel Device code
*
* Computes the 2d convolution of A and B into C.
*/
__global__ void
convol(float *A, float *B, float *C, int row_a, int row_b, int row_c, int col_a, int col_b, int col_c)
{
int x = blockDim.y * blockIdx.y + threadIdx.y;
int y = blockDim.x * blockIdx.x + threadIdx.x;
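    // Each thread computes one element C[x][y] of the full convolution, accumulating B[i][j] * A[x-i][y-j] over every kernel tap that falls inside A.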
    if(x < row_c && y < col_c) {
        C[x * col_c + y] = 0.0;
for(int i = 0; i < row_b; i++) {
for(int j = 0; j < col_b; j++) {
if(((x - i) < row_a && (x - i) >= 0) && ((y - j) < col_a && (y - j) >= 0))
C[x * col_c + y] += B[i * col_b + j] * A[(x - i) * col_a + (y - j)];
}
}
}
}
/**
* Host main routine
*/
int
main(int argc, char *argv[])
{
// Error code to check return values for CUDA calls
hipError_t err = hipSuccess;
float *h_A, *h_B, *h_C, tmp;
int row_a, row_b, row_c, col_a, col_b, col_c;
int a_matrix = 1;
int i, j;
int size_a, size_b, size_c;
std::ifstream file(argv[1]);
std::string row;
row_a=row_b=row_c=col_a=col_b=col_c=0;
// Finding size of matrix A and matrix B
while(std::getline(file, row)) {
if(row.empty())
a_matrix = 0;
std::istringstream iss(row);
if(a_matrix == 1) {
col_a=0;
while(iss.good()) {
iss >> tmp;
col_a++;
}
row_a++;
} else {
if(!row.empty()) {
col_b=0;
while(iss.good()) {
iss >> tmp;
col_b++;
}
row_b++;
}
}
}
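    // Full (untruncated) 2D convolution output size: (row_a + row_b - 1) x (col_a + col_b - 1)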
row_c = row_a + row_b - 1;
col_c = col_a + col_b - 1;
// Calculating size of matrix A, B and C
size_a = row_a * col_a;
size_b = row_b * col_b;
size_c = row_c * col_c;
// Allocate the host input vector A, B
h_A = (float *)malloc(size_a * sizeof(float));
h_B = (float *)malloc(size_b * sizeof(float));
// Allocate the host output vector
h_C = (float *)malloc(size_c * sizeof(float));
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Reading value of matrix A and B from input file
std::ifstream file1(argv[1]);
a_matrix = 1;
i = j = 0;
while(std::getline(file1, row)) {
if(row.empty())
a_matrix = 0;
std::istringstream iss1(row);
if(a_matrix == 1){
while(iss1.good()) {
iss1 >> tmp;
h_A[i] = tmp;
i++;
}
} else {
if(!row.empty()) {
while(iss1.good()) {
iss1 >> tmp;
h_B[j] = tmp;
j++;
}
}
}
}
// Allocate the device input vector A
float *d_A = NULL;
err = hipMalloc((void **)&d_A, size_a * sizeof(float));
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_B = NULL;
err = hipMalloc((void **)&d_B, size_b * sizeof(float));
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_C = NULL;
err = hipMalloc((void **)&d_C, size_c * sizeof(float));
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
err = hipMemcpy(d_A, h_A, size_a * sizeof(float), hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_B, h_B, size_b * sizeof(float), hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector 2dconvol CUDA Kernel
dim3 dimBlock(row_c, col_c, 1);
dim3 dimGrid(4, 4, 1);
hipLaunchKernelGGL(( convol), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C, row_a, row_b, row_c, col_a, col_b, col_c);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
err = hipMemcpy(h_C, d_C, size_c * sizeof(float), hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free device global memory
err = hipFree(d_A);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_B);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_C);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
for(i = 0; i < row_c; i++) {
for(j = 0; j < col_c; j++) {
printf("%.3f ", h_C[i * col_c + j]);
}
printf("\n");
}
// Free host memory
free(h_A);
free(h_B);
free(h_C);
// Reset the device and exit
// hipDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling hipDeviceReset causes all profile data to be
// flushed before the application exits
err = hipDeviceReset();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
return 0;
}
| a02ecd9ea1d167af0c94d6e2eb77d6fa11d78d5b.cu | /**
* Name : Veerakumar Natarajan
* Student Id: 200208042
*
* 2d convolution program
*/
#include <stdio.h>
#include <fstream>
#include <sstream>
#include <stdlib.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
/**
* CUDA Kernel Device code
*
* Computes the 2d convolution of A and B into C.
*/
__global__ void
convol(float *A, float *B, float *C, int row_a, int row_b, int row_c, int col_a, int col_b, int col_c)
{
int x = blockDim.y * blockIdx.y + threadIdx.y;
int y = blockDim.x * blockIdx.x + threadIdx.x;
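    // Each thread computes one element C[x][y] of the full convolution, accumulating B[i][j] * A[x-i][y-j] over every kernel tap that falls inside A.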
    if(x < row_c && y < col_c) {
        C[x * col_c + y] = 0.0;
for(int i = 0; i < row_b; i++) {
for(int j = 0; j < col_b; j++) {
if(((x - i) < row_a && (x - i) >= 0) && ((y - j) < col_a && (y - j) >= 0))
C[x * col_c + y] += B[i * col_b + j] * A[(x - i) * col_a + (y - j)];
}
}
}
}
/**
* Host main routine
*/
int
main(int argc, char *argv[])
{
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
float *h_A, *h_B, *h_C, tmp;
int row_a, row_b, row_c, col_a, col_b, col_c;
int a_matrix = 1;
int i, j;
int size_a, size_b, size_c;
std::ifstream file(argv[1]);
std::string row;
row_a=row_b=row_c=col_a=col_b=col_c=0;
// Finding size of matrix A and matrix B
while(std::getline(file, row)) {
if(row.empty())
a_matrix = 0;
std::istringstream iss(row);
if(a_matrix == 1) {
col_a=0;
while(iss.good()) {
iss >> tmp;
col_a++;
}
row_a++;
} else {
if(!row.empty()) {
col_b=0;
while(iss.good()) {
iss >> tmp;
col_b++;
}
row_b++;
}
}
}
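    // Full (untruncated) 2D convolution output size: (row_a + row_b - 1) x (col_a + col_b - 1)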
row_c = row_a + row_b - 1;
col_c = col_a + col_b - 1;
// Calculating size of matrix A, B and C
size_a = row_a * col_a;
size_b = row_b * col_b;
size_c = row_c * col_c;
// Allocate the host input vector A, B
h_A = (float *)malloc(size_a * sizeof(float));
h_B = (float *)malloc(size_b * sizeof(float));
// Allocate the host output vector
h_C = (float *)malloc(size_c * sizeof(float));
// Verify that allocations succeeded
if (h_A == NULL || h_B == NULL || h_C == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Reading value of matrix A and B from input file
std::ifstream file1(argv[1]);
a_matrix = 1;
i = j = 0;
while(std::getline(file1, row)) {
if(row.empty())
a_matrix = 0;
std::istringstream iss1(row);
if(a_matrix == 1){
while(iss1.good()) {
iss1 >> tmp;
h_A[i] = tmp;
i++;
}
} else {
if(!row.empty()) {
while(iss1.good()) {
iss1 >> tmp;
h_B[j] = tmp;
j++;
}
}
}
}
// Allocate the device input vector A
float *d_A = NULL;
err = cudaMalloc((void **)&d_A, size_a * sizeof(float));
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
float *d_B = NULL;
err = cudaMalloc((void **)&d_B, size_b * sizeof(float));
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector C
float *d_C = NULL;
err = cudaMalloc((void **)&d_C, size_c * sizeof(float));
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
err = cudaMemcpy(d_A, h_A, size_a * sizeof(float), cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_B, h_B, size_b * sizeof(float), cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Launch the Vector 2dconvol CUDA Kernel
dim3 dimBlock(row_c, col_c, 1);
dim3 dimGrid(4, 4, 1);
convol<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, row_a, row_b, row_c, col_a, col_b, col_c);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the device result vector in device memory to the host result vector
// in host memory.
err = cudaMemcpy(h_C, d_C, size_c * sizeof(float), cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free device global memory
err = cudaFree(d_A);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_B);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_C);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
for(i = 0; i < row_c; i++) {
for(j = 0; j < col_c; j++) {
printf("%.3f ", h_C[i * col_c + j]);
}
printf("\n");
}
// Free host memory
free(h_A);
free(h_B);
free(h_C);
// Reset the device and exit
// cudaDeviceReset causes the driver to clean up all state. While
// not mandatory in normal operation, it is good practice. It is also
// needed to ensure correct operation when the application is being
// profiled. Calling cudaDeviceReset causes all profile data to be
// flushed before the application exits
err = cudaDeviceReset();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
return 0;
}
|
d129704b5f447af10ecd0206923723f110ab0eb7.hip | // !!! This is a file automatically generated by hipify!!!
#include <cuda/Cuda.hpp>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "helper_cuda.h"
#include <helper_functions.h>
namespace ORB_SLAM2 { namespace cuda {
void deviceSynchronize() {
checkCudaErrors( hipDeviceSynchronize() );
}
} }
| d129704b5f447af10ecd0206923723f110ab0eb7.cu | #include <cuda/Cuda.hpp>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "helper_cuda.h"
#include <helper_functions.h>
namespace ORB_SLAM2 { namespace cuda {
void deviceSynchronize() {
checkCudaErrors( cudaDeviceSynchronize() );
}
} }
|
04986bc0ec8d84f2dba68389b2cad00dc9c1b692.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef SCAN_IMPL_CU
#define SCAN_IMPL_CU
#include "scan.hip"
#include "../include/common.h"
static void scanImpl(int *d_input, int rLen, int *d_output, struct statistic * pp)
{
int len = 2;
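    // Inputs shorter than two elements are zero-padded into a length-2 temporary buffer, scanned, and the first rLen results copied back.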
if(rLen < len){
int *input, *output;
hipMalloc((void**)&input,len*sizeof(int));
hipMalloc((void**)&output, len*sizeof(int));
hipMemset(input, 0, len*sizeof(int));
hipMemcpy(input, d_input, rLen*sizeof(int), hipMemcpyDeviceToDevice);
preallocBlockSums(len);
prescanArray(output, input, len, pp);
deallocBlockSums();
hipMemcpy(d_output,output,rLen*sizeof(int),hipMemcpyDeviceToDevice);
hipFree(input);
hipFree(output);
return;
}else{
preallocBlockSums(rLen);
prescanArray(d_output, d_input, rLen, pp);
deallocBlockSums();
}
}
#endif
| 04986bc0ec8d84f2dba68389b2cad00dc9c1b692.cu | #ifndef SCAN_IMPL_CU
#define SCAN_IMPL_CU
#include "scan.cu"
#include "../include/common.h"
static void scanImpl(int *d_input, int rLen, int *d_output, struct statistic * pp)
{
int len = 2;
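    // Inputs shorter than two elements are zero-padded into a length-2 temporary buffer, scanned, and the first rLen results copied back.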
if(rLen < len){
int *input, *output;
cudaMalloc((void**)&input,len*sizeof(int));
cudaMalloc((void**)&output, len*sizeof(int));
cudaMemset(input, 0, len*sizeof(int));
cudaMemcpy(input, d_input, rLen*sizeof(int), cudaMemcpyDeviceToDevice);
preallocBlockSums(len);
prescanArray(output, input, len, pp);
deallocBlockSums();
cudaMemcpy(d_output,output,rLen*sizeof(int),cudaMemcpyDeviceToDevice);
cudaFree(input);
cudaFree(output);
return;
}else{
preallocBlockSums(rLen);
prescanArray(d_output, d_input, rLen, pp);
deallocBlockSums();
}
}
#endif
|
0886f9edaa1713d0f5c187f07f0a870a80bebad1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "GPUMultiplyMatrix.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
long *matrix1 = NULL;
hipMalloc(&matrix1, XSIZE*YSIZE);
long *matrix2 = NULL;
hipMalloc(&matrix2, XSIZE*YSIZE);
int paths = 1;
int count = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
GPUMultiplyMatrix), dim3(gridBlock),dim3(threadBlock), 0, 0, matrix1,matrix2,paths,count);
hipDeviceSynchronize();
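    // Untimed warm-up launches so the measured loop below excludes one-time initialization costs.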
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
GPUMultiplyMatrix), dim3(gridBlock),dim3(threadBlock), 0, 0, matrix1,matrix2,paths,count);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
GPUMultiplyMatrix), dim3(gridBlock),dim3(threadBlock), 0, 0, matrix1,matrix2,paths,count);
}
    hipDeviceSynchronize(); // kernel launches are asynchronous; wait for them so the timing covers execution, not just launch overhead
    auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 0886f9edaa1713d0f5c187f07f0a870a80bebad1.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "GPUMultiplyMatrix.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
long *matrix1 = NULL;
cudaMalloc(&matrix1, XSIZE*YSIZE);
long *matrix2 = NULL;
cudaMalloc(&matrix2, XSIZE*YSIZE);
int paths = 1;
int count = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
GPUMultiplyMatrix<<<gridBlock,threadBlock>>>(matrix1,matrix2,paths,count);
cudaDeviceSynchronize();
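    // Untimed warm-up launches so the measured loop below excludes one-time initialization costs.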
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
GPUMultiplyMatrix<<<gridBlock,threadBlock>>>(matrix1,matrix2,paths,count);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
GPUMultiplyMatrix<<<gridBlock,threadBlock>>>(matrix1,matrix2,paths,count);
}
    cudaDeviceSynchronize(); // kernel launches are asynchronous; wait for them so the timing covers execution, not just launch overhead
    auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
c9f39ce820a5b72620e31f999fe915c2d997d64e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cudaFunctions.h"
__global__ void example_kernel(){
printf("Hello from CUDA\n");
}
void call_example(){
printf("able to call cuda function\n");
hipLaunchKernelGGL(( example_kernel), dim3(1),dim3(1), 0, 0, );
hipDeviceSynchronize();
}
| c9f39ce820a5b72620e31f999fe915c2d997d64e.cu | #include "cudaFunctions.h"
__global__ void example_kernel(){
printf("Hello from CUDA\n");
}
void call_example(){
printf("able to call cuda function\n");
example_kernel<<<1,1>>>();
cudaDeviceSynchronize();
}
|
5e2f206eeb9b1cd31300d561650096dc8c208cb1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by jiashuai on 17-9-20.
//
#include <thundersvm/syncarray.h>
#include <hipsparse.h>
#include "thundersvm/kernel/kernelmatrix_kernel.h"
#include <thundersvm/config.h>
#include <rocblas.h>
#include <hipcub/hipcub.hpp>
namespace svm_kernel {
__global__ void
kernel_get_working_set_ins(const kernel_type *val, const int *col_ind, const int *row_ptr, const int *data_row_idx,
kernel_type *data_rows,
int m, int n) {
KERNEL_LOOP(i, m) {
int row = data_row_idx[i];
for (int j = row_ptr[row]; j < row_ptr[row + 1]; ++j) {
int col = col_ind[j];
data_rows[col * m + i] = val[j]; // col-major for cuSPARSE
}
}
}
__global__ void
kernel_get_working_set_ins_dns(const kernel_type *val, const int *data_row_idx,
kernel_type *data_rows,
int m, int n,int n_instances) {
KERNEL_LOOP(i, m) {
int row = data_row_idx[i];
for (int j = 0; j < n; ++j) {
//data_rows[i*n + j] = val[row*n+j]; // row-major for cublas
// data_rows[i + j*m] = val[row*n+j]; // col-major for cublas
data_rows[i + j*m] = val[row+j*n_instances]; // col-major for cublas, val col major
}
}
}
__global__ void
kernel_RBF_kernel(const kernel_type *self_dot0, const kernel_type *self_dot1, kernel_type *dot_product, int m, int n,
kernel_type gamma) {
//m rows of kernel matrix, where m is the working set size; n is the number of training instances
KERNEL_LOOP(idx, m * n) {
int i = idx / n;//i is row id
int j = idx % n;//j is column id
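            // ||x_i - x_j||^2 = x_i.x_i + x_j.x_j - 2 * x_i.x_j, so K(i, j) = exp(-gamma * ||x_i - x_j||^2)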
dot_product[idx] = expf(-(self_dot0[i] + self_dot1[j] - dot_product[idx] * 2) * gamma);
}
}
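    // The expression above uses the expansion ||x_i - x_j||^2 = <x_i,x_i> + <x_j,x_j> - 2<x_i,x_j>:
    // with self_dot0[i] = <x_i,x_i>, self_dot1[j] = <x_j,x_j> and dot_product[idx] = <x_i,x_j>,
    // the stored value is the RBF kernel K(x_i, x_j) = exp(-gamma * ||x_i - x_j||^2).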
__global__ void
kernel_RBF_kernel(const int *self_dot0_idx, const kernel_type *self_dot1, kernel_type *dot_product, int m, int n,
kernel_type gamma) {
//compute m rows of kernel matrix, where m is the working set size and n is the number of training instances, according to idx
KERNEL_LOOP(idx, m * n) {
int i = idx / n;//i is row id
int j = idx % n;//j is column id
dot_product[idx] = expf(-(self_dot1[self_dot0_idx[i]] + self_dot1[j] - dot_product[idx] * 2) * gamma);
}
}
__global__ void
kernel_sum_kernel_values(const float_type *coef, int total_sv, const int *sv_start, const int *sv_count,
const float_type *rho,
const kernel_type *k_mat, float_type *dec_values, int n_classes, int n_instances) {
KERNEL_LOOP(idx, n_instances) {
int k = 0;
int n_binary_models = n_classes * (n_classes - 1) / 2;
for (int i = 0; i < n_classes; ++i) {
for (int j = i + 1; j < n_classes; ++j) {
int si = sv_start[i];
int sj = sv_start[j];
int ci = sv_count[i];
int cj = sv_count[j];
const float_type *coef1 = &coef[(j - 1) * total_sv];
const float_type *coef2 = &coef[i * total_sv];
const kernel_type *k_values = &k_mat[idx * total_sv];
double sum = 0;
for (int l = 0; l < ci; ++l) {
sum += coef1[si + l] * k_values[si + l];
}
for (int l = 0; l < cj; ++l) {
sum += coef2[sj + l] * k_values[sj + l];
}
dec_values[idx * n_binary_models + k] = sum - rho[k];
k++;
}
}
}
}
__global__ void
kernel_poly_kernel(kernel_type *dot_product, kernel_type gamma, kernel_type coef0, int degree, int mn) {
KERNEL_LOOP(idx, mn) {
dot_product[idx] = powf(gamma * dot_product[idx] + coef0, degree);
}
}
__global__ void kernel_sigmoid_kernel(kernel_type *dot_product, kernel_type gamma, kernel_type coef0, int mn) {
KERNEL_LOOP(idx, mn) {
dot_product[idx] = tanhf(gamma * dot_product[idx] + coef0);
}
}
void sum_kernel_values(const SyncArray<float_type> &coef, int total_sv, const SyncArray<int> &sv_start,
const SyncArray<int> &sv_count, const SyncArray<float_type> &rho,
const SyncArray<kernel_type> &k_mat,
SyncArray<float_type> &dec_values, int n_classes, int n_instances) {
SAFE_KERNEL_LAUNCH(kernel_sum_kernel_values, coef.device_data(), total_sv, sv_start.device_data(),
sv_count.device_data(), rho.device_data(), k_mat.device_data(), dec_values.device_data(),
n_classes, n_instances);
}
void
get_working_set_ins(const SyncArray<kernel_type> &val, const SyncArray<int> &col_ind, const SyncArray<int> &row_ptr,
const SyncArray<int> &data_row_idx, SyncArray<kernel_type> &data_rows, int m, int n) {
SAFE_KERNEL_LAUNCH(kernel_get_working_set_ins, val.device_data(), col_ind.device_data(), row_ptr.device_data(),
data_row_idx.device_data(), data_rows.device_data(), m, n);
}
void
get_working_set_ins_dns(const SyncArray<kernel_type> &val,
const SyncArray<int> &data_row_idx, SyncArray<kernel_type> &data_rows, int m, int n,int n_instances){
SAFE_KERNEL_LAUNCH(kernel_get_working_set_ins_dns, val.device_data(),
data_row_idx.device_data(), data_rows.device_data(), m, n,n_instances);
}
void
RBF_kernel(const SyncArray<kernel_type> &self_dot0, const SyncArray<kernel_type> &self_dot1,
SyncArray<kernel_type> &dot_product, int m,
int n,
kernel_type gamma) {
SAFE_KERNEL_LAUNCH(kernel_RBF_kernel, self_dot0.device_data(), self_dot1.device_data(),
dot_product.device_data(), m, n, gamma);
}
void
RBF_kernel(const SyncArray<int> &self_dot0_idx, const SyncArray<kernel_type> &self_dot1,
SyncArray<kernel_type> &dot_product, int m,
int n, kernel_type gamma) {
SAFE_KERNEL_LAUNCH(kernel_RBF_kernel, self_dot0_idx.device_data(), self_dot1.device_data(),
dot_product.device_data(), m, n, gamma);
}
void poly_kernel(SyncArray<kernel_type> &dot_product, kernel_type gamma, kernel_type coef0, int degree, int mn) {
SAFE_KERNEL_LAUNCH(kernel_poly_kernel, dot_product.device_data(), gamma, coef0, degree, mn);
}
void sigmoid_kernel(SyncArray<kernel_type> &dot_product, kernel_type gamma, kernel_type coef0, int mn) {
SAFE_KERNEL_LAUNCH(kernel_sigmoid_kernel, dot_product.device_data(), gamma, coef0, mn);
}
hipsparseHandle_t handle;
hipsparseMatDescr_t descr;
bool cusparse_init;
hipblasHandle_t handle2;
void dns_csr_mul(int m, int n, int k, const SyncArray<kernel_type> &dense_mat, const SyncArray<kernel_type> &csr_val,
const SyncArray<int> &csr_row_ptr, const SyncArray<int> &csr_col_ind, int nnz,
SyncArray<kernel_type> &result) {
if (!cusparse_init) {
hipsparseCreate(&handle);
hipsparseCreateMatDescr(&descr);
hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO);
hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL);
cusparse_init = true;
hipblasCreate(&handle2);
}
kernel_type one(1);
kernel_type zero(0);
#if (CUDART_VERSION >= 11000)
hipsparseSpMatDescr_t matA;
hipsparseDnMatDescr_t matB, matC;
#ifdef USE_DOUBLE
hipDataType data_type = HIP_R_64F;
#else//kernel type is float
hipDataType data_type = HIP_R_32F;
#endif
hipsparseCreateCsr(&matA, m, k, nnz, (void*)csr_row_ptr.device_data(), (void*)csr_col_ind.device_data(),
(void*)csr_val.device_data(), HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I,
HIPSPARSE_INDEX_BASE_ZERO, data_type);
//hipsparseCreateDnMat(&matB, n, k, n, (void*)dense_mat.device_data(), data_type, HIPSPARSE_ORDER_COL);
//hipsparseCreateDnMat(&matC, m, n, m, (void*)result.device_data(), data_type, HIPSPARSE_ORDER_COL);
//size_t buffer_size = 0;
//hipsparseSpMM_bufferSize(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_TRANSPOSE,
// &one, matA, matB, &zero, matC, data_type, HIPSPARSE_CSRMM_ALG1,
// &buffer_size);
//void *p_buffer = nullptr;
//hipMalloc((void**)&p_buffer, buffer_size);
//hipsparseSpMM(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_TRANSPOSE,
// &one, matA, matB, &zero, matC, data_type, HIPSPARSE_CSRMM_ALG1, p_buffer);
hipsparseCreateDnMat(&matB, k, n, n, (void*)dense_mat.device_data(), data_type, HIPSPARSE_ORDER_ROW);
SyncArray<kernel_type> tmp_res(m*n);
hipsparseCreateDnMat(&matC, m, n, n, (void*)tmp_res.device_data(), data_type, HIPSPARSE_ORDER_ROW);
size_t buffer_size = 0;
hipsparseSpMM_bufferSize(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE,
&one, matA, matB, &zero, matC, data_type, CUSPARSE_SPMM_CSR_ALG2,
&buffer_size);
void *p_buffer = nullptr;
hipMalloc((void**)&p_buffer, buffer_size);
hipsparseSpMM(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE,
&one, matA, matB, &zero, matC, data_type, CUSPARSE_SPMM_CSR_ALG2, p_buffer);
hipblasStatus_t success=hipblasSgeam( handle2, HIPBLAS_OP_T, HIPBLAS_OP_N, m, n,
&one, tmp_res.device_data(), n, &zero, tmp_res.device_data(), m,
result.device_data(), m);
hipFree(p_buffer);
hipsparseDestroySpMat(matA);
hipsparseDestroyDnMat(matB);
hipsparseDestroyDnMat(matC);
#else
#ifdef USE_DOUBLE
hipsparseDcsrmm2(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_TRANSPOSE,
m, n, k, nnz, &one, descr, csr_val.device_data(), csr_row_ptr.device_data(),
csr_col_ind.device_data(),
dense_mat.device_data(), n, &zero, result.device_data(), m);
#else//kernel type is float
hipsparseScsrmm2(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_TRANSPOSE,
m, n, k, nnz, &one, descr, csr_val.device_data(), csr_row_ptr.device_data(),
csr_col_ind.device_data(),
dense_mat.device_data(), n, &zero, result.device_data(), m);
//hipsparseScsrmm return row-major matrix, so no transpose is needed
#endif // ifdef USE_DOUBLE
#endif // if CUDART_VERSION >= 11000
}
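    // In the CUDART >= 11000 branch above, the SpMM call writes the m x n product into the
    // temporary buffer tmp_res in row-major order; the geam call with OP_T (lda = n, ldc = m)
    // then rewrites it into `result` as a column-major m x n matrix, so the extra m*n buffer
    // exists only for that layout conversion.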
//dns dns mul
hipblasHandle_t handle_blas;
bool cublas_init;
void dns_dns_mul(int m, int n, int k, const SyncArray<kernel_type> &dense_a,const SyncArray<kernel_type> &dense_b,kernel_type beta,
SyncArray<kernel_type> &result){
if (!cublas_init) {
hipblasCreate(&handle_blas);
cublas_init = true;
}
kernel_type alpha=1.0;
const kernel_type* d_dense_a = dense_a.device_data();
const kernel_type* d_dense_b = dense_b.device_data();
// hipblasSgemm(handle_blas,HIPBLAS_OP_T,HIPBLAS_OP_N, m, n, k,&alpha,dense_a.device_data(), k, dense_b.device_data(), k,&beta, result.device_data(), m);
//dense b :k*n
// hipblasSgemm(handle_blas,HIPBLAS_OP_T,HIPBLAS_OP_T, m, n, k,&alpha,dense_a.device_data(), k, dense_b.device_data(), n,&beta, result.device_data(), m);
hipblasSgemm(handle_blas,HIPBLAS_OP_N,HIPBLAS_OP_T, m, n, k,&alpha,dense_a.device_data(), m, dense_b.device_data(), n,&beta, result.device_data(), m);
}
}
| 5e2f206eeb9b1cd31300d561650096dc8c208cb1.cu | //
// Created by jiashuai on 17-9-20.
//
#include <thundersvm/syncarray.h>
#include <cusparse.h>
#include "thundersvm/kernel/kernelmatrix_kernel.h"
#include <thundersvm/config.h>
#include <cublas_v2.h>
#include <cub/cub.cuh>
namespace svm_kernel {
__global__ void
kernel_get_working_set_ins(const kernel_type *val, const int *col_ind, const int *row_ptr, const int *data_row_idx,
kernel_type *data_rows,
int m, int n) {
KERNEL_LOOP(i, m) {
int row = data_row_idx[i];
for (int j = row_ptr[row]; j < row_ptr[row + 1]; ++j) {
int col = col_ind[j];
data_rows[col * m + i] = val[j]; // col-major for cuSPARSE
}
}
}
__global__ void
kernel_get_working_set_ins_dns(const kernel_type *val, const int *data_row_idx,
kernel_type *data_rows,
int m, int n,int n_instances) {
KERNEL_LOOP(i, m) {
int row = data_row_idx[i];
for (int j = 0; j < n; ++j) {
//data_rows[i*n + j] = val[row*n+j]; // row-major for cublas
// data_rows[i + j*m] = val[row*n+j]; // col-major for cublas
data_rows[i + j*m] = val[row+j*n_instances]; // col-major for cublas, val col major
}
}
}
__global__ void
kernel_RBF_kernel(const kernel_type *self_dot0, const kernel_type *self_dot1, kernel_type *dot_product, int m, int n,
kernel_type gamma) {
//m rows of kernel matrix, where m is the working set size; n is the number of training instances
KERNEL_LOOP(idx, m * n) {
int i = idx / n;//i is row id
int j = idx % n;//j is column id
dot_product[idx] = expf(-(self_dot0[i] + self_dot1[j] - dot_product[idx] * 2) * gamma);
}
}
__global__ void
kernel_RBF_kernel(const int *self_dot0_idx, const kernel_type *self_dot1, kernel_type *dot_product, int m, int n,
kernel_type gamma) {
//compute m rows of kernel matrix, where m is the working set size and n is the number of training instances, according to idx
KERNEL_LOOP(idx, m * n) {
int i = idx / n;//i is row id
int j = idx % n;//j is column id
dot_product[idx] = expf(-(self_dot1[self_dot0_idx[i]] + self_dot1[j] - dot_product[idx] * 2) * gamma);
}
}
__global__ void
kernel_sum_kernel_values(const float_type *coef, int total_sv, const int *sv_start, const int *sv_count,
const float_type *rho,
const kernel_type *k_mat, float_type *dec_values, int n_classes, int n_instances) {
KERNEL_LOOP(idx, n_instances) {
int k = 0;
int n_binary_models = n_classes * (n_classes - 1) / 2;
for (int i = 0; i < n_classes; ++i) {
for (int j = i + 1; j < n_classes; ++j) {
int si = sv_start[i];
int sj = sv_start[j];
int ci = sv_count[i];
int cj = sv_count[j];
const float_type *coef1 = &coef[(j - 1) * total_sv];
const float_type *coef2 = &coef[i * total_sv];
const kernel_type *k_values = &k_mat[idx * total_sv];
double sum = 0;
for (int l = 0; l < ci; ++l) {
sum += coef1[si + l] * k_values[si + l];
}
for (int l = 0; l < cj; ++l) {
sum += coef2[sj + l] * k_values[sj + l];
}
dec_values[idx * n_binary_models + k] = sum - rho[k];
k++;
}
}
}
}
__global__ void
kernel_poly_kernel(kernel_type *dot_product, kernel_type gamma, kernel_type coef0, int degree, int mn) {
KERNEL_LOOP(idx, mn) {
dot_product[idx] = powf(gamma * dot_product[idx] + coef0, degree);
}
}
__global__ void kernel_sigmoid_kernel(kernel_type *dot_product, kernel_type gamma, kernel_type coef0, int mn) {
KERNEL_LOOP(idx, mn) {
dot_product[idx] = tanhf(gamma * dot_product[idx] + coef0);
}
}
void sum_kernel_values(const SyncArray<float_type> &coef, int total_sv, const SyncArray<int> &sv_start,
const SyncArray<int> &sv_count, const SyncArray<float_type> &rho,
const SyncArray<kernel_type> &k_mat,
SyncArray<float_type> &dec_values, int n_classes, int n_instances) {
SAFE_KERNEL_LAUNCH(kernel_sum_kernel_values, coef.device_data(), total_sv, sv_start.device_data(),
sv_count.device_data(), rho.device_data(), k_mat.device_data(), dec_values.device_data(),
n_classes, n_instances);
}
void
get_working_set_ins(const SyncArray<kernel_type> &val, const SyncArray<int> &col_ind, const SyncArray<int> &row_ptr,
const SyncArray<int> &data_row_idx, SyncArray<kernel_type> &data_rows, int m, int n) {
SAFE_KERNEL_LAUNCH(kernel_get_working_set_ins, val.device_data(), col_ind.device_data(), row_ptr.device_data(),
data_row_idx.device_data(), data_rows.device_data(), m, n);
}
void
get_working_set_ins_dns(const SyncArray<kernel_type> &val,
const SyncArray<int> &data_row_idx, SyncArray<kernel_type> &data_rows, int m, int n,int n_instances){
SAFE_KERNEL_LAUNCH(kernel_get_working_set_ins_dns, val.device_data(),
data_row_idx.device_data(), data_rows.device_data(), m, n,n_instances);
}
void
RBF_kernel(const SyncArray<kernel_type> &self_dot0, const SyncArray<kernel_type> &self_dot1,
SyncArray<kernel_type> &dot_product, int m,
int n,
kernel_type gamma) {
SAFE_KERNEL_LAUNCH(kernel_RBF_kernel, self_dot0.device_data(), self_dot1.device_data(),
dot_product.device_data(), m, n, gamma);
}
void
RBF_kernel(const SyncArray<int> &self_dot0_idx, const SyncArray<kernel_type> &self_dot1,
SyncArray<kernel_type> &dot_product, int m,
int n, kernel_type gamma) {
SAFE_KERNEL_LAUNCH(kernel_RBF_kernel, self_dot0_idx.device_data(), self_dot1.device_data(),
dot_product.device_data(), m, n, gamma);
}
void poly_kernel(SyncArray<kernel_type> &dot_product, kernel_type gamma, kernel_type coef0, int degree, int mn) {
SAFE_KERNEL_LAUNCH(kernel_poly_kernel, dot_product.device_data(), gamma, coef0, degree, mn);
}
void sigmoid_kernel(SyncArray<kernel_type> &dot_product, kernel_type gamma, kernel_type coef0, int mn) {
SAFE_KERNEL_LAUNCH(kernel_sigmoid_kernel, dot_product.device_data(), gamma, coef0, mn);
}
cusparseHandle_t handle;
cusparseMatDescr_t descr;
bool cusparse_init;
cublasHandle_t handle2;
void dns_csr_mul(int m, int n, int k, const SyncArray<kernel_type> &dense_mat, const SyncArray<kernel_type> &csr_val,
const SyncArray<int> &csr_row_ptr, const SyncArray<int> &csr_col_ind, int nnz,
SyncArray<kernel_type> &result) {
if (!cusparse_init) {
cusparseCreate(&handle);
cusparseCreateMatDescr(&descr);
cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO);
cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL);
cusparse_init = true;
cublasCreate(&handle2);
}
kernel_type one(1);
kernel_type zero(0);
#if (CUDART_VERSION >= 11000)
cusparseSpMatDescr_t matA;
cusparseDnMatDescr_t matB, matC;
#ifdef USE_DOUBLE
cudaDataType data_type = CUDA_R_64F;
#else//kernel type is float
cudaDataType data_type = CUDA_R_32F;
#endif
cusparseCreateCsr(&matA, m, k, nnz, (void*)csr_row_ptr.device_data(), (void*)csr_col_ind.device_data(),
(void*)csr_val.device_data(), CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
CUSPARSE_INDEX_BASE_ZERO, data_type);
//cusparseCreateDnMat(&matB, n, k, n, (void*)dense_mat.device_data(), data_type, CUSPARSE_ORDER_COL);
//cusparseCreateDnMat(&matC, m, n, m, (void*)result.device_data(), data_type, CUSPARSE_ORDER_COL);
//size_t buffer_size = 0;
//cusparseSpMM_bufferSize(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_TRANSPOSE,
// &one, matA, matB, &zero, matC, data_type, CUSPARSE_CSRMM_ALG1,
// &buffer_size);
//void *p_buffer = nullptr;
//cudaMalloc((void**)&p_buffer, buffer_size);
//cusparseSpMM(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_TRANSPOSE,
// &one, matA, matB, &zero, matC, data_type, CUSPARSE_CSRMM_ALG1, p_buffer);
cusparseCreateDnMat(&matB, k, n, n, (void*)dense_mat.device_data(), data_type, CUSPARSE_ORDER_ROW);
SyncArray<kernel_type> tmp_res(m*n);
cusparseCreateDnMat(&matC, m, n, n, (void*)tmp_res.device_data(), data_type, CUSPARSE_ORDER_ROW);
size_t buffer_size = 0;
cusparseSpMM_bufferSize(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
&one, matA, matB, &zero, matC, data_type, CUSPARSE_SPMM_CSR_ALG2,
&buffer_size);
void *p_buffer = nullptr;
cudaMalloc((void**)&p_buffer, buffer_size);
cusparseSpMM(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE,
&one, matA, matB, &zero, matC, data_type, CUSPARSE_SPMM_CSR_ALG2, p_buffer);
cublasStatus_t success=cublasSgeam( handle2, CUBLAS_OP_T, CUBLAS_OP_N, m, n,
&one, tmp_res.device_data(), n, &zero, tmp_res.device_data(), m,
result.device_data(), m);
cudaFree(p_buffer);
cusparseDestroySpMat(matA);
cusparseDestroyDnMat(matB);
cusparseDestroyDnMat(matC);
#else
#ifdef USE_DOUBLE
cusparseDcsrmm2(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_TRANSPOSE,
m, n, k, nnz, &one, descr, csr_val.device_data(), csr_row_ptr.device_data(),
csr_col_ind.device_data(),
dense_mat.device_data(), n, &zero, result.device_data(), m);
#else//kernel type is float
cusparseScsrmm2(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_TRANSPOSE,
m, n, k, nnz, &one, descr, csr_val.device_data(), csr_row_ptr.device_data(),
csr_col_ind.device_data(),
dense_mat.device_data(), n, &zero, result.device_data(), m);
//cusparseScsrmm return row-major matrix, so no transpose is needed
#endif // ifdef USE_DOUBLE
#endif // if CUDART_VERSION >= 11000
}
//dns dns mul
cublasHandle_t handle_blas;
bool cublas_init;
void dns_dns_mul(int m, int n, int k, const SyncArray<kernel_type> &dense_a,const SyncArray<kernel_type> &dense_b,kernel_type beta,
SyncArray<kernel_type> &result){
if (!cublas_init) {
cublasCreate(&handle_blas);
cublas_init = true;
}
kernel_type alpha=1.0;
const kernel_type* d_dense_a = dense_a.device_data();
const kernel_type* d_dense_b = dense_b.device_data();
// cublasSgemm(handle_blas,CUBLAS_OP_T,CUBLAS_OP_N, m, n, k,&alpha,dense_a.device_data(), k, dense_b.device_data(), k,&beta, result.device_data(), m);
//dense b :k*n
// cublasSgemm(handle_blas,CUBLAS_OP_T,CUBLAS_OP_T, m, n, k,&alpha,dense_a.device_data(), k, dense_b.device_data(), n,&beta, result.device_data(), m);
cublasSgemm(handle_blas,CUBLAS_OP_N,CUBLAS_OP_T, m, n, k,&alpha,dense_a.device_data(), m, dense_b.device_data(), n,&beta, result.device_data(), m);
}
}
|
0debb43fe1c919f2ff0cd47c24abb33d5678e92b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Library Definition
#include <iostream> //cout
#include <fstream> //Files
#include <cstdlib> //atoi function
//Constant Definition
#define PI 3.141592654
#define blocksize 32
#define n 512
#define p 128
//Print matrix into standard output
void print(double * M,int cols,int rows);
void dot(double * a,double * b, double & c, int cols);
/*
DEVICE FUNCTIONS
*/
//Matrix transposition (Rows and Cols of M)
__global__ void matrixTrans(double * M,double * MT, int rows, int cols);
//Matrix multiplication(Cols and Rows of the result)
__global__ void matrixMul(double * a,double * b, double * C, int cols,int rows,int cols2);
//INVERSION OF MATRICES ----GAUSS JORDAN METHOD --------
void Inverse(double * A, double * I,int nn);
__global__ void nodiag_normalize(double *A, double *I, int nn, int i);
__global__ void diag_normalize(double *A, double *I, int nn, int i);
__global__ void gaussjordan(double *A, double *I, int nn, int i);
__global__ void set_zero(double *A, double *I, int nn, int i);
//Sum of Matrices
__global__ void matrixSum(const double * M1,const double * M2,double * Msum,double alpha,double beta, int rows, int cols);
//Initialization of matrices, ones, zeros, identity
void set_ones(double * M, int l);
void set_zeros(double * M, int l);
void set_iden(double * M, int l);
//Print matrices into external files
void print_file(char const * NameArch, const double * M,int cols,int rows);
//Random numbers
double normal_rand(void);
/*
MAIN FUNCTION
*/
int main(int argc, char * argv[]){
srand(atoi(argv[1])); //Seed received from terminal
//int cols=p;
//int raws=n;
double *X, *Xt, *XXt, *Inv;
double *H0,*H, *J, *Suma;
double *Y,*Yt, *aux, *Id;
int size0 = n * sizeof(double);
int size2 = p * p * sizeof(double);
int size3 = n * n * sizeof(double);
int size4 = n * p * sizeof(double);
hipMallocManaged(&X,size4);
hipMallocManaged(&Xt,size4);
hipMallocManaged(&H0,size4);
hipMallocManaged(&H,size3);
hipMallocManaged(&J,size3);
hipMallocManaged(&Suma,size3);
hipMallocManaged(&XXt,size2);
hipMallocManaged(&Yt,size0);
hipMallocManaged(&Inv,size2);
hipMallocManaged(&Y,size0);
hipMallocManaged(&aux,size0);
hipMallocManaged(&Id,size3);
double ssr=0,sst=0;
double R2=0;
double F=0, Ftest=1.1962078803512777;
for(int row=0;row<n;row++){
for(int col=0;col<p;col++){
X[row*p+col]=0.1*normal_rand();//distribution(generator);
Y[row]=0.1*normal_rand();
}
}
print_file("x.dat",X,p,n);
dim3 threadsPerBlock(blocksize, blocksize);
dim3 numBlocks((p + blocksize - 1) / blocksize, (n + blocksize - 1) / blocksize);
dim3 numBlocks1((p + blocksize - 1) / blocksize, (p + blocksize - 1) / blocksize);
dim3 numBlocks2((n + blocksize - 1) / blocksize, (n + blocksize - 1) / blocksize);
hipLaunchKernelGGL((matrixTrans), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, X, Xt, n, p);
hipDeviceSynchronize();
hipLaunchKernelGGL((matrixMul), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, Xt, X, XXt, p, p, n);
hipDeviceSynchronize();
set_iden(Inv,p);
Inverse(XXt,Inv,p);hipDeviceSynchronize();
//std::cout<<"inv"<<std::endl;
//print_file("Inv.dat",Inv,p,p);
//matrixMul<<<numBlocks,threadsPerBlock>>>(X,Xt,XXt,p,p,n);
//hipDeviceSynchronize();
hipLaunchKernelGGL((matrixMul), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, Inv, Xt, H0, n, p, p);
hipDeviceSynchronize();
//print_file("H0.dat",H0,p,n);
hipLaunchKernelGGL((matrixMul), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, X, H0, H, n, n, p);
hipDeviceSynchronize();
//print_file("H.dat",H,n,n);
set_ones(J,n);
hipLaunchKernelGGL((matrixSum), dim3(numBlocks1), dim3(threadsPerBlock), 0, 0, H, J, Suma, 1., -1./n, n, n);
hipDeviceSynchronize();
hipLaunchKernelGGL((matrixMul), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, Suma, Y, aux, 1, n, p);
hipDeviceSynchronize();
//matrixMul<<<numBlocks,threadsPerBlock>>>(Y,aux,J,1,1,n);
//hipDeviceSynchronize();
//ssr=J[0];
dot(Y,aux,ssr,n);
set_iden(Id,n);
set_zeros(Suma,n*n);
hipLaunchKernelGGL((matrixSum), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, Id, J, Suma, 1., -1./n, n, n);
hipDeviceSynchronize();
set_zeros(aux,n);
hipLaunchKernelGGL((matrixMul), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, Suma, Y, aux, n, 1, n);
hipDeviceSynchronize();
//matrixMul<<<numBlocks,threadsPerBlock>>>(Y,aux,J,1,1,n);
//hipDeviceSynchronize();
//sst=J[0];
dot(Y,aux,sst,n);
R2=ssr/sst;
F=(R2*(n-p-1.))/((1.-R2)*p);
std::cout<<R2<<' '<<ssr<<' '<<sst<<' '<<F<<std::endl;
hipFree(X);
hipFree(Xt);
hipFree(XXt);
hipFree(Inv);
hipFree(H0);
hipFree(H);
hipFree(J);
hipFree(Suma);
return 0;
}
void print(double * M,int cols,int rows){
for( int row = 0; row < rows; ++row ){
for( int col = 0; col < cols; ++col )
{
std::cout<<M[col + row*cols]<<'\t';
}
std::cout<<"\n";
}
}
__global__ void matrixTrans(double * M,double * MT, int rows, int cols)
{
double val=0;
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < rows && col < cols){
val = M[col + row*cols];
MT[row + col*rows] = val;
}
}
__global__ void matrixMul(double * a,double * b, double * C, int cols,int rows,int cols2)
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < rows && col < cols){
C[row*cols+col] =0;
for (int k = 0; k < cols2; k++){
C[row*cols+col]+=b[k*cols+col]*a[row*cols2+k];
}
}
}
__global__ void nodiag_normalize(double *A, double *I, int nn, int i){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if ( x< nn && y < nn){
if (x < nn && y < nn){
if (x == i && x!=y){
I[x*nn + y] /= A[i*nn + i];
A[x*nn + y] /= A[i*nn + i];
}
}
}
}
__global__ void diag_normalize(double *A, double *I, int nn, int i){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < nn && y < nn){
if (x == y && x == i){
I[x*nn + y] /= A[i*nn + i];
A[x*nn + y] /= A[i*nn + i];
}
}
}
__global__ void gaussjordan(double *A, double *I, int nn, int i)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if ( x< nn && y < nn){
if (x < nn && y < nn){
if (x != i){
I[x*nn + y] -= I[i*nn + y] * A[x*nn + i];
if (y != i){
A[x*nn + y] -= A[i*nn + y] * A[x*nn + i];
}
}
}
}
}
__global__ void set_zero(double *A, double *I, int nn, int i){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < nn && y < nn){
if (x != i){
if (y == i){
A[x*nn + y] = 0;
}
}
}
}
void Inverse(double * A, double * I,int nn){
dim3 threadsPerBlock2(blocksize, blocksize);
dim3 numBlocks2((nn + blocksize - 1) / blocksize, (nn + blocksize - 1) / blocksize);
for (int i = 0; i<nn; i++){
nodiag_normalize << <numBlocks2, threadsPerBlock2 >> >(A, I, nn, i);
diag_normalize << <numBlocks2, threadsPerBlock2 >> >(A, I, nn, i);
gaussjordan << <numBlocks2, threadsPerBlock2 >> >(A, I, nn, i);
set_zero << <numBlocks2, threadsPerBlock2 >> >(A, I, nn, i);
}
hipDeviceSynchronize();
}
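// Note that this Gauss-Jordan elimination does no pivoting, so it assumes every diagonal
// element A[i*nn + i] stays non-zero during the sweep; for a singular or ill-conditioned
// input the divisions in nodiag_normalize/diag_normalize produce inf or NaN. I must be
// initialized to the identity (see set_iden in main) for the output to be the inverse of A.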
__global__ void matrixSum(const double * M1,const double * M2,double * Msum,double alpha,double beta, int rows, int cols)
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < rows && col < cols){
Msum[row + col*rows] = alpha*M1[row+col*rows]+beta*M2[row+col*rows];
}
}
void print_file(char const * NameArch, const double * M,int cols,int rows){
std::ofstream File(NameArch);
File.precision(16);
for( int row = 0; row < rows; ++row ){
for( int col = 0; col < cols; ++col )
{
File<<M[col + row*cols]<<'\t';
}
File<<"\n";
}
File.close();
}
// Random number generator as per Abramowitz & Stegun
// Source taken from:
// http://c-faq.com/lib/gaussian.html
double normal_rand(void){
static double U, V;
static int phase = 0;
double Z;
if(phase == 0) {
U = (rand() + 1.) / (RAND_MAX + 2.);
V = rand() / (RAND_MAX + 1.);
Z = sqrt(-2 * log(U)) * sin(2 * PI * V);
} else
Z = sqrt(-2 * log(U)) * cos(2 * PI * V);
phase = 1 - phase;
return Z;
}
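// The generator above is the basic Box-Muller transform: for U, V uniform on (0,1),
// Z0 = sqrt(-2 ln U) * sin(2*pi*V) and Z1 = sqrt(-2 ln U) * cos(2*pi*V) are independent
// standard normal variates; the static `phase` flag alternates between the sine and cosine
// branches so consecutive calls return the two members of each pair.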
void set_iden(double * M, int l){
for(int row=0;row<l;row++){
for(int col=0;col<l;col++){
M[row*l+col]=0;
if (col==row){
M[row*l+col]=1;
}
}
}
}
void set_ones(double * M, int l){
for(int row=0;row<l;row++){
for(int col=0;col<l;col++){
M[row*l+col]=1;
}
}
}
void set_zeros(double * M, int l){
for(int row=0;row<l;row++){
M[row]=0;
}
}
void dot(double * a,double * b, double & c, int cols){
c=0;
for(int i=0;i<cols;i++){
c+=a[i]*b[i];
}
}
| 0debb43fe1c919f2ff0cd47c24abb33d5678e92b.cu | //Library Definition
#include <iostream> //cout
#include <fstream> //Files
#include <cstdlib> //atoi function
//Constant Definition
#define PI 3.141592654
#define blocksize 32
#define n 512
#define p 128
//Print matrix into standard output
void print(double * M,int cols,int rows);
void dot(double * a,double * b, double & c, int cols);
/*
DEVICE FUNCTIONS
*/
//Matrix transposition (Rows and Cols of M)
__global__ void matrixTrans(double * M,double * MT, int rows, int cols);
//Matrix multiplication(Cols and Rows of the result)
__global__ void matrixMul(double * a,double * b, double * C, int cols,int rows,int cols2);
//INVERSION OF MATRICES ----GAUSS JORDAN METHOD --------
void Inverse(double * A, double * I,int nn);
__global__ void nodiag_normalize(double *A, double *I, int nn, int i);
__global__ void diag_normalize(double *A, double *I, int nn, int i);
__global__ void gaussjordan(double *A, double *I, int nn, int i);
__global__ void set_zero(double *A, double *I, int nn, int i);
//Sum of Matrices
__global__ void matrixSum(const double * M1,const double * M2,double * Msum,double alpha,double beta, int rows, int cols);
//Initialization of matrices, ones, zeros, identity
void set_ones(double * M, int l);
void set_zeros(double * M, int l);
void set_iden(double * M, int l);
//Print matrices into external files
void print_file(char const * NameArch, const double * M,int cols,int rows);
//Random numbers
double normal_rand(void);
/*
MAIN FUNCTION
*/
int main(int argc, char * argv[]){
srand(atoi(argv[1])); //Seed received from terminal
//int cols=p;
//int raws=n;
double *X, *Xt, *XXt, *Inv;
double *H0,*H, *J, *Suma;
double *Y,*Yt, *aux, *Id;
int size0 = n * sizeof(double);
int size2 = p * p * sizeof(double);
int size3 = n * n * sizeof(double);
int size4 = n * p * sizeof(double);
cudaMallocManaged(&X,size4);
cudaMallocManaged(&Xt,size4);
cudaMallocManaged(&H0,size4);
cudaMallocManaged(&H,size3);
cudaMallocManaged(&J,size3);
cudaMallocManaged(&Suma,size3);
cudaMallocManaged(&XXt,size2);
cudaMallocManaged(&Yt,size0);
cudaMallocManaged(&Inv,size2);
cudaMallocManaged(&Y,size0);
cudaMallocManaged(&aux,size0);
cudaMallocManaged(&Id,size3);
double ssr=0,sst=0;
double R2=0;
double F=0, Ftest=1.1962078803512777;
for(int row=0;row<n;row++){
for(int col=0;col<p;col++){
X[row*p+col]=0.1*normal_rand();//distribution(generator);
Y[row]=0.1*normal_rand();
}
}
print_file("x.dat",X,p,n);
dim3 threadsPerBlock(blocksize, blocksize);
dim3 numBlocks((p + blocksize - 1) / blocksize, (n + blocksize - 1) / blocksize);
dim3 numBlocks1((p + blocksize - 1) / blocksize, (p + blocksize - 1) / blocksize);
dim3 numBlocks2((n + blocksize - 1) / blocksize, (n + blocksize - 1) / blocksize);
matrixTrans<<<numBlocks,threadsPerBlock>>>(X,Xt,n,p);
cudaDeviceSynchronize();
matrixMul<<<numBlocks,threadsPerBlock>>>(Xt,X,XXt,p,p,n);
cudaDeviceSynchronize();
set_iden(Inv,p);
Inverse(XXt,Inv,p);cudaDeviceSynchronize();
//std::cout<<"inv"<<std::endl;
//print_file("Inv.dat",Inv,p,p);
//matrixMul<<<numBlocks,threadsPerBlock>>>(X,Xt,XXt,p,p,n);
//cudaDeviceSynchronize();
matrixMul<<<numBlocks,threadsPerBlock>>>(Inv,Xt,H0,n,p,p);
cudaDeviceSynchronize();
//print_file("H0.dat",H0,p,n);
matrixMul<<<numBlocks,threadsPerBlock>>>(X,H0,H,n,n,p);
cudaDeviceSynchronize();
//print_file("H.dat",H,n,n);
set_ones(J,n);
matrixSum<<<numBlocks1,threadsPerBlock>>>(H,J,Suma,1.,-1./n, n,n);
cudaDeviceSynchronize();
matrixMul<<<numBlocks,threadsPerBlock>>>(Suma,Y,aux,1,n,p);
cudaDeviceSynchronize();
//matrixMul<<<numBlocks,threadsPerBlock>>>(Y,aux,J,1,1,n);
//cudaDeviceSynchronize();
//ssr=J[0];
dot(Y,aux,ssr,n);
set_iden(Id,n);
set_zeros(Suma,n*n);
matrixSum<<<numBlocks,threadsPerBlock>>>(Id,J,Suma,1.,-1./n, n, n);
cudaDeviceSynchronize();
set_zeros(aux,n);
matrixMul<<<numBlocks,threadsPerBlock>>>(Suma,Y,aux,n,1,n);
cudaDeviceSynchronize();
//matrixMul<<<numBlocks,threadsPerBlock>>>(Y,aux,J,1,1,n);
//cudaDeviceSynchronize();
//sst=J[0];
dot(Y,aux,sst,n);
R2=ssr/sst;
F=(R2*(n-p-1.))/((1.-R2)*p);
std::cout<<R2<<' '<<ssr<<' '<<sst<<' '<<F<<std::endl;
cudaFree(X);
cudaFree(Xt);
cudaFree(XXt);
cudaFree(Inv);
cudaFree(H0);
cudaFree(H);
cudaFree(J);
cudaFree(Suma);
return 0;
}
void print(double * M,int cols,int rows){
for( int row = 0; row < rows; ++row ){
for( int col = 0; col < cols; ++col )
{
std::cout<<M[col + row*cols]<<'\t';
}
std::cout<<"\n";
}
}
__global__ void matrixTrans(double * M,double * MT, int rows, int cols)
{
double val=0;
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < rows && col < cols){
val = M[col + row*cols];
MT[row + col*rows] = val;
}
}
__global__ void matrixMul(double * a,double * b, double * C, int cols,int rows,int cols2)
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < rows && col < cols){
C[row*cols+col] =0;
for (int k = 0; k < cols2; k++){
C[row*cols+col]+=b[k*cols+col]*a[row*cols2+k];
}
}
}
__global__ void nodiag_normalize(double *A, double *I, int nn, int i){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if ( x< nn && y < nn){
if (x < nn && y < nn){
if (x == i && x!=y){
I[x*nn + y] /= A[i*nn + i];
A[x*nn + y] /= A[i*nn + i];
}
}
}
}
__global__ void diag_normalize(double *A, double *I, int nn, int i){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < nn && y < nn){
if (x == y && x == i){
I[x*nn + y] /= A[i*nn + i];
A[x*nn + y] /= A[i*nn + i];
}
}
}
__global__ void gaussjordan(double *A, double *I, int nn, int i)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if ( x< nn && y < nn){
if (x < nn && y < nn){
if (x != i){
I[x*nn + y] -= I[i*nn + y] * A[x*nn + i];
if (y != i){
A[x*nn + y] -= A[i*nn + y] * A[x*nn + i];
}
}
}
}
}
__global__ void set_zero(double *A, double *I, int nn, int i){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x < nn && y < nn){
if (x != i){
if (y == i){
A[x*nn + y] = 0;
}
}
}
}
void Inverse(double * A, double * I,int nn){
dim3 threadsPerBlock2(blocksize, blocksize);
dim3 numBlocks2((nn + blocksize - 1) / blocksize, (nn + blocksize - 1) / blocksize);
for (int i = 0; i<nn; i++){
nodiag_normalize << <numBlocks2, threadsPerBlock2 >> >(A, I, nn, i);
diag_normalize << <numBlocks2, threadsPerBlock2 >> >(A, I, nn, i);
gaussjordan << <numBlocks2, threadsPerBlock2 >> >(A, I, nn, i);
set_zero << <numBlocks2, threadsPerBlock2 >> >(A, I, nn, i);
}
cudaDeviceSynchronize();
}
__global__ void matrixSum(const double * M1,const double * M2,double * Msum,double alpha,double beta, int rows, int cols)
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < rows && col < cols){
Msum[row + col*rows] = alpha*M1[row+col*rows]+beta*M2[row+col*rows];
}
}
void print_file(char const * NameArch, const double * M,int cols,int rows){
std::ofstream File(NameArch);
File.precision(16);
for( int row = 0; row < rows; ++row ){
for( int col = 0; col < cols; ++col )
{
File<<M[col + row*cols]<<'\t';
}
File<<"\n";
}
File.close();
}
// Random number generator as per Abramowitz & Stegun
// Source taken from:
// http://c-faq.com/lib/gaussian.html
double normal_rand(void){
static double U, V;
static int phase = 0;
double Z;
if(phase == 0) {
U = (rand() + 1.) / (RAND_MAX + 2.);
V = rand() / (RAND_MAX + 1.);
Z = sqrt(-2 * log(U)) * sin(2 * PI * V);
} else
Z = sqrt(-2 * log(U)) * cos(2 * PI * V);
phase = 1 - phase;
return Z;
}
void set_iden(double * M, int l){
for(int row=0;row<l;row++){
for(int col=0;col<l;col++){
M[row*l+col]=0;
if (col==row){
M[row*l+col]=1;
}
}
}
}
void set_ones(double * M, int l){
for(int row=0;row<l;row++){
for(int col=0;col<l;col++){
M[row*l+col]=1;
}
}
}
void set_zeros(double * M, int l){
for(int row=0;row<l;row++){
M[row]=0;
}
}
void dot(double * a,double * b, double & c, int cols){
c=0;
for(int i=0;i<cols;i++){
c+=a[i]*b[i];
}
}
|
42ff31f773f2176b777896a839e227476b1128ef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include "hip/device_functions.h"
#include <random>
#include <algorithm>
#include <iterator>
#include <iostream>
#include <vector>
namespace {
template <typename T>
std::vector<T>& randVector(size_t size)
{
using namespace std;
random_device rnd_device;
// Specify the engine and distribution.
mt19937 mersenne_engine{ rnd_device() }; // Generates random integers
uniform_int_distribution<int> dist{ 1, 52 };
auto gen = [&dist, &mersenne_engine]() {
return dist(mersenne_engine);
};
std::vector<T>* v = new std::vector<T> (size);
generate(v->begin(), v->end(), gen);
// Optional
return *v;
}
const size_t defaultNUM = 64;
}
void check_cuda_error(const char* message)
{
hipError_t err = hipGetLastError();
if (err != hipSuccess)
printf("ERROR: %s: %s\n", message,
hipGetErrorString(err));
}
template <typename T>
__global__ void kernel(T* res, T s, size_t num)
{
extern __shared__ T cache[];
size_t cacheIndex = threadIdx.x;
cache[cacheIndex] = 0;
int tid = 1 + threadIdx.x + blockIdx.x * blockDim.x ;
if (tid < num)
{
cache[cacheIndex] = (float)1 / powf((float)tid, s); // compute the next term of the series
}
__syncthreads();
// reduction
size_t i = blockDim.x / 2;
while (i != 0) {
if (cacheIndex < i)
{
cache[cacheIndex] += cache[cacheIndex + i];
}
__syncthreads();
i /= 2;
}
if (cacheIndex == i)
atomicAdd(res, cache[0]);
return;
}
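// The in-block tree reduction above halves `i` from blockDim.x/2 down to 1, which is only
// exhaustive when blockDim.x is a power of two; thread 0 of each block then accumulates the
// block's partial sum into *res with atomicAdd, so the total is valid only after the kernel
// has completed (hence the device synchronization on the host side).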
int main(int argc, char** argv)
{
size_t N = 1024 *1024;
auto v = randVector<float>(N);
float* res_d; // result on the device
float res = 0;
float stepen = 2.0;
hipMalloc((void**)&res_d, sizeof(float));
check_cuda_error("Allocating memory on GPU");
hipMemcpy(res_d, &res, sizeof(float), hipMemcpyHostToDevice);
check_cuda_error("Allocating memory on GPU");
// grid and block sizes on the GPU
size_t THREADS_PER_BLOCK = std::min(std::max(64, static_cast<int>(N)), 1024);
size_t BLOCKS_PER_GRID = (N / THREADS_PER_BLOCK) + 1;
kernel << <BLOCKS_PER_GRID, THREADS_PER_BLOCK, THREADS_PER_BLOCK * sizeof(float) >> > (res_d, stepen , N);
hipDeviceSynchronize();
check_cuda_error("Executing kernel");
hipMemcpy(&res, res_d, sizeof(float), hipMemcpyDeviceToHost);
// copy the results back to the host
check_cuda_error("Copying results from GPU");
hipFree(res_d);
check_cuda_error("Freeing device memory");
printf("Dzeta(%.1f) = %.12f\n",stepen, res);
printf("Theoretical value of Dzeta(2.0) = pi*pi/6 = 1,64493406685\n");
return 0;
}
| 42ff31f773f2176b777896a839e227476b1128ef.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include "device_functions.h"
#include <random>
#include <algorithm>
#include <iterator>
#include <iostream>
#include <vector>
namespace {
template <typename T>
std::vector<T>& randVector(size_t size)
{
using namespace std;
random_device rnd_device;
// Specify the engine and distribution.
mt19937 mersenne_engine{ rnd_device() }; // Generates random integers
uniform_int_distribution<int> dist{ 1, 52 };
auto gen = [&dist, &mersenne_engine]() {
return dist(mersenne_engine);
};
std::vector<T>* v = new std::vector<T> (size);
generate(v->begin(), v->end(), gen);
// Optional
return *v;
}
const size_t defaultNUM = 64;
}
void check_cuda_error(const char* message)
{
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("ERROR: %s: %s\n", message,
cudaGetErrorString(err));
}
template <typename T>
__global__ void kernel(T* res, T s, size_t num)
{
extern __shared__ T cache[];
size_t cacheIndex = threadIdx.x;
cache[cacheIndex] = 0;
int tid = 1 + threadIdx.x + blockIdx.x * blockDim.x ;
if (tid < num)
{
cache[cacheIndex] = (float)1 / powf((float)tid, s); // compute the next term of the series
}
__syncthreads();
// reduction
size_t i = blockDim.x / 2;
while (i != 0) {
if (cacheIndex < i)
{
cache[cacheIndex] += cache[cacheIndex + i];
}
__syncthreads();
i /= 2;
}
if (cacheIndex == i)
atomicAdd(res, cache[0]);
return;
}
int main(int argc, char** argv)
{
size_t N = 1024 *1024;
auto v = randVector<float>(N);
float* res_d; // result on the device
float res = 0;
float stepen = 2.0;
cudaMalloc((void**)&res_d, sizeof(float));
check_cuda_error("Allocating memory on GPU");
cudaMemcpy(res_d, &res, sizeof(float), cudaMemcpyHostToDevice);
check_cuda_error("Allocating memory on GPU");
// grid and block sizes on the GPU
size_t THREADS_PER_BLOCK = std::min(std::max(64, static_cast<int>(N)), 1024);
size_t BLOCKS_PER_GRID = (N / THREADS_PER_BLOCK) + 1;
kernel << <BLOCKS_PER_GRID, THREADS_PER_BLOCK, THREADS_PER_BLOCK * sizeof(float) >> > (res_d, stepen , N);
cudaThreadSynchronize();
check_cuda_error("Executing kernel");
cudaMemcpy(&res, res_d, sizeof(float), cudaMemcpyDeviceToHost);
// copy the results back to the host
check_cuda_error("Copying results from GPU");
cudaFree(res_d);
check_cuda_error("Freeing device memory");
printf("Dzeta(%.1f) = %.12f\n",stepen, res);
printf("Theoretical value of Dzeta(2.0) = pi*pi/6 = 1,64493406685\n");
return 0;
}
|
414afc5ef0a8243fbc9b42cd0382adee7d69ebb5.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstring>
#include <vector>
#include <random>
//#include <cmath>
#include "gtest/gtest.h"
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/ggemm.cuh"
#include "caffe/test/test_caffe_main.hpp"
#define CUASSERT(expr) ASSERT_EQ((expr), hipSuccess)
namespace caffe {
static constexpr int kRandomSeed = 1234;
template<typename Dtype> __device__ __host__ __forceinline__
Dtype softmax(Dtype offset, Dtype data, Dtype max, uint8_t nothing) {
#ifdef __CUDA_ARCH__
return exp(data + offset - max);
#else
return ::exp(data + offset - max);
#endif
}
template<typename Dtype> __device__ __host__ __forceinline__
Dtype softmax_activation(Dtype max, Dtype input, uint8_t nothing) {
#ifdef __CUDA_ARCH__
return log(input) + max;
#else
return ::log(input) + max;
#endif
}
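// softmax() and softmax_activation() implement a max-shifted log-sum-exp: the combine step
// accumulates exp(data + offset - max) and the activation step returns log(sum) + max, so the
// result equals log(sum_k exp(data_k + offset_k)) while avoiding overflow for large inputs.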
TEST(GGEMMTest, Test2Ops) {
typedef float Dtype;
size_t M = 4, N = 8, K = 3;
int num_regions = 9;
std::vector<Dtype> host_a(M*K*num_regions), host_b(K*N*num_regions), host_c (M*N, 0), host_c_res(M*N, 0);
Dtype *d_a, *d_b, *d_c;
CUASSERT(hipMalloc(&d_a, M*K*num_regions*sizeof(Dtype)));
CUASSERT(hipMalloc(&d_b, K*N*num_regions*sizeof(Dtype)));
CUASSERT(hipMalloc(&d_c, M*N*sizeof(Dtype)));
CUASSERT(hipMemset(d_c, 0, M*N*sizeof(Dtype)));
std::mt19937 gen (kRandomSeed);
std::uniform_real_distribution<Dtype> rd(-1, 1);
for (int i = 0; i < M * K*num_regions; ++i) {
host_a[i] = rd(gen);
}
for (int i = 0; i < N * K*num_regions; ++i) {
host_b[i] = rd(gen);
}
CUASSERT(hipMemcpy(d_a, &host_a[0], M * K * num_regions * sizeof(Dtype), hipMemcpyHostToDevice));
CUASSERT(hipMemcpy(d_b, &host_b[0], N * K * num_regions * sizeof(Dtype), hipMemcpyHostToDevice));
ggemm_2ops_gpu
<Dtype, Dtype, Dtype, uint8_t,
ggemm_add<Dtype, uint8_t>, ggemm_max<Dtype>,
softmax<Dtype>, ggemm_add<Dtype>, true,
softmax_activation<Dtype>, true,
true, true, false>
(M, N, K, d_a, d_b, d_c,
-INFINITY, -INFINITY, -INFINITY, 0, 0, num_regions);
CUASSERT(hipMemcpy(&host_c_res[0], d_c, M * N * sizeof(Dtype), hipMemcpyDeviceToHost));
ggemm_2ops_cpu
<Dtype, Dtype, Dtype, uint8_t,
ggemm_add<Dtype, uint8_t>, ggemm_max<Dtype>,
softmax<Dtype>, ggemm_add<Dtype>, true,
softmax_activation<Dtype>, true,
true, true, false>
(M, N, K, &host_a[0], &host_b[0], &host_c[0],
-INFINITY, 0, 0, num_regions);
for (int i = 0; i < M*N; ++i) {
EXPECT_NEAR(host_c_res[i], host_c[i], 1e-4);
}
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
}
} // namespace caffe
| 414afc5ef0a8243fbc9b42cd0382adee7d69ebb5.cu | #include <cstring>
#include <vector>
#include <random>
//#include <cmath>
#include "gtest/gtest.h"
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/ggemm.cuh"
#include "caffe/test/test_caffe_main.hpp"
#define CUASSERT(expr) ASSERT_EQ((expr), cudaSuccess)
namespace caffe {
static constexpr int kRandomSeed = 1234;
template<typename Dtype> __device__ __host__ __forceinline__
Dtype softmax(Dtype offset, Dtype data, Dtype max, uint8_t nothing) {
#ifdef __CUDA_ARCH__
return exp(data + offset - max);
#else
return std::exp(data + offset - max);
#endif
}
template<typename Dtype> __device__ __host__ __forceinline__
Dtype softmax_activation(Dtype max, Dtype input, uint8_t nothing) {
#ifdef __CUDA_ARCH__
return log(input) + max;
#else
return std::log(input) + max;
#endif
}
TEST(GGEMMTest, Test2Ops) {
typedef float Dtype;
size_t M = 4, N = 8, K = 3;
int num_regions = 9;
std::vector<Dtype> host_a(M*K*num_regions), host_b(K*N*num_regions), host_c (M*N, 0), host_c_res(M*N, 0);
Dtype *d_a, *d_b, *d_c;
CUASSERT(cudaMalloc(&d_a, M*K*num_regions*sizeof(Dtype)));
CUASSERT(cudaMalloc(&d_b, K*N*num_regions*sizeof(Dtype)));
CUASSERT(cudaMalloc(&d_c, M*N*sizeof(Dtype)));
CUASSERT(cudaMemset(d_c, 0, M*N*sizeof(Dtype)));
std::mt19937 gen (kRandomSeed);
std::uniform_real_distribution<Dtype> rd(-1, 1);
for (int i = 0; i < M * K*num_regions; ++i) {
host_a[i] = rd(gen);
}
for (int i = 0; i < N * K*num_regions; ++i) {
host_b[i] = rd(gen);
}
CUASSERT(cudaMemcpy(d_a, &host_a[0], M * K * num_regions * sizeof(Dtype), cudaMemcpyHostToDevice));
CUASSERT(cudaMemcpy(d_b, &host_b[0], N * K * num_regions * sizeof(Dtype), cudaMemcpyHostToDevice));
ggemm_2ops_gpu
<Dtype, Dtype, Dtype, uint8_t,
ggemm_add<Dtype, uint8_t>, ggemm_max<Dtype>,
softmax<Dtype>, ggemm_add<Dtype>, true,
softmax_activation<Dtype>, true,
true, true, false>
(M, N, K, d_a, d_b, d_c,
-INFINITY, -INFINITY, -INFINITY, 0, 0, num_regions);
CUASSERT(cudaMemcpy(&host_c_res[0], d_c, M * N * sizeof(Dtype), cudaMemcpyDeviceToHost));
ggemm_2ops_cpu
<Dtype, Dtype, Dtype, uint8_t,
ggemm_add<Dtype, uint8_t>, ggemm_max<Dtype>,
softmax<Dtype>, ggemm_add<Dtype>, true,
softmax_activation<Dtype>, true,
true, true, false>
(M, N, K, &host_a[0], &host_b[0], &host_c[0],
-INFINITY, 0, 0, num_regions);
for (int i = 0; i < M*N; ++i) {
EXPECT_NEAR(host_c_res[i], host_c[i], 1e-4);
}
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
}
} // namespace caffe
|
71bce6c041f90f5dedb7463f863c9838d064109f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _REDUCTION_KERNEL_H_
#define _REDUCTION_KERNEL_H_
#include <stdio.h>
#include "reduction.h"
__global__ void reduction_naive(float* d_odata, float* d_idata, int num_elements)
{
int idx = (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
d_odata[idx] = d_idata[idx]+d_idata[idx+num_elements/2];
}
#define COALESCED_NUM 16
#define blockDimX 512
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define globalDimX 65536
#define globalDimY 1
__global__ void reduction_opt_0(float * A, int size, int segSize)
{
#pragma gCompiler gValue segSize 262144
int k;
float sum;
int nidx;
__shared__ float shared_0[512];
nidx=((((tidx/16)*2048)+(idx&15))+((idx/512)*16));
float tmp_2;
float tmp_3;
float tmp_0;
float tmp_1;
sum=0;
for (k=0; k<size; k=(k+262144))
{
float r;
r=A[(nidx+k)];
sum+=r;
}
tmp_0=sum;
__syncthreads();
sum=0;
for (k=0; k<size; k=(k+262144))
{
float r;
r=A[((nidx+131072)+k)];
sum+=r;
}
tmp_1=sum;
__syncthreads();
float a;
float b;
float c;
a=tmp_0;
b=tmp_1;
c=(a+b);
tmp_2=c;
sum=0;
for (k=0; k<size; k=(k+262144))
{
float r;
r=A[((nidx+65536)+k)];
sum+=r;
}
tmp_0=sum;
__syncthreads();
sum=0;
for (k=0; k<size; k=(k+262144))
{
float r;
r=A[(((nidx+65536)+131072)+k)];
sum+=r;
}
tmp_1=sum;
__syncthreads();
a=tmp_0;
b=tmp_1;
c=(a+b);
tmp_3=c;
a=tmp_2;
b=tmp_3;
c=(a+b);
shared_0[(tidx+0)]=c;
__syncthreads();
if ((nidx<32768))
{
float a;
float b;
float c;
a=shared_0[(tidx+0)];
b=shared_0[(tidx+256)];
c=(a+b);
shared_0[(tidx+0)]=c;
}
__syncthreads();
if ((nidx<16384))
{
float a;
float b;
float c;
a=shared_0[(tidx+0)];
b=shared_0[(tidx+128)];
c=(a+b);
shared_0[(tidx+0)]=c;
}
__syncthreads();
if ((nidx<8192))
{
float a;
float b;
float c;
a=shared_0[(tidx+0)];
b=shared_0[(tidx+64)];
c=(a+b);
shared_0[(tidx+0)]=c;
}
__syncthreads();
if ((nidx<4096))
{
float a;
float b;
float c;
a=shared_0[(tidx+0)];
b=shared_0[(tidx+32)];
c=(a+b);
shared_0[(tidx+0)]=c;
}
__syncthreads();
if ((nidx<2048))
{
float a;
float b;
float c;
a=shared_0[(tidx+0)];
b=shared_0[(tidx+16)];
c=(a+b);
{
A[nidx]=c;
}
}
}
#define COALESCED_NUM 16
#define blockDimX 512
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define globalDimX 512
#define globalDimY 1
__global__ void reduction_opt_1(float * A, int size, int segSize)
{
#pragma gCompiler gValue segSize 262144
__shared__ float shared_1[512];
float tmp_4;
float tmp_5;
float a;
float b;
float c;
{
a=A[idx];
}
{
b=A[(idx+((262144/128)/2))];
}
c=(a+b);
tmp_4=c;
{
a=A[(idx+512)];
}
{
b=A[((idx+512)+((262144/128)/2))];
}
c=(a+b);
tmp_5=c;
a=tmp_4;
b=tmp_5;
c=(a+b);
shared_1[(tidx+0)]=c;
__syncthreads();
if ((idx<256))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+256)];
c=(a+b);
shared_1[(tidx+0)]=c;
}
__syncthreads();
if ((idx<128))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+128)];
c=(a+b);
shared_1[(tidx+0)]=c;
}
__syncthreads();
if ((idx<64))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+64)];
c=(a+b);
shared_1[(tidx+0)]=c;
}
__syncthreads();
if ((idx<32))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+32)];
c=(a+b);
shared_1[(tidx+0)]=c;
}
__syncthreads();
if ((idx<16))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+16)];
c=(a+b);
shared_1[(tidx+0)]=c;
}
__syncthreads();
if ((idx<8))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+8)];
c=(a+b);
shared_1[(tidx+0)]=c;
}
__syncthreads();
if ((idx<4))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+4)];
c=(a+b);
shared_1[(tidx+0)]=c;
}
__syncthreads();
if ((idx<2))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+2)];
c=(a+b);
shared_1[(tidx+0)]=c;
}
__syncthreads();
if ((idx<1))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+1)];
c=(a+b);
{
A[idx]=c;
}
}
}
#define COALESCED_NUM 16
#define blockDimX 512
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define globalDimX 65536
#define globalDimY 1
__global__ void reduction_complex_opt_0(float * A, float * B, int size, int segSize)
{
#pragma gCompiler gValue segSize 262144
int k;
float sum;
int nidx;
__shared__ float shared_0[512];
nidx=((((tidx/16)*2048)+(idx&15))+((idx/512)*16));
float tmp_4;
float tmp_5;
float tmp_2;
float tmp_3;
sum=0;
for (k=0; k<size; k=(k+262144))
{
float real;
float img;
struct float2 * tmp_0;
struct float2 tmp_1;
tmp_0=((struct float2 * )A);
tmp_1=tmp_0[(nidx+k)];
real=tmp_1.x;
img=tmp_1.y;
sum+=real;
sum+=img;
}
tmp_2=sum;
__syncthreads();
sum=0;
for (k=0; k<size; k=(k+262144))
{
float real;
float img;
struct float2 * tmp_0;
struct float2 tmp_1;
tmp_0=((struct float2 * )A);
tmp_1=tmp_0[((nidx+131072)+k)];
real=tmp_1.x;
img=tmp_1.y;
sum+=real;
sum+=img;
}
tmp_3=sum;
__syncthreads();
float a;
float b;
float c;
a=tmp_2;
b=tmp_3;
c=(a+b);
tmp_4=c;
sum=0;
for (k=0; k<size; k=(k+262144))
{
float real;
float img;
struct float2 * tmp_0;
struct float2 tmp_1;
tmp_0=((struct float2 * )A);
tmp_1=tmp_0[((nidx+65536)+k)];
real=tmp_1.x;
img=tmp_1.y;
sum+=real;
sum+=img;
}
tmp_2=sum;
__syncthreads();
sum=0;
for (k=0; k<size; k=(k+262144))
{
float real;
float img;
struct float2 * tmp_0;
struct float2 tmp_1;
tmp_0=((struct float2 * )A);
tmp_1=tmp_0[(((nidx+65536)+131072)+k)];
real=tmp_1.x;
img=tmp_1.y;
sum+=real;
sum+=img;
}
tmp_3=sum;
__syncthreads();
a=tmp_2;
b=tmp_3;
c=(a+b);
tmp_5=c;
a=tmp_4;
b=tmp_5;
c=(a+b);
shared_0[(tidx+0)]=c;
__syncthreads();
if ((nidx<32768))
{
float a;
float b;
float c;
a=shared_0[(tidx+0)];
b=shared_0[(tidx+256)];
c=(a+b);
shared_0[(tidx+0)]=c;
}
__syncthreads();
if ((nidx<16384))
{
float a;
float b;
float c;
a=shared_0[(tidx+0)];
b=shared_0[(tidx+128)];
c=(a+b);
shared_0[(tidx+0)]=c;
}
__syncthreads();
if ((nidx<8192))
{
float a;
float b;
float c;
a=shared_0[(tidx+0)];
b=shared_0[(tidx+64)];
c=(a+b);
shared_0[(tidx+0)]=c;
}
__syncthreads();
if ((nidx<4096))
{
float a;
float b;
float c;
a=shared_0[(tidx+0)];
b=shared_0[(tidx+32)];
c=(a+b);
shared_0[(tidx+0)]=c;
}
__syncthreads();
if ((nidx<2048))
{
float a;
float b;
float c;
a=shared_0[(tidx+0)];
b=shared_0[(tidx+16)];
c=(a+b);
{
B[nidx]=c;
}
}
}
#define COALESCED_NUM 16
#define blockDimX 512
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define globalDimX 512
#define globalDimY 1
__global__ void reduction_complex_opt_1(float * A, float * B, int size, int segSize)
{
#pragma gCompiler gValue segSize 262144
__shared__ float shared_1[512];
float tmp_6;
float tmp_7;
float a;
float b;
float c;
{
a=B[idx];
}
{
b=B[(idx+((262144/128)/2))];
}
c=(a+b);
tmp_6=c;
{
a=B[(idx+512)];
}
{
b=B[((idx+512)+((262144/128)/2))];
}
c=(a+b);
tmp_7=c;
a=tmp_6;
b=tmp_7;
c=(a+b);
shared_1[(tidx+0)]=c;
__syncthreads();
if ((idx<256))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+256)];
c=(a+b);
shared_1[(tidx+0)]=c;
}
__syncthreads();
if ((idx<128))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+128)];
c=(a+b);
shared_1[(tidx+0)]=c;
}
__syncthreads();
if ((idx<64))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+64)];
c=(a+b);
shared_1[(tidx+0)]=c;
}
__syncthreads();
if ((idx<32))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+32)];
c=(a+b);
shared_1[(tidx+0)]=c;
}
__syncthreads();
if ((idx<16))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+16)];
c=(a+b);
shared_1[(tidx+0)]=c;
}
__syncthreads();
if ((idx<8))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+8)];
c=(a+b);
shared_1[(tidx+0)]=c;
}
__syncthreads();
if ((idx<4))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+4)];
c=(a+b);
shared_1[(tidx+0)]=c;
}
__syncthreads();
if ((idx<2))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+2)];
c=(a+b);
shared_1[(tidx+0)]=c;
}
__syncthreads();
if ((idx<1))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+1)];
c=(a+b);
{
B[idx]=c;
}
}
}
#endif // #ifndef _REDUCTION_KERNEL_H_
| 71bce6c041f90f5dedb7463f863c9838d064109f.cu |
#ifndef _REDUCTION_KERNEL_H_
#define _REDUCTION_KERNEL_H_
#include <stdio.h>
#include "reduction.h"
__global__ void reduction_naive(float* d_odata, float* d_idata, int num_elements)
{
int idx = (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
d_odata[idx] = d_idata[idx]+d_idata[idx+num_elements/2];
}
#define COALESCED_NUM 16
#define blockDimX 512
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define globalDimX 65536
#define globalDimY 1
__global__ void reduction_opt_0(float * A, int size, int segSize)
{
#pragma gCompiler gValue segSize 262144
int k;
float sum;
int nidx;
__shared__ float shared_0[512];
nidx=((((tidx/16)*2048)+(idx&15))+((idx/512)*16));
float tmp_2;
float tmp_3;
float tmp_0;
float tmp_1;
sum=0;
for (k=0; k<size; k=(k+262144))
{
float r;
r=A[(nidx+k)];
sum+=r;
}
tmp_0=sum;
__syncthreads();
sum=0;
for (k=0; k<size; k=(k+262144))
{
float r;
r=A[((nidx+131072)+k)];
sum+=r;
}
tmp_1=sum;
__syncthreads();
float a;
float b;
float c;
a=tmp_0;
b=tmp_1;
c=(a+b);
tmp_2=c;
sum=0;
for (k=0; k<size; k=(k+262144))
{
float r;
r=A[((nidx+65536)+k)];
sum+=r;
}
tmp_0=sum;
__syncthreads();
sum=0;
for (k=0; k<size; k=(k+262144))
{
float r;
r=A[(((nidx+65536)+131072)+k)];
sum+=r;
}
tmp_1=sum;
__syncthreads();
a=tmp_0;
b=tmp_1;
c=(a+b);
tmp_3=c;
a=tmp_2;
b=tmp_3;
c=(a+b);
shared_0[(tidx+0)]=c;
__syncthreads();
if ((nidx<32768))
{
float a;
float b;
float c;
a=shared_0[(tidx+0)];
b=shared_0[(tidx+256)];
c=(a+b);
shared_0[(tidx+0)]=c;
}
__syncthreads();
if ((nidx<16384))
{
float a;
float b;
float c;
a=shared_0[(tidx+0)];
b=shared_0[(tidx+128)];
c=(a+b);
shared_0[(tidx+0)]=c;
}
__syncthreads();
if ((nidx<8192))
{
float a;
float b;
float c;
a=shared_0[(tidx+0)];
b=shared_0[(tidx+64)];
c=(a+b);
shared_0[(tidx+0)]=c;
}
__syncthreads();
if ((nidx<4096))
{
float a;
float b;
float c;
a=shared_0[(tidx+0)];
b=shared_0[(tidx+32)];
c=(a+b);
shared_0[(tidx+0)]=c;
}
__syncthreads();
if ((nidx<2048))
{
float a;
float b;
float c;
a=shared_0[(tidx+0)];
b=shared_0[(tidx+16)];
c=(a+b);
{
A[nidx]=c;
}
}
}
#define COALESCED_NUM 16
#define blockDimX 512
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define globalDimX 512
#define globalDimY 1
__global__ void reduction_opt_1(float * A, int size, int segSize)
{
#pragma gCompiler gValue segSize 262144
__shared__ float shared_1[512];
float tmp_4;
float tmp_5;
float a;
float b;
float c;
{
a=A[idx];
}
{
b=A[(idx+((262144/128)/2))];
}
c=(a+b);
tmp_4=c;
{
a=A[(idx+512)];
}
{
b=A[((idx+512)+((262144/128)/2))];
}
c=(a+b);
tmp_5=c;
a=tmp_4;
b=tmp_5;
c=(a+b);
shared_1[(tidx+0)]=c;
__syncthreads();
if ((idx<256))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+256)];
c=(a+b);
shared_1[(tidx+0)]=c;
}
__syncthreads();
if ((idx<128))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+128)];
c=(a+b);
shared_1[(tidx+0)]=c;
}
__syncthreads();
if ((idx<64))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+64)];
c=(a+b);
shared_1[(tidx+0)]=c;
}
__syncthreads();
if ((idx<32))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+32)];
c=(a+b);
shared_1[(tidx+0)]=c;
}
__syncthreads();
if ((idx<16))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+16)];
c=(a+b);
shared_1[(tidx+0)]=c;
}
__syncthreads();
if ((idx<8))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+8)];
c=(a+b);
shared_1[(tidx+0)]=c;
}
__syncthreads();
if ((idx<4))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+4)];
c=(a+b);
shared_1[(tidx+0)]=c;
}
__syncthreads();
if ((idx<2))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+2)];
c=(a+b);
shared_1[(tidx+0)]=c;
}
__syncthreads();
if ((idx<1))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+1)];
c=(a+b);
{
A[idx]=c;
}
}
}
#define COALESCED_NUM 16
#define blockDimX 512
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define globalDimX 65536
#define globalDimY 1
__global__ void reduction_complex_opt_0(float * A, float * B, int size, int segSize)
{
#pragma gCompiler gValue segSize 262144
int k;
float sum;
int nidx;
__shared__ float shared_0[512];
nidx=((((tidx/16)*2048)+(idx&15))+((idx/512)*16));
float tmp_4;
float tmp_5;
float tmp_2;
float tmp_3;
sum=0;
for (k=0; k<size; k=(k+262144))
{
float real;
float img;
struct float2 * tmp_0;
struct float2 tmp_1;
tmp_0=((struct float2 * )A);
tmp_1=tmp_0[(nidx+k)];
real=tmp_1.x;
img=tmp_1.y;
sum+=real;
sum+=img;
}
tmp_2=sum;
__syncthreads();
sum=0;
for (k=0; k<size; k=(k+262144))
{
float real;
float img;
struct float2 * tmp_0;
struct float2 tmp_1;
tmp_0=((struct float2 * )A);
tmp_1=tmp_0[((nidx+131072)+k)];
real=tmp_1.x;
img=tmp_1.y;
sum+=real;
sum+=img;
}
tmp_3=sum;
__syncthreads();
float a;
float b;
float c;
a=tmp_2;
b=tmp_3;
c=(a+b);
tmp_4=c;
sum=0;
for (k=0; k<size; k=(k+262144))
{
float real;
float img;
struct float2 * tmp_0;
struct float2 tmp_1;
tmp_0=((struct float2 * )A);
tmp_1=tmp_0[((nidx+65536)+k)];
real=tmp_1.x;
img=tmp_1.y;
sum+=real;
sum+=img;
}
tmp_2=sum;
__syncthreads();
sum=0;
for (k=0; k<size; k=(k+262144))
{
float real;
float img;
struct float2 * tmp_0;
struct float2 tmp_1;
tmp_0=((struct float2 * )A);
tmp_1=tmp_0[(((nidx+65536)+131072)+k)];
real=tmp_1.x;
img=tmp_1.y;
sum+=real;
sum+=img;
}
tmp_3=sum;
__syncthreads();
a=tmp_2;
b=tmp_3;
c=(a+b);
tmp_5=c;
a=tmp_4;
b=tmp_5;
c=(a+b);
shared_0[(tidx+0)]=c;
__syncthreads();
if ((nidx<32768))
{
float a;
float b;
float c;
a=shared_0[(tidx+0)];
b=shared_0[(tidx+256)];
c=(a+b);
shared_0[(tidx+0)]=c;
}
__syncthreads();
if ((nidx<16384))
{
float a;
float b;
float c;
a=shared_0[(tidx+0)];
b=shared_0[(tidx+128)];
c=(a+b);
shared_0[(tidx+0)]=c;
}
__syncthreads();
if ((nidx<8192))
{
float a;
float b;
float c;
a=shared_0[(tidx+0)];
b=shared_0[(tidx+64)];
c=(a+b);
shared_0[(tidx+0)]=c;
}
__syncthreads();
if ((nidx<4096))
{
float a;
float b;
float c;
a=shared_0[(tidx+0)];
b=shared_0[(tidx+32)];
c=(a+b);
shared_0[(tidx+0)]=c;
}
__syncthreads();
if ((nidx<2048))
{
float a;
float b;
float c;
a=shared_0[(tidx+0)];
b=shared_0[(tidx+16)];
c=(a+b);
{
B[nidx]=c;
}
}
}
#define COALESCED_NUM 16
#define blockDimX 512
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define globalDimX 512
#define globalDimY 1
__global__ void reduction_complex_opt_1(float * A, float * B, int size, int segSize)
{
#pragma gCompiler gValue segSize 262144
__shared__ float shared_1[512];
float tmp_6;
float tmp_7;
float a;
float b;
float c;
{
a=B[idx];
}
{
b=B[(idx+((262144/128)/2))];
}
c=(a+b);
tmp_6=c;
{
a=B[(idx+512)];
}
{
b=B[((idx+512)+((262144/128)/2))];
}
c=(a+b);
tmp_7=c;
a=tmp_6;
b=tmp_7;
c=(a+b);
shared_1[(tidx+0)]=c;
__syncthreads();
if ((idx<256))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+256)];
c=(a+b);
shared_1[(tidx+0)]=c;
}
__syncthreads();
if ((idx<128))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+128)];
c=(a+b);
shared_1[(tidx+0)]=c;
}
__syncthreads();
if ((idx<64))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+64)];
c=(a+b);
shared_1[(tidx+0)]=c;
}
__syncthreads();
if ((idx<32))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+32)];
c=(a+b);
shared_1[(tidx+0)]=c;
}
__syncthreads();
if ((idx<16))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+16)];
c=(a+b);
shared_1[(tidx+0)]=c;
}
__syncthreads();
if ((idx<8))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+8)];
c=(a+b);
shared_1[(tidx+0)]=c;
}
__syncthreads();
if ((idx<4))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+4)];
c=(a+b);
shared_1[(tidx+0)]=c;
}
__syncthreads();
if ((idx<2))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+2)];
c=(a+b);
shared_1[(tidx+0)]=c;
}
__syncthreads();
if ((idx<1))
{
float a;
float b;
float c;
a=shared_1[(tidx+0)];
b=shared_1[(tidx+1)];
c=(a+b);
{
B[idx]=c;
}
}
}
#endif // #ifndef _REDUCTION_KERNEL_H_
|
203718c8c30e6ca9c2c8cfad2c77e25c71b86ef1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <cstring> // for strcpy
using namespace std;
struct MyStruct
{
int a;
int b;
int c;
};
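// The demo below walks through basic pointer declarations, pointer arithmetic over an
// array, a pointer to a pointer, an array of C strings, struct member access through a
// pointer, and finally a call through a function pointer.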
int main(void)
{
int *ptr;
char *ptr1;
int **ptr2;
int(*ptr3)[3];
int *(*ptr4)[4];
int temp = sizeof(*ptr);
printf("result is :%d\n",temp);
int array[20];
int *ptr5 = array;
for (int i = 0; i < 20; i++)
{
array[i] = i;
}
for (int i = 0; i < 20; i++)
{
(*ptr5)++;
ptr5++;
}
for (int i = 0; i < 20; i++)
{
printf("number %d: value %d\n", i, array[i]);
}
int a1 = 12;
int b1;
int *p1;
int **ptr6;
p1 = &a1;
*p1 = 24;
ptr6 = &p1;
*ptr6 = &b1;
**ptr6 = 34;
printf("a1 is %d,b1 is %d,*p1 is %d,**ptr6 is %d\n", a1, b1, *p1, **ptr6);
const char *str1[3] = {
"Hello,this is a sample!\n",
"Hi,good morning\n",
"Hello world\n"
};
char s[80];
strcpy(s, str1[0]);
printf(s);
cout << **str1 << endl;
strcpy(s, str1[1]);
printf(s);
strcpy(s, str1[2]);
printf(s);
MyStruct ss = { 20,30,40 };
MyStruct *ptr7 = &ss;
int *pstr = (int*)&ss;
cout << sizeof(ptr7) << " and " << sizeof(pstr) << endl;
cout << ptr7->a << " " << ptr7->b << " " << ptr7->c << endl;
cout << *pstr << " " << *(pstr + 1) << " " << *(pstr + 2) << endl;
int fun1(char*, int);
int(*pfun1)(char*, int);
pfun1 = fun1;
int res = (*pfun1)("abcdefg", 7);
}
int fun1(char* x1, int x2){
cout << x1 << " " << x2 << endl;
return 0;
}
| 203718c8c30e6ca9c2c8cfad2c77e25c71b86ef1.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <cstring> // for strcpy
using namespace std;
struct MyStruct
{
int a;
int b;
int c;
};
int main(void)
{
int *ptr;
char *ptr1;
int **ptr2;
int(*ptr3)[3];
int *(*ptr4)[4];
int temp = sizeof(*ptr);
printf("result is :%d\n",temp);
int array[20];
int *ptr5 = array;
for (int i = 0; i < 20; i++)
{
array[i] = i;
}
for (int i = 0; i < 20; i++)
{
(*ptr5)++;
ptr5++;
}
for (int i = 0; i < 20; i++)
{
printf("number %d: value %d\n", i, array[i]);
}
int a1 = 12;
int b1;
int *p1;
int **ptr6;
p1 = &a1;
*p1 = 24;
ptr6 = &p1;
*ptr6 = &b1;
**ptr6 = 34;
printf("a1 is %d,b1 is %d,*p1 is %d,**ptr6 is %d\n", a1, b1, *p1, **ptr6);
const char *str1[3] = {
"Hello,this is a sample!\n",
"Hi,good morning\n",
"Hello world\n"
};
char s[80];
strcpy(s, str1[0]);
printf(s);
cout << **str1 << endl;
strcpy(s, str1[1]);
printf(s);
strcpy(s, str1[2]);
printf(s);
MyStruct ss = { 20,30,40 };
MyStruct *ptr7 = &ss;
int *pstr = (int*)&ss;
cout << sizeof(ptr7) << " and " << sizeof(pstr) << endl;
cout << ptr7->a << " " << ptr7->b << " " << ptr7->c << endl;
cout << *pstr << " " << *(pstr + 1) << " " << *(pstr + 2) << endl;
int fun1(char*, int);
int(*pfun1)(char*, int);
pfun1 = fun1;
int res = (*pfun1)("abcdefg", 7);
}
int fun1(char* x1, int x2){
cout << x1 << " " << x2 << endl;
return 0;
}
|
9d32e11d24e9d6381e12b393a7c76d90da32ba39.hip | // !!! This is a file automatically generated by hipify!!!
//Udacity HW 6
//Poisson Blending
/* Background
==========
The goal for this assignment is to take one image (the source) and
paste it into another image (the destination) attempting to match the
two images so that the pasting is non-obvious. This is
known as a "seamless clone".
The basic ideas are as follows:
1) Figure out the interior and border of the source image
2) Use the values of the border pixels in the destination image
as boundary conditions for solving a Poisson equation that tells
us how to blend the images.
No pixels from the destination except pixels on the border
are used to compute the match.
Solving the Poisson Equation
============================
There are multiple ways to solve this equation - we choose an iterative
method - specifically the Jacobi method. Iterative methods start with
a guess of the solution and then iterate to try and improve the guess
until it stops changing. If the problem was well-suited for the method
then it will stop and where it stops will be the solution.
The Jacobi method is the simplest iterative method and converges slowly -
that is we need a lot of iterations to get to the answer, but it is the
easiest method to write.
Jacobi Iterations
=================
Our initial guess is going to be the source image itself. This is a pretty
good guess for what the blended image will look like and it means that
we won't have to do as many iterations compared to if we had started far
from the final solution.
ImageGuess_prev (Floating point)
ImageGuess_next (Floating point)
DestinationImg
SourceImg
Follow these steps to implement one iteration:
1) For every pixel p in the interior, compute two sums over the four neighboring pixels:
Sum1: If the neighbor is in the interior then += ImageGuess_prev[neighbor]
else if the neighbor is on the border then += DestinationImg[neighbor]
Sum2: += SourceImg[p] - SourceImg[neighbor] (for all four neighbors)
2) Calculate the new pixel value:
float newVal= (Sum1 + Sum2) / 4.f <------ Notice that the result is FLOATING POINT
ImageGuess_next[p] = min(255, max(0, newVal)); //clamp to [0, 255]
In this assignment we will do 800 iterations.
*/
#include "../utils.h"
#include <thrust/host_vector.h>
void your_blend(const uchar4* const h_sourceImg, //IN
const size_t numRowsSource, const size_t numColsSource,
const uchar4* const h_destImg, //IN
uchar4* const h_blendedImg) //OUT
{
/* To Recap here are the steps you need to implement
1) Compute a mask of the pixels from the source image to be copied
The pixels that shouldn't be copied are completely white, they
have R=255, G=255, B=255. Any other pixels SHOULD be copied.
2) Compute the interior and border regions of the mask. An interior
pixel has all 4 neighbors also inside the mask. A border pixel is
in the mask itself, but has at least one neighbor that isn't.
3) Separate out the incoming image into three separate channels
4) Create two float(!) buffers for each color channel that will
act as our guesses. Initialize them to the respective color
channel of the source image since that will act as our initial guess.
5) For each color channel perform the Jacobi iteration described
above 800 times.
6) Create the output image by replacing all the interior pixels
in the destination image with the result of the Jacobi iterations.
Just cast the floating point values to unsigned chars since we have
already made sure to clamp them to the correct range.
Since this is the final assignment, we provide little boilerplate code to
help you. Notice that all the input/output pointers are HOST pointers.
You will have to allocate all of your own GPU memory and perform your own
memcopies to get data in and out of the GPU memory.
Remember to wrap all of your calls with checkCudaErrors() to catch any
thing that might go wrong. After each kernel call do:
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
to catch any errors that happened while executing the kernel.
*/
}
| 9d32e11d24e9d6381e12b393a7c76d90da32ba39.cu | //Udacity HW 6
//Poisson Blending
/* Background
==========
The goal for this assignment is to take one image (the source) and
paste it into another image (the destination) attempting to match the
two images so that the pasting is non-obvious. This is
known as a "seamless clone".
The basic ideas are as follows:
1) Figure out the interior and border of the source image
2) Use the values of the border pixels in the destination image
as boundary conditions for solving a Poisson equation that tells
us how to blend the images.
No pixels from the destination except pixels on the border
are used to compute the match.
Solving the Poisson Equation
============================
There are multiple ways to solve this equation - we choose an iterative
method - specifically the Jacobi method. Iterative methods start with
a guess of the solution and then iterate to try and improve the guess
until it stops changing. If the problem was well-suited for the method
then it will stop and where it stops will be the solution.
The Jacobi method is the simplest iterative method and converges slowly -
that is we need a lot of iterations to get to the answer, but it is the
easiest method to write.
Jacobi Iterations
=================
Our initial guess is going to be the source image itself. This is a pretty
good guess for what the blended image will look like and it means that
we won't have to do as many iterations compared to if we had started far
from the final solution.
ImageGuess_prev (Floating point)
ImageGuess_next (Floating point)
DestinationImg
SourceImg
Follow these steps to implement one iteration:
1) For every pixel p in the interior, compute two sums over the four neighboring pixels:
Sum1: If the neighbor is in the interior then += ImageGuess_prev[neighbor]
else if the neighbor is on the border then += DestinationImg[neighbor]
Sum2: += SourceImg[p] - SourceImg[neighbor] (for all four neighbors)
2) Calculate the new pixel value:
float newVal= (Sum1 + Sum2) / 4.f <------ Notice that the result is FLOATING POINT
ImageGuess_next[p] = min(255, max(0, newVal)); //clamp to [0, 255]
In this assignment we will do 800 iterations.
*/
#include "../utils.h"
#include <thrust/host_vector.h>
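/*
Illustrative sketch of what one Jacobi update for a single color channel could look
like; it only restates the two sums and the clamped average described above. The
buffer names (d_interior, d_border, d_prev, d_next, ...) are hypothetical, and the
interior is assumed to stay away from the image edge so that all four neighbor indices
are valid. A full solution would ping-pong d_prev and d_next for the 800 iterations
and run this once per color channel.
*/
__global__ void jacobi_step_sketch(const unsigned char* const d_interior, /* hypothetical mask: 1 = interior pixel */
const unsigned char* const d_border, /* hypothetical mask: 1 = border pixel */
const float* const d_source,
const float* const d_dest,
const float* const d_prev,
float* const d_next,
const size_t numRowsSource,
const size_t numColsSource)
{
const size_t c = blockIdx.x * blockDim.x + threadIdx.x;
const size_t r = blockIdx.y * blockDim.y + threadIdx.y;
if (r >= numRowsSource || c >= numColsSource)
return;
const size_t p = r * numColsSource + c;
if (!d_interior[p]) /* only interior pixels are updated */
return;
const size_t neighbors[4] = { p - numColsSource, p + numColsSource, p - 1, p + 1 };
float sum1 = 0.f;
float sum2 = 0.f;
for (int i = 0; i < 4; ++i) {
const size_t q = neighbors[i];
if (d_interior[q]) sum1 += d_prev[q]; /* interior neighbor: previous guess */
else if (d_border[q]) sum1 += d_dest[q]; /* border neighbor: destination image */
sum2 += d_source[p] - d_source[q]; /* gradient term from the source image */
}
const float newVal = (sum1 + sum2) / 4.f;
d_next[p] = fminf(255.f, fmaxf(0.f, newVal)); /* clamp to [0, 255] */
}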
void your_blend(const uchar4* const h_sourceImg, //IN
const size_t numRowsSource, const size_t numColsSource,
const uchar4* const h_destImg, //IN
uchar4* const h_blendedImg) //OUT
{
/* To Recap here are the steps you need to implement
1) Compute a mask of the pixels from the source image to be copied
The pixels that shouldn't be copied are completely white, they
have R=255, G=255, B=255. Any other pixels SHOULD be copied.
2) Compute the interior and border regions of the mask. An interior
pixel has all 4 neighbors also inside the mask. A border pixel is
in the mask itself, but has at least one neighbor that isn't.
3) Separate out the incoming image into three separate channels
4) Create two float(!) buffers for each color channel that will
act as our guesses. Initialize them to the respective color
channel of the source image since that will act as our initial guess.
5) For each color channel perform the Jacobi iteration described
above 800 times.
6) Create the output image by replacing all the interior pixels
in the destination image with the result of the Jacobi iterations.
Just cast the floating point values to unsigned chars since we have
already made sure to clamp them to the correct range.
Since this is the final assignment, we provide little boilerplate code to
help you. Notice that all the input/output pointers are HOST pointers.
You will have to allocate all of your own GPU memory and perform your own
memcopies to get data in and out of the GPU memory.
Remember to wrap all of your calls with checkCudaErrors() to catch any
thing that might go wrong. After each kernel call do:
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
to catch any errors that happened while executing the kernel.
*/
}
|
b4e9ab34a9df9682243f75334ede49285c6da7ef.hip | // !!! This is a file automatically generated by hipify!!!
//**Turbo code decoding, Log-MAP algorithm**//
//**Assumes BPSK transmission over an AWGN channel, SNR Eb/N0 = 1 dB, Lc = 4Eb/N0**//
#include<stdio.h>
#include<math.h>
#include <time.h>
#include <stdlib.h>
#include <cuda_device_runtime_api.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include "device_launch_parameters.h"
#include<fstream>
using namespace std;
#define SIZE 8
#define L 3
#define Lc 40
#define Es 1
#define Pi 3.14159265358979
#define Epsilon myexp(1)
#pragma warning(disable:4996)
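// Device-side approximation of exp(x): the integer part of x is handled by repeated
// multiplication (or division, when negative) by e ~= 2.718281, and the fractional
// part by a truncated Taylor series with t = 10 terms.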
double __device__ myexp(double x) {
int i, k, m, t;
int xm = (int)x;
double sum;
double e;
double ef;
double z;
double sub = x - xm;
m = 1; // factorial denominator
e = 1.0; // e raised to the integer part xm
ef = 1.0;
t = 10; // number of Taylor-series terms (precision)
z = 1; // numerator initialization
sum = 1;
// printf("x=%f\n",x);
// printf("sub=%f\n",sub);
if (xm < 0) { // check whether xm is greater than 0
xm = (-xm);
for (k = 0; k < xm; k++) { ef *= 2.718281; }
e /= ef;
}
else { for (k = 0; k < xm; k++) { e *= 2.718281; } }
// printf("e=%f\n",e);
// printf("xm=%d\n",xm);
for (i = 1; i < t; i++) {
m *= i;
z *= sub;
sum += z / m;
}
return sum * e;
}
double __device__ mk(double a, double s, double p, int c, int u) //computation of Mk(e)//
{
double mk;
if (u == 1)
mk = a - log(1 + myexp(a)) + 0.5 * s + 0.5 * p * (2 * double(c) - 1);
else
mk = -log(1 + myexp(a)) - 0.5 * s + 0.5 * p * (2 * double(c) - 1);
return(mk);
}
double __device__ abk(double t1, double k1, double t2, double k2) //computation of Ak(e) and Bk(e)//
{
double s1, s2, s;
s1 = exp(t1 + k1); s2 = exp(t2 + k2);
s = log(s1 + s2);
return(s);
}
//component decoder//
void __device__ DEC(double a[SIZE + 1], double ys[SIZE + 1], double yp[SIZE + 1], double e[SIZE])
{
double me1[SIZE + 1], me2[SIZE + 1], me3[SIZE + 1], me4[SIZE + 1], me5[SIZE + 1], me6[SIZE + 1], me7[SIZE + 1], me8[SIZE + 1];
double a0[SIZE], a1[SIZE], a2[SIZE], a3[SIZE], b0[SIZE + 1], b1[SIZE + 1], b2[SIZE + 1], b3[SIZE + 1];
int i, u, c;
for (i = 1; i <= SIZE; i++)
{
c = 0; u = 0;
me1[i] = mk (a[i], ys[i], yp[i], c, u);
c = 1; u = 1;
me2[i] = mk (a[i], ys[i], yp[i], c, u);
c = 0; u = 1;
me3[i] = mk (a[i], ys[i], yp[i], c, u);
c = 1; u = 0;
me4[i] = mk (a[i], ys[i], yp[i], c, u);
c = 0; u = 0;
me5[i] = mk (a[i], ys[i], yp[i], c, u);
c = 1; u = 1;
me6[i] = mk (a[i], ys[i], yp[i], c, u);
c = 0; u = 1;
me7[i] = mk (a[i], ys[i], yp[i], c, u);
c = 1; u = 0;
me8[i] = mk (a[i], ys[i], yp[i], c, u);
}
a0[0] = 1; a1[0] = 0; a2[0] = 0; a3[0] = 0;
b0[SIZE] = 0; b1[SIZE] = 0; b2[SIZE] = 1; b3[SIZE] = 0;
for (i = 1; i < SIZE; i++)
{
a0[i] = abk(a0[i - 1], me1[i], a2[i - 1], me6[i]);
a1[i] = abk(a0[i - 1], me2[i], a2[i - 1], me5[i]);
a2[i] = abk(a1[i - 1], me4[i], a3[i - 1], me7[i]);
a3[i] = abk(a1[i - 1], me3[i], a3[i - 1], me8[i]);
}
for (i = SIZE - 1; i >= 1; i--)
{
b0[i] = abk(b0[i + 1], me1[i + 1], b1[i + 1], me2[i + 1]);
b1[i] = abk(b3[i + 1], me3[i + 1], b2[i + 1], me4[i + 1]);
b2[i] = abk(b1[i + 1], me5[i + 1], b0[i + 1], me6[i + 1]);
b3[i] = abk(b2[i + 1], me7[i + 1], b3[i + 1], me8[i + 1]);
}
for (i = 1; i < SIZE; i++)
e[i] = log(myexp(a0[i - 1] + 0.5 * yp[i] + b1[i]) + myexp(a1[i - 1] - 0.5 * yp[i] + b2[i]) + myexp(a2[i - 1] + 0.5 * yp[i] + b0[i]) + myexp(a3[i - 1] - 0.5 * yp[i] + b3[i])) - log(myexp(a0[i - 1] - 0.5 * yp[i] + b0[i]) + myexp(a1[i - 1] + 0.5 * yp[i] + b3[i]) + myexp(a2[i - 1] - 0.5 * yp[i] + b1[i]) + myexp(a3[i - 1] + 0.5 * yp[i] + b2[i]));
}
void __device__ interlace(double a[SIZE + 1], double b[SIZE + 1]) //interleaver//
{
int i;
for (i = 1; i < SIZE + 1; i++) // reverse-order interleaver
{
b[i] = a[SIZE + 1 - i];
}
}
void __device__ uninterlace(double a[SIZE + 1], double b[SIZE + 1]) //deinterleaver//
{
int i;
for (i = 1; i < SIZE + 1; i++) // reverse-order deinterleaving
{
b[i] = a[SIZE + 1 - i];
}
}
__global__ void cudadecode(double *dataA, double *A)
{
int x[SIZE + 1][SIZE + 1], y[SIZE + 1][3], y0[SIZE + 1], y1[SIZE + 1], y2[SIZE + 1];
double y0_in[SIZE + 1], y00_in[SIZE + 1], y1_in[SIZE + 1], y2_in[SIZE + 1];
double a[SIZE + 1], e[SIZE+1];
double out1[SIZE + 1], out2[SIZE + 1];
int i, j, k;
int data1[16];
int p = blockIdx.x * blockDim.x + threadIdx.x;
if (p < 250000)// thread-count bound; change according to the data size
{
k = 0;
for (i = 15; i >= 0; i--)
{
data1[i] = int(dataA[p] - floor(dataA[p] / 10) * 10);
dataA[p] /= 10;
}
for (i = 1; i <= SIZE; i++)
for (j = 1; j < 3; j++)
{
x[i][j] = data1[k];
k++;
if (k == 15)
k = 0;
}
for (i = 1; i <= SIZE; i++)
for (j = 1; j < 3; j++)
{
y[i][j] = double(2 * x[i][j] - 1);
}
for (i = 1; i <= SIZE; i++) //serial-to-parallel conversion, channel-reliability weighting//
for (j = 1; j < 3; j++)
{
if (j == 1)
{
y0[i] = y[i][j];
y0_in[i] = Lc * double(y0[i]);
}
else
{
if (i % 2 == 1)
{
y1[i] = y[i][j];
y1_in[i] = Lc * double(y1[i]);
y2[i] = 0;
y2_in[i] = Lc * double(y2[i]);
}
else
{
y1[i] = 0;
y1_in[i] = Lc * double(y1[i]);
y2[i] = y[i][j];
y2_in[i] = Lc * double(y2[i]);
}
}
}
for (i = 1; i <= SIZE; i++)
a[i] = 0;
interlace(y0_in, y00_in);
for (k = 1; k < 6; k++) //iterate 6 times//
{
DEC(a, y0_in, y1_in, e);
interlace(e, a);
DEC(a, y00_in, y2_in, e);
uninterlace(e, a);
}
DEC(a, y0_in, y1_in, e);
interlace(e, a);
DEC(a, y00_in, y2_in, e);
for (i = 1; i <= SIZE; i++)
out1[i] = a[i] + e[i] + y00_in[i];
uninterlace(out1, out2);
for (i = 1; i <= SIZE; i++) //hard decision, output the decoded codeword//
{
if (out2[i] >= 0)
A[8 * p + i - 1] = 1;
else
A[8 * p + i - 1] = 0;
;
}
}
}
int main()
{
int i, j, k;
double* dataA = (double*)malloc(sizeof(double) * 250000);// allocate host buffers for the three color channels
double* dataB = (double*)malloc(sizeof(double) * 250000);
double* dataC = (double*)malloc(sizeof(double) * 250000);
double *A = (double*)malloc(sizeof(double) * 8 * 250000);// allocate host output arrays for the three color channels
double *B = (double*)malloc(sizeof(double) * 8 * 250000);
double *C = (double*)malloc(sizeof(double) * 8 * 250000);
double *d_dataA, *d_dataB, *d_dataC, *d_dataAA, *d_dataBB, *d_dataCC;
hipMalloc((void**)&d_dataA, sizeof(double) * 250000);// allocate device buffers for the three color channels
hipMalloc((void**)&d_dataB, sizeof(double) * 250000);
hipMalloc((void**)&d_dataC, sizeof(double) * 250000);
hipMalloc((void**)&d_dataAA, sizeof(double) * 8 * 250000);// allocate device output buffers for the three color channels
hipMalloc((void**)&d_dataBB, sizeof(double) * 8 * 250000);
hipMalloc((void**)&d_dataCC, sizeof(double) * 8 * 250000);
FILE* fw_red = fopen("E:\\matin\\visual studio projects\\turbo_encode\\red_encode.txt", "r");
for (j = 0; j < 250000; j++)
fscanf(fw_red, "%lf", &dataA[j]);
fclose(fw_red); // read red
FILE* fw_green = fopen("E:\\matin\\visual studio projects\\turbo_encode\\green_encode.txt", "r");
for (j = 0; j < 250000; j++)
fscanf(fw_green, "lf", &dataB[j]);//green
fclose(fw_green);
FILE* fw_blue = fopen("E:\\matin\\visual studio projects\\turbo_encode\\blue_encode.txt", "r");
for (j = 0; j < 250000; j++)
fscanf(fw_blue, "%lf", &dataC[j]);//blue
fclose(fw_blue);
clock_t start = clock();
hipMemcpy(d_dataA, dataA, sizeof(double) * 250000, hipMemcpyHostToDevice);// copy the original red data to device memory
hipMemcpy(d_dataAA, A, sizeof(double) * 8 * 250000, hipMemcpyHostToDevice);// copy the red output array to device memory
hipMemcpy(d_dataB, dataB, sizeof(double) * 250000, hipMemcpyHostToDevice);// green
hipMemcpy(d_dataBB, B, sizeof(double) * 8 * 250000, hipMemcpyHostToDevice);
hipMemcpy(d_dataC, dataC, sizeof(double) * 250000, hipMemcpyHostToDevice);// blue
hipMemcpy(d_dataCC, C, sizeof(double) * 8 * 250000, hipMemcpyHostToDevice);
cudadecode << <256, 1024 >> > (d_dataA, d_dataAA);// CUDA decoding kernel
cudadecode << <256, 1024 >> > (d_dataB, d_dataBB);// CUDA decoding kernel
cudadecode << <256, 1024 >> > (d_dataC, d_dataCC);// CUDA decoding kernel
hipMemcpy(A, d_dataAA, sizeof(double) * 8 * 250000, hipMemcpyDeviceToHost);// copy decoding results from device memory back to host
hipMemcpy(B, d_dataBB, sizeof(double)* 8 * 250000, hipMemcpyDeviceToHost);
hipMemcpy(C, d_dataCC, sizeof(double) * 8 * 250000, hipMemcpyDeviceToHost);
clock_t end = clock();
double endtime = (double)(end - start) / CLOCKS_PER_SEC;
cout << "totaltime:" << endtime * 1000 << "ms" << endl;
ofstream out_red("E:\\matin\\visual studio projects\\turbo_encode\\red_decode.txt");
for (int k = 0; k < 250000 * 8; k++)// write rgb output
{
out_red << A[k];
if ((k + 1) % 8 == 0)
{
out_red << "\n";
}
}
out_red.close();
ofstream out_green("E:\\matin\\visual studio projects\\turbo_encode\\green_decode.txt");
for (int k = 0; k < 250000 * 8; k++)// write rgb output
{
out_green << B[k];
if ((k + 1) % 8 == 0)
{
out_green << "\n";
}
}
out_green.close();
ofstream out_blue("E:\\matin\\visual studio projects\\turbo_encode\\blue_decode.txt");
for (int k = 0; k < 250000 * 8; k++)// write rgb output
{
out_blue << C[k];
if ((k + 1) % 8 == 0)
{
out_blue << "\n";
}
}
free(dataA);
free(dataB);
free(dataC);
hipFree(d_dataA);
hipFree(d_dataB);
hipFree(d_dataC);
hipFree(d_dataAA);
hipFree(d_dataBB);
hipFree(d_dataCC);
}
| b4e9ab34a9df9682243f75334ede49285c6da7ef.cu | //**Turbo码译码,Log-MAP算法**//
//**假设是AWGN信道上的BPSK传输,信噪比Eb/N0=1dB,Lc=4Eb/N0**//
#include<stdio.h>
#include<math.h>
#include <time.h>
#include <stdlib.h>
#include <cuda_device_runtime_api.h>
#include <cuda_runtime.h>
#include <iostream>
#include "device_launch_parameters.h"
#include<fstream>
using namespace std;
#define SIZE 8
#define L 3
#define Lc 40
#define Es 1
#define Pi 3.14159265358979
#define Epsilon myexp(1)
#pragma warning(disable:4996)
double __device__ myexp(double x) {
int i, k, m, t;
int xm = (int)x;
double sum;
double e;
double ef;
double z;
double sub = x - xm;
m = 1; // factorial denominator
e = 1.0; // e raised to the integer part xm
ef = 1.0;
t = 10; // number of Taylor-series terms (precision)
z = 1; // numerator initialization
sum = 1;
// printf("x=%f\n",x);
// printf("sub=%f\n",sub);
if (xm < 0) { // check whether xm is greater than 0
xm = (-xm);
for (k = 0; k < xm; k++) { ef *= 2.718281; }
e /= ef;
}
else { for (k = 0; k < xm; k++) { e *= 2.718281; } }
// printf("e=%f\n",e);
// printf("xm=%d\n",xm);
for (i = 1; i < t; i++) {
m *= i;
z *= sub;
sum += z / m;
}
return sum * e;
}
double __device__ mk(double a, double s, double p, int c, int u) //computation of Mk(e)//
{
double mk;
if (u == 1)
mk = a - log(1 + myexp(a)) + 0.5 * s + 0.5 * p * (2 * double(c) - 1);
else
mk = -log(1 + myexp(a)) - 0.5 * s + 0.5 * p * (2 * double(c) - 1);
return(mk);
}
double __device__ abk(double t1, double k1, double t2, double k2) //computation of Ak(e) and Bk(e)//
{
double s1, s2, s;
s1 = exp(t1 + k1); s2 = exp(t2 + k2);
s = log(s1 + s2);
return(s);
}
//component decoder//
void __device__ DEC(double a[SIZE + 1], double ys[SIZE + 1], double yp[SIZE + 1], double e[SIZE])
{
double me1[SIZE + 1], me2[SIZE + 1], me3[SIZE + 1], me4[SIZE + 1], me5[SIZE + 1], me6[SIZE + 1], me7[SIZE + 1], me8[SIZE + 1];
double a0[SIZE], a1[SIZE], a2[SIZE], a3[SIZE], b0[SIZE + 1], b1[SIZE + 1], b2[SIZE + 1], b3[SIZE + 1];
int i, u, c;
for (i = 1; i <= SIZE; i++)
{
c = 0; u = 0;
me1[i] = mk (a[i], ys[i], yp[i], c, u);
c = 1; u = 1;
me2[i] = mk (a[i], ys[i], yp[i], c, u);
c = 0; u = 1;
me3[i] = mk (a[i], ys[i], yp[i], c, u);
c = 1; u = 0;
me4[i] = mk (a[i], ys[i], yp[i], c, u);
c = 0; u = 0;
me5[i] = mk (a[i], ys[i], yp[i], c, u);
c = 1; u = 1;
me6[i] = mk (a[i], ys[i], yp[i], c, u);
c = 0; u = 1;
me7[i] = mk (a[i], ys[i], yp[i], c, u);
c = 1; u = 0;
me8[i] = mk (a[i], ys[i], yp[i], c, u);
}
a0[0] = 1; a1[0] = 0; a2[0] = 0; a3[0] = 0;
b0[SIZE] = 0; b1[SIZE] = 0; b2[SIZE] = 1; b3[SIZE] = 0;
for (i = 1; i < SIZE; i++)
{
a0[i] = abk(a0[i - 1], me1[i], a2[i - 1], me6[i]);
a1[i] = abk(a0[i - 1], me2[i], a2[i - 1], me5[i]);
a2[i] = abk(a1[i - 1], me4[i], a3[i - 1], me7[i]);
a3[i] = abk(a1[i - 1], me3[i], a3[i - 1], me8[i]);
}
for (i = SIZE - 1; i >= 1; i--)
{
b0[i] = abk(b0[i + 1], me1[i + 1], b1[i + 1], me2[i + 1]);
b1[i] = abk(b3[i + 1], me3[i + 1], b2[i + 1], me4[i + 1]);
b2[i] = abk(b1[i + 1], me5[i + 1], b0[i + 1], me6[i + 1]);
b3[i] = abk(b2[i + 1], me7[i + 1], b3[i + 1], me8[i + 1]);
}
for (i = 1; i < SIZE; i++)
e[i] = log(myexp(a0[i - 1] + 0.5 * yp[i] + b1[i]) + myexp(a1[i - 1] - 0.5 * yp[i] + b2[i]) + myexp(a2[i - 1] + 0.5 * yp[i] + b0[i]) + myexp(a3[i - 1] - 0.5 * yp[i] + b3[i])) - log(myexp(a0[i - 1] - 0.5 * yp[i] + b0[i]) + myexp(a1[i - 1] + 0.5 * yp[i] + b3[i]) + myexp(a2[i - 1] - 0.5 * yp[i] + b1[i]) + myexp(a3[i - 1] + 0.5 * yp[i] + b2[i]));
}
void __device__ interlace(double a[SIZE + 1], double b[SIZE + 1]) //interleaver//
{
int i;
for (i = 1; i < SIZE + 1; i++) // reverse-order interleaver
{
b[i] = a[SIZE + 1 - i];
}
}
void __device__ uninterlace(double a[SIZE + 1], double b[SIZE + 1]) //deinterleaver//
{
int i;
for (i = 1; i < SIZE + 1; i++) // reverse-order deinterleaving
{
b[i] = a[SIZE + 1 - i];
}
}
__global__ void cudadecode(double *dataA, double *A)
{
int x[SIZE + 1][SIZE + 1], y[SIZE + 1][3], y0[SIZE + 1], y1[SIZE + 1], y2[SIZE + 1];
double y0_in[SIZE + 1], y00_in[SIZE + 1], y1_in[SIZE + 1], y2_in[SIZE + 1];
double a[SIZE + 1], e[SIZE+1];
double out1[SIZE + 1], out2[SIZE + 1];
int i, j, k;
int data1[16];
int p = blockIdx.x * blockDim.x + threadIdx.x;
if (p < 250000)// thread-count bound; change according to the data size
{
k = 0;
for (i = 15; i >= 0; i--)
{
data1[i] = int(dataA[p] - floor(dataA[p] / 10) * 10);
dataA[p] /= 10;
}
for (i = 1; i <= SIZE; i++)
for (j = 1; j < 3; j++)
{
x[i][j] = data1[k];
k++;
if (k == 15)
k = 0;
}
for (i = 1; i <= SIZE; i++)
for (j = 1; j < 3; j++)
{
y[i][j] = double(2 * x[i][j] - 1);
}
for (i = 1; i <= SIZE; i++) //serial-to-parallel conversion, channel-reliability weighting//
for (j = 1; j < 3; j++)
{
if (j == 1)
{
y0[i] = y[i][j];
y0_in[i] = Lc * double(y0[i]);
}
else
{
if (i % 2 == 1)
{
y1[i] = y[i][j];
y1_in[i] = Lc * double(y1[i]);
y2[i] = 0;
y2_in[i] = Lc * double(y2[i]);
}
else
{
y1[i] = 0;
y1_in[i] = Lc * double(y1[i]);
y2[i] = y[i][j];
y2_in[i] = Lc * double(y2[i]);
}
}
}
for (i = 1; i <= SIZE; i++)
a[i] = 0;
interlace(y0_in, y00_in);
for (k = 1; k < 6; k++) //iterate 6 times//
{
DEC(a, y0_in, y1_in, e);
interlace(e, a);
DEC(a, y00_in, y2_in, e);
uninterlace(e, a);
}
DEC(a, y0_in, y1_in, e);
interlace(e, a);
DEC(a, y00_in, y2_in, e);
for (i = 1; i <= SIZE; i++)
out1[i] = a[i] + e[i] + y00_in[i];
uninterlace(out1, out2);
for (i = 1; i <= SIZE; i++) //hard decision, output the decoded codeword//
{
if (out2[i] >= 0)
A[8 * p + i - 1] = 1;
else
A[8 * p + i - 1] = 0;
;
}
}
}
int main()
{
int i, j, k;
double* dataA = (double*)malloc(sizeof(double) * 250000);// allocate host buffers for the three color channels
double* dataB = (double*)malloc(sizeof(double) * 250000);
double* dataC = (double*)malloc(sizeof(double) * 250000);
double *A = (double*)malloc(sizeof(double) * 8 * 250000);// allocate host output arrays for the three color channels
double *B = (double*)malloc(sizeof(double) * 8 * 250000);
double *C = (double*)malloc(sizeof(double) * 8 * 250000);
double *d_dataA, *d_dataB, *d_dataC, *d_dataAA, *d_dataBB, *d_dataCC;
cudaMalloc((void**)&d_dataA, sizeof(double) * 250000);// allocate device buffers for the three color channels
cudaMalloc((void**)&d_dataB, sizeof(double) * 250000);
cudaMalloc((void**)&d_dataC, sizeof(double) * 250000);
cudaMalloc((void**)&d_dataAA, sizeof(double) * 8 * 250000);// allocate device output buffers for the three color channels
cudaMalloc((void**)&d_dataBB, sizeof(double) * 8 * 250000);
cudaMalloc((void**)&d_dataCC, sizeof(double) * 8 * 250000);
FILE* fw_red = fopen("E:\\matin\\visual studio projects\\turbo_encode\\red_encode.txt", "r");
for (j = 0; j < 250000; j++)
fscanf(fw_red, "%lf", &dataA[j]);
fclose(fw_red); // read red
FILE* fw_green = fopen("E:\\matin\\visual studio projects\\turbo_encode\\green_encode.txt", "r");
for (j = 0; j < 250000; j++)
fscanf(fw_green, "lf", &dataB[j]);//读取green
fclose(fw_green);
FILE* fw_blue = fopen("E:\\matin\\visual studio projects\\turbo_encode\\blue_encode.txt", "r");
for (j = 0; j < 250000; j++)
fscanf(fw_blue, "%lf", &dataC[j]);//读取blue
fclose(fw_blue);
clock_t start = clock();
cudaMemcpy(d_dataA, dataA, sizeof(double) * 250000, cudaMemcpyHostToDevice);// copy the original red data to device memory
cudaMemcpy(d_dataAA, A, sizeof(double) * 8 * 250000, cudaMemcpyHostToDevice);// copy the red output array to device memory
cudaMemcpy(d_dataB, dataB, sizeof(double) * 250000, cudaMemcpyHostToDevice);// green
cudaMemcpy(d_dataBB, B, sizeof(double) * 8 * 250000, cudaMemcpyHostToDevice);
cudaMemcpy(d_dataC, dataC, sizeof(double) * 250000, cudaMemcpyHostToDevice);// blue
cudaMemcpy(d_dataCC, C, sizeof(double) * 8 * 250000, cudaMemcpyHostToDevice);
cudadecode << <256, 1024 >> > (d_dataA, d_dataAA);// CUDA decoding kernel
cudadecode << <256, 1024 >> > (d_dataB, d_dataBB);// CUDA decoding kernel
cudadecode << <256, 1024 >> > (d_dataC, d_dataCC);// CUDA decoding kernel
cudaMemcpy(A, d_dataAA, sizeof(double) * 8 * 250000, cudaMemcpyDeviceToHost);// copy decoding results from device memory back to host
cudaMemcpy(B, d_dataBB, sizeof(double)* 8 * 250000, cudaMemcpyDeviceToHost);
cudaMemcpy(C, d_dataCC, sizeof(double) * 8 * 250000, cudaMemcpyDeviceToHost);
clock_t end = clock();
double endtime = (double)(end - start) / CLOCKS_PER_SEC;
cout << "totaltime:" << endtime * 1000 << "ms" << endl;
ofstream out_red("E:\\matin\\visual studio projects\\turbo_encode\\red_decode.txt");
for (int k = 0; k < 250000 * 8; k++)// write rgb output
{
out_red << A[k];
if ((k + 1) % 8 == 0)
{
out_red << "\n";
}
}
out_red.close();
ofstream out_green("E:\\matin\\visual studio projects\\turbo_encode\\green_decode.txt");
for (int k = 0; k < 250000 * 8; k++)// write rgb output
{
out_green << B[k];
if ((k + 1) % 8 == 0)
{
out_green << "\n";
}
}
out_green.close();
ofstream out_blue("E:\\matin\\visual studio projects\\turbo_encode\\blue_decode.txt");
for (int k = 0; k < 250000 * 8; k++)// write rgb output
{
out_blue << C[k];
if ((k + 1) % 8 == 0)
{
out_blue << "\n";
}
}
free(dataA);
free(dataB);
free(dataC);
cudaFree(d_dataA);
cudaFree(d_dataB);
cudaFree(d_dataC);
cudaFree(d_dataAA);
cudaFree(d_dataBB);
cudaFree(d_dataCC);
}
|
404f46c15d9419174523b3b813b90243b13e32f4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2018, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: XIAO Tong (email: [email protected]) 2018-07-29
* &Updated by: XIAO Tong (email: [email protected]) 2018-12-26
* Add summation by broadcasting.
* $Update by: Lin Ye (email: [email protected]) 2019-07-24 float16 added
*/
#include "SumDim.cuh"
#include "../../XDevice.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_ROCM
/*
tensor summation of a tensor and a row vector
c = a + b * \beta
where a is a tensor and b is a row vector
>> a - pointer to the data array of a
>> b - pointer to the data array of b
>> c - pointer to the data array of c
>> rowNum - number of rows of a and c
>> colNum - number of columns of a and c (i.e., the size of b)
>> beta - the scaling factor
*/
template <class T, bool betaFired>
__global__
void KernelAddWithRow(T * a, T * b, T * c, int rowNum, int colNum, T beta)
{
__shared__ T bv[MAX_CUDA_THREAD_NUM_PER_BLOCK];
int col = blockDim.x * blockIdx.x + threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
if(col >= colNum || row >= rowNum)
return;
if(threadIdx.y == 0)
bv[threadIdx.x] = b[col];
__syncthreads();
int offset = colNum * row + col;
if(betaFired)
c[offset] = a[offset] + bv[threadIdx.x] * beta;
else
c[offset] = a[offset] + bv[threadIdx.x];
}
/*
tensor summation of a tensor and a colum vector
c = a + b * \beta
where a is a tensor and b is a colum vector
>> a - pointer to the data array of a
>> b - pointer to the data array of b
>> c - pointer to the data array of c
>> rowNum - number of rows of a and c (i.e., the size of b)
>> colNum - number of columns of a and c
>> blockNum - size of a block (matrix), i.e., rowNum * colNum
>> blockNum - number of matrics
>> beta - the scaling factor
*/
template <class T, bool betaFired>
__global__
void KernelAddWithCol(T * a, T * b, T * c, int rowNum, int colNum, int blockSize, int blockNum, T beta)
{
__shared__ T bv[MAX_CUDA_THREAD_NUM_PER_BLOCK];
int colIndex = blockDim.x * blockIdx.x + threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
int col = colIndex % colNum;
int block = colIndex / colNum;
if(row >= rowNum || block >= blockNum)
return;
if(threadIdx.x == 0)
bv[threadIdx.y] = b[row];
__syncthreads();
int offset = block * blockSize + row * colNum + col;
if(betaFired)
c[offset] = a[offset] + bv[threadIdx.y] * beta;
else
c[offset] = a[offset] + bv[threadIdx.y];
}
/*
tensor summation (cuda version)
c = a + b * \beta
where the size of b is equal to the n-th dimension of a,
i.e., a is summed with b by broadcasting
>> a - a tensor
>> b - another tensor whose size is equal to that of dimension n of a
>> c - where we put a+b*\beta. we save it in a if c is NULL
>> n - the dimension index
>> beta - the scaling factor
*/
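/*
illustrative example: if a has shape (2, 3, 4) and n = 1, then b holds 3 elements and,
below, stride = 4, blockSize = 3 and blockNum = 2, so b[j] (scaled by beta) is added
to every element a[i][j][k]
*/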
void _CudaSumDim(const XTensor * a, const XTensor * b, XTensor * c, int n, DTYPE beta)
{
CheckNTErrors(a && b && c, "Empty tensor input!");
CheckNTErrors(a->unitNum == c->unitNum, "Unmatched tensors in addition!");
CheckNTErrors(a->dataType == b->dataType && a->dataType == c->dataType,
"Unmatched data types in addition!");
CheckNTErrors(a->order == c->order, "The input tensors do not have the same order in addition!");
CheckNTErrors(!a->isSparse && !b->isSparse && !c->isSparse, "Dense tensors are required!");
CheckNTErrors(a->dimSize[n] == b->unitNum, "Wrong tensor size!");
int stride = 1;
int blockSize = a->dimSize[n];
int blockNum = 1;
for(int i = a->order - 1; i >= 0; i--){
if(i > n)
stride *= a->dimSize[i];
else if(i < n)
blockNum *= a->dimSize[i];
}
int cudaGrids[3];
int cudaBlocks[3];
int devIDBackup = 0;
ProtectCudaDev(a->devID, devIDBackup);
if (a->dataType == DEFAULT_DTYPE){
if(stride > 1){
GDevs.GetCudaThread2D(a->devID, stride * blockNum, blockSize, MAX_INT, cudaGrids, cudaBlocks);
if(beta == (DTYPE)1.0F)
hipLaunchKernelGGL(( KernelAddWithCol<DTYPE, false>) , dim3(dim3(cudaGrids[0], cudaGrids[1])), dim3(dim3(cudaBlocks[0], cudaBlocks[1])), 0, 0,
(DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data,
blockSize, stride, blockSize * stride, blockNum, beta);
else
hipLaunchKernelGGL(( KernelAddWithCol<DTYPE, true>) , dim3(dim3(cudaGrids[0], cudaGrids[1])), dim3(dim3(cudaBlocks[0], cudaBlocks[1])), 0, 0,
(DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data,
blockSize, stride, blockSize * stride, blockNum, beta);
}
else if(stride == 1){
GDevs.GetCudaThread2D(a->devID, blockSize, blockNum, MAX_INT, cudaGrids, cudaBlocks);
if(beta == (DTYPE)1.0F)
hipLaunchKernelGGL(( KernelAddWithRow<DTYPE, false>) , dim3(dim3(cudaGrids[0], cudaGrids[1])), dim3(dim3(cudaBlocks[0], cudaBlocks[1])), 0, 0,
(DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data,
blockNum, blockSize, beta);
else
hipLaunchKernelGGL(( KernelAddWithRow<DTYPE, true>) , dim3(dim3(cudaGrids[0], cudaGrids[1])), dim3(dim3(cudaBlocks[0], cudaBlocks[1])), 0, 0,
(DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data,
blockNum, blockSize, beta);
}
else{
ShowNTErrors("Something is wrong!");
}
}
else if (a->dataType == X_FLOAT16) {
#ifdef HALF_PRECISION
half beta1 = __float2half(beta);
if (stride > 1) {
GDevs.GetCudaThread2D(a->devID, stride * blockNum, blockSize, MAX_INT, cudaGrids, cudaBlocks);
if (beta == (DTYPE)1.0F)
hipLaunchKernelGGL(( KernelAddWithCol<__half, false>) , dim3(dim3(cudaGrids[0], cudaGrids[1])), dim3(dim3(cudaBlocks[0], cudaBlocks[1])), 0, 0,
(__half*)a->data, (__half*)b->data, (__half*)c->data,
blockSize, stride, blockSize * stride, blockNum, beta1);
else
hipLaunchKernelGGL(( KernelAddWithCol<__half, true>) , dim3(dim3(cudaGrids[0], cudaGrids[1])), dim3(dim3(cudaBlocks[0], cudaBlocks[1])), 0, 0,
(__half*)a->data, (__half*)b->data, (__half*)c->data,
blockSize, stride, blockSize * stride, blockNum, beta1);
}
else if (stride == 1) {
GDevs.GetCudaThread2D(a->devID, blockSize, blockNum, MAX_INT, cudaGrids, cudaBlocks);
if (beta == (DTYPE)1.0F)
hipLaunchKernelGGL(( KernelAddWithRow<__half, false>) , dim3(dim3(cudaGrids[0], cudaGrids[1])), dim3(dim3(cudaBlocks[0], cudaBlocks[1])), 0, 0,
(__half*)a->data, (__half*)b->data, (__half*)c->data,
blockNum, blockSize, beta1);
else
hipLaunchKernelGGL(( KernelAddWithRow<__half, true>) , dim3(dim3(cudaGrids[0], cudaGrids[1])), dim3(dim3(cudaBlocks[0], cudaBlocks[1])), 0, 0,
(__half*)a->data, (__half*)b->data, (__half*)c->data,
blockNum, blockSize, beta1);
}
else {
ShowNTErrors("Something is wrong!");
}
#else
ShowNTErrors("Recompile the code with HALF_PRECISION!");
#endif
}
else {
ShowNTErrors("TODO!");
}
BacktoCudaDev(a->devID, devIDBackup);
}
#endif
} // namespace nts(NiuTrans.Tensor)
| 404f46c15d9419174523b3b813b90243b13e32f4.cu | /* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2018, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: XIAO Tong (email: [email protected]) 2018-07-29
* &Updated by: XIAO Tong (email: [email protected]) 2018-12-26
* Add summation by broadcasting.
* $Update by: Lin Ye (email: [email protected]) 2019-07-24 float16 added
*/
#include "SumDim.cuh"
#include "../../XDevice.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/*
tensor summation of a tensor and a row vector
c = a + b * \beta
where a is a tensor and b is a row vector
>> a - pointer to the data array of a
>> b - pointer to the data array of b
>> c - pointer to the data array of c
>> rowNum - number of rows of a and c
>> colNum - number of columns of a and c (i.e., the size of b)
>> beta - the scaling factor
*/
template <class T, bool betaFired>
__global__
void KernelAddWithRow(T * a, T * b, T * c, int rowNum, int colNum, T beta)
{
__shared__ T bv[MAX_CUDA_THREAD_NUM_PER_BLOCK];
int col = blockDim.x * blockIdx.x + threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
if(col >= colNum || row >= rowNum)
return;
if(threadIdx.y == 0)
bv[threadIdx.x] = b[col];
__syncthreads();
int offset = colNum * row + col;
if(betaFired)
c[offset] = a[offset] + bv[threadIdx.x] * beta;
else
c[offset] = a[offset] + bv[threadIdx.x];
}
/*
tensor summation of a tensor and a colum vector
c = a + b * \beta
where a is a tensor and b is a colum vector
>> a - pointer to the data array of a
>> b - pointer to the data array of b
>> c - pointer to the data array of c
>> rowNum - number of rows of a and c (i.e., the size of b)
>> colNum - number of columns of a and c
>> blockNum - size of a block (matrix), i.e., rowNum * colNum
>> blockNum - number of matrics
>> beta - the scaling factor
*/
template <class T, bool betaFired>
__global__
void KernelAddWithCol(T * a, T * b, T * c, int rowNum, int colNum, int blockSize, int blockNum, T beta)
{
__shared__ T bv[MAX_CUDA_THREAD_NUM_PER_BLOCK];
int colIndex = blockDim.x * blockIdx.x + threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
int col = colIndex % colNum;
int block = colIndex / colNum;
if(row >= rowNum || block >= blockNum)
return;
if(threadIdx.x == 0)
bv[threadIdx.y] = b[row];
__syncthreads();
int offset = block * blockSize + row * colNum + col;
if(betaFired)
c[offset] = a[offset] + bv[threadIdx.y] * beta;
else
c[offset] = a[offset] + bv[threadIdx.y];
}
/*
tensor summation (cuda version)
c = a + b * \beta
where the size of b is equal to the n-th dimension of a,
i.e., a is summed with b by broadcasting
>> a - a tensor
>> b - another tensor whose size is equal to that of dimension n of a
>> c - where we put a+b*\beta. we save it in a if c is NULL
>> n - the dimension index
>> beta - the scaling factor
*/
void _CudaSumDim(const XTensor * a, const XTensor * b, XTensor * c, int n, DTYPE beta)
{
CheckNTErrors(a && b && c, "Empty tensor input!");
CheckNTErrors(a->unitNum == c->unitNum, "Unmatched tensors in addition!");
CheckNTErrors(a->dataType == b->dataType && a->dataType == c->dataType,
"Unmatched data types in addition!");
CheckNTErrors(a->order == c->order, "The input tensors do not have the same order in addition!");
CheckNTErrors(!a->isSparse && !b->isSparse && !c->isSparse, "Dense tensors are required!");
CheckNTErrors(a->dimSize[n] == b->unitNum, "Wrong tensor size!");
int stride = 1;
int blockSize = a->dimSize[n];
int blockNum = 1;
for(int i = a->order - 1; i >= 0; i--){
if(i > n)
stride *= a->dimSize[i];
else if(i < n)
blockNum *= a->dimSize[i];
}
int cudaGrids[3];
int cudaBlocks[3];
int devIDBackup = 0;
ProtectCudaDev(a->devID, devIDBackup);
if (a->dataType == DEFAULT_DTYPE){
if(stride > 1){
GDevs.GetCudaThread2D(a->devID, stride * blockNum, blockSize, MAX_INT, cudaGrids, cudaBlocks);
if(beta == (DTYPE)1.0F)
KernelAddWithCol<DTYPE, false> <<<dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1])>>>
((DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data,
blockSize, stride, blockSize * stride, blockNum, beta);
else
KernelAddWithCol<DTYPE, true> <<<dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1])>>>
((DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data,
blockSize, stride, blockSize * stride, blockNum, beta);
}
else if(stride == 1){
GDevs.GetCudaThread2D(a->devID, blockSize, blockNum, MAX_INT, cudaGrids, cudaBlocks);
if(beta == (DTYPE)1.0F)
KernelAddWithRow<DTYPE, false> <<<dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1])>>>
((DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data,
blockNum, blockSize, beta);
else
KernelAddWithRow<DTYPE, true> <<<dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1])>>>
((DTYPE*)a->data, (DTYPE*)b->data, (DTYPE*)c->data,
blockNum, blockSize, beta);
}
else{
ShowNTErrors("Something is wrong!");
}
}
else if (a->dataType == X_FLOAT16) {
#ifdef HALF_PRECISION
half beta1 = __float2half(beta);
if (stride > 1) {
GDevs.GetCudaThread2D(a->devID, stride * blockNum, blockSize, MAX_INT, cudaGrids, cudaBlocks);
if (beta == (DTYPE)1.0F)
KernelAddWithCol<__half, false> <<<dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1])>>>
((__half*)a->data, (__half*)b->data, (__half*)c->data,
blockSize, stride, blockSize * stride, blockNum, beta1);
else
KernelAddWithCol<__half, true> <<<dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1])>>>
((__half*)a->data, (__half*)b->data, (__half*)c->data,
blockSize, stride, blockSize * stride, blockNum, beta1);
}
else if (stride == 1) {
GDevs.GetCudaThread2D(a->devID, blockSize, blockNum, MAX_INT, cudaGrids, cudaBlocks);
if (beta == (DTYPE)1.0F)
KernelAddWithRow<__half, false> <<<dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1])>>>
((__half*)a->data, (__half*)b->data, (__half*)c->data,
blockNum, blockSize, beta1);
else
KernelAddWithRow<__half, true> <<<dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1])>>>
((__half*)a->data, (__half*)b->data, (__half*)c->data,
blockNum, blockSize, beta1);
}
else {
ShowNTErrors("Something is wrong!");
}
#else
ShowNTErrors("Recompile the code with HALF_PRECISION!");
#endif
}
else {
ShowNTErrors("TODO!");
}
BacktoCudaDev(a->devID, devIDBackup);
}
#endif
} // namespace nts(NiuTrans.Tensor)
|
1ea53e0a945692766dbae6ef9bf076de237e1033.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <ATen/ATen.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
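/*
For every 2-D point in xyz, NmDistanceKernel computes the squared distance to its
nearest neighbour in xyz2 and that neighbour's index, staging xyz2 through shared
memory in tiles of 512 points and unrolling the inner search loop by four.
*/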
__global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){
const int batch=512;
__shared__ float buf[batch*2];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int k2=0;k2<m;k2+=batch){
int end_k=min(m,k2+batch)-k2;
for (int j=threadIdx.x;j<end_k*2;j+=blockDim.x){
buf[j]=xyz2[(i*m+k2)*2+j];
}
__syncthreads();
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz[(i*n+j)*2+0];
float y1=xyz[(i*n+j)*2+1];
int best_i=0;
float best=0;
int end_ka=end_k-(end_k&2);
if (end_ka==batch){
for (int k=0;k<batch;k+=4){
{
float x2=buf[k*2+0]-x1;
float y2=buf[k*2+1]-y1;
float d=x2*x2+y2*y2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*2+2]-x1;
float y2=buf[k*2+3]-y1;
float d=x2*x2+y2*y2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*2+4]-x1;
float y2=buf[k*2+5]-y1;
float d=x2*x2+y2*y2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*2+6]-x1;
float y2=buf[k*2+7]-y1;
float d=x2*x2+y2*y2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}else{
for (int k=0;k<end_ka;k+=4){
{
float x2=buf[k*2+0]-x1;
float y2=buf[k*2+1]-y1;
float d=x2*x2+y2*y2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*2+2]-x1;
float y2=buf[k*2+3]-y1;
float d=x2*x2+y2*y2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*2+4]-x1;
float y2=buf[k*2+5]-y1;
float d=x2*x2+y2*y2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*2+6]-x1;
float y2=buf[k*2+7]-y1;
float d=x2*x2+y2*y2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}
for (int k=end_ka;k<end_k;k++){
float x2=buf[k*2+0]-x1;
float y2=buf[k*2+1]-y1;
float d=x2*x2+y2*y2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
if (k2==0 || result[(i*n+j)]>best){
result[(i*n+j)]=best;
result_i[(i*n+j)]=best_i;
}
}
__syncthreads();
}
}
}
// int chamfer_cuda_forward(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i, hipStream_t stream){
int chamfer_cuda_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist1, at::Tensor dist2, at::Tensor idx1, at::Tensor idx2){
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); //num_points point cloud A
const auto m = xyz2.size(1); //num_points point cloud B
hipLaunchKernelGGL(( NmDistanceKernel), dim3(dim3(32,16,1)),dim3(512), 0, 0, batch_size, n, xyz1.data<float>(), m, xyz2.data<float>(), dist1.data<float>(), idx1.data<int>());
hipLaunchKernelGGL(( NmDistanceKernel), dim3(dim3(32,16,1)),dim3(512), 0, 0, batch_size, m, xyz2.data<float>(), n, xyz1.data<float>(), dist2.data<float>(), idx2.data<int>());
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in nnd updateOutput: %s\n", hipGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
__global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz1[(i*n+j)*2+0];
float y1=xyz1[(i*n+j)*2+1];
int j2=idx1[i*n+j];
float x2=xyz2[(i*m+j2)*2+0];
float y2=xyz2[(i*m+j2)*2+1];
float g=grad_dist1[i*n+j]*2;
atomicAdd(&(grad_xyz1[(i*n+j)*2+0]),g*(x1-x2));
atomicAdd(&(grad_xyz1[(i*n+j)*2+1]),g*(y1-y2));
atomicAdd(&(grad_xyz2[(i*m+j2)*2+0]),-(g*(x1-x2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*2+1]),-(g*(y1-y2)));
}
}
}
// int chamfer_cuda_backward(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,const float * grad_dist2,const int * idx2,float * grad_xyz1,float * grad_xyz2, hipStream_t stream){
int chamfer_cuda_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz1, at::Tensor gradxyz2, at::Tensor graddist1, at::Tensor graddist2, at::Tensor idx1, at::Tensor idx2){
// hipMemset(grad_xyz1,0,b*n*3*4);
// hipMemset(grad_xyz2,0,b*m*3*4);
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); //num_points point cloud A
const auto m = xyz2.size(1); //num_points point cloud B
hipLaunchKernelGGL(( NmDistanceGradKernel), dim3(dim3(1,16,1)),dim3(256), 0, 0, batch_size,n,xyz1.data<float>(),m,xyz2.data<float>(),graddist1.data<float>(),idx1.data<int>(),gradxyz1.data<float>(),gradxyz2.data<float>());
hipLaunchKernelGGL(( NmDistanceGradKernel), dim3(dim3(1,16,1)),dim3(256), 0, 0, batch_size,m,xyz2.data<float>(),n,xyz1.data<float>(),graddist2.data<float>(),idx2.data<int>(),gradxyz2.data<float>(),gradxyz1.data<float>());
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
printf("error in nnd get grad: %s\n", hipGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
| 1ea53e0a945692766dbae6ef9bf076de237e1033.cu |
#include <stdio.h>
#include <ATen/ATen.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
__global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){
const int batch=512;
__shared__ float buf[batch*2];
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int k2=0;k2<m;k2+=batch){
int end_k=min(m,k2+batch)-k2;
for (int j=threadIdx.x;j<end_k*2;j+=blockDim.x){
buf[j]=xyz2[(i*m+k2)*2+j];
}
__syncthreads();
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz[(i*n+j)*2+0];
float y1=xyz[(i*n+j)*2+1];
int best_i=0;
float best=0;
int end_ka=end_k-(end_k&2);
if (end_ka==batch){
for (int k=0;k<batch;k+=4){
{
float x2=buf[k*2+0]-x1;
float y2=buf[k*2+1]-y1;
float d=x2*x2+y2*y2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*2+2]-x1;
float y2=buf[k*2+3]-y1;
float d=x2*x2+y2*y2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*2+4]-x1;
float y2=buf[k*2+5]-y1;
float d=x2*x2+y2*y2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*2+6]-x1;
float y2=buf[k*2+7]-y1;
float d=x2*x2+y2*y2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}else{
for (int k=0;k<end_ka;k+=4){
{
float x2=buf[k*2+0]-x1;
float y2=buf[k*2+1]-y1;
float d=x2*x2+y2*y2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
{
float x2=buf[k*2+2]-x1;
float y2=buf[k*2+3]-y1;
float d=x2*x2+y2*y2;
if (d<best){
best=d;
best_i=k+k2+1;
}
}
{
float x2=buf[k*2+4]-x1;
float y2=buf[k*2+5]-y1;
float d=x2*x2+y2*y2;
if (d<best){
best=d;
best_i=k+k2+2;
}
}
{
float x2=buf[k*2+6]-x1;
float y2=buf[k*2+7]-y1;
float d=x2*x2+y2*y2;
if (d<best){
best=d;
best_i=k+k2+3;
}
}
}
}
for (int k=end_ka;k<end_k;k++){
float x2=buf[k*2+0]-x1;
float y2=buf[k*2+1]-y1;
float d=x2*x2+y2*y2;
if (k==0 || d<best){
best=d;
best_i=k+k2;
}
}
if (k2==0 || result[(i*n+j)]>best){
result[(i*n+j)]=best;
result_i[(i*n+j)]=best_i;
}
}
__syncthreads();
}
}
}
// int chamfer_cuda_forward(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i, cudaStream_t stream){
int chamfer_cuda_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist1, at::Tensor dist2, at::Tensor idx1, at::Tensor idx2){
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); //num_points point cloud A
const auto m = xyz2.size(1); //num_points point cloud B
NmDistanceKernel<<<dim3(32,16,1),512>>>(batch_size, n, xyz1.data<float>(), m, xyz2.data<float>(), dist1.data<float>(), idx1.data<int>());
NmDistanceKernel<<<dim3(32,16,1),512>>>(batch_size, m, xyz2.data<float>(), n, xyz1.data<float>(), dist2.data<float>(), idx2.data<int>());
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in nnd updateOutput: %s\n", cudaGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
__global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz1[(i*n+j)*2+0];
float y1=xyz1[(i*n+j)*2+1];
int j2=idx1[i*n+j];
float x2=xyz2[(i*m+j2)*2+0];
float y2=xyz2[(i*m+j2)*2+1];
float g=grad_dist1[i*n+j]*2;
atomicAdd(&(grad_xyz1[(i*n+j)*2+0]),g*(x1-x2));
atomicAdd(&(grad_xyz1[(i*n+j)*2+1]),g*(y1-y2));
atomicAdd(&(grad_xyz2[(i*m+j2)*2+0]),-(g*(x1-x2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*2+1]),-(g*(y1-y2)));
}
}
}
// int chamfer_cuda_backward(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,const float * grad_dist2,const int * idx2,float * grad_xyz1,float * grad_xyz2, cudaStream_t stream){
int chamfer_cuda_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz1, at::Tensor gradxyz2, at::Tensor graddist1, at::Tensor graddist2, at::Tensor idx1, at::Tensor idx2){
// cudaMemset(grad_xyz1,0,b*n*3*4);
// cudaMemset(grad_xyz2,0,b*m*3*4);
const auto batch_size = xyz1.size(0);
const auto n = xyz1.size(1); //num_points point cloud A
const auto m = xyz2.size(1); //num_points point cloud B
NmDistanceGradKernel<<<dim3(1,16,1),256>>>(batch_size,n,xyz1.data<float>(),m,xyz2.data<float>(),graddist1.data<float>(),idx1.data<int>(),gradxyz1.data<float>(),gradxyz2.data<float>());
NmDistanceGradKernel<<<dim3(1,16,1),256>>>(batch_size,m,xyz2.data<float>(),n,xyz1.data<float>(),graddist2.data<float>(),idx2.data<int>(),gradxyz2.data<float>(),gradxyz1.data<float>());
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("error in nnd get grad: %s\n", cudaGetErrorString(err));
//THError("aborting");
return 0;
}
return 1;
}
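// --- Illustrative sketch, not part of the original file ---
// chamfer_cuda_forward/chamfer_cuda_backward take at::Tensor arguments, the usual shape of a
// PyTorch C++/CUDA extension. The guarded binding below is one plausible way to expose them to
// Python; the guard macro, module name, and exported names are assumptions, and the real
// bindings for this code may well live in a separate .cpp file.
#ifdef BUILD_ILLUSTRATIVE_PYBIND
#include <torch/extension.h>
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("forward", &chamfer_cuda_forward, "2D chamfer distance forward (CUDA)");
m.def("backward", &chamfer_cuda_backward, "2D chamfer distance backward (CUDA)");
}
#endif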
|
1f300e41ba897eff1f04a4dc075bd67f08ae32b8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <iostream>
using namespace std;
void initArray(float* vec, int n) {
int i;
for(i=0; i<n; i++)
vec[i] = rand() % 9 + 1;
}
void initMat(float* mat, int n) {
int i, j;
for(i=0; i<n; i++)
for(j=0; j<n; j++)
mat[i*n+j] = rand() % 9 + 1;
}
void printVec(float* vector, int size)
{
for(int i=0; i<size; i++)
cout << vector[i] << " ";
cout<<endl;
}
void printMat(float *a, int n) {
for(int i=0; i<n; i++){
for (int j=0; j<n; j++)
cout<< a[i*n+j] << " ";
cout<<endl;
}
}
__global__
void mulKernel(float *vec, float *mat, float* c, int n)
{
int i = threadIdx.x + blockDim.x * blockIdx.x ;
float sum=0;
if(i < n){
for(int j=0; j<n; j++)
sum += vec[j]*mat[(j*n) + i];
c[i]=sum;
}
}
void mulVecMat(float* vec, float* mat, int n){
float* c;
float* dev_a, * dev_b, * dev_c;
cout<<"is oke"<<endl;
vec = (float*)malloc(sizeof(float)*n);
mat = (float*)malloc(sizeof(float)*n*n);
c = (float*)malloc(sizeof(float)*n);
cout<<"is oke"<<endl;
initArray(vec, n);
cout<<"array is okey"<<endl;
initMat(mat, n); // initMat(ptr, k) fills k*k entries, so pass n (not n*n) for the n x n matrix
cout<<"init mat is okey"<<endl;
initArray(c, n); // c holds only n floats, so initialize it with initArray rather than initMat
cout<<"init result vector is okey"<<endl;
// printVec(vec, n);
// printMat(mat, n*n);
// printVec(c, n);
hipMalloc((void**)&dev_a, sizeof(float)*n);
hipMemcpy(dev_a, vec, sizeof(float)*n, hipMemcpyHostToDevice);
hipMalloc((void**)&dev_b, sizeof(float)*n*n);
hipMemcpy(dev_b, mat, sizeof(float)*n*n, hipMemcpyHostToDevice);
hipMalloc((void**)&dev_c, sizeof(float)*n);
hipLaunchKernelGGL(( mulKernel), dim3(ceil(n/256.0)), dim3(256), 0, 0, dev_a, dev_b, dev_c, n);
hipMemcpy(c, dev_c, sizeof(float)*n, hipMemcpyDeviceToHost);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
// printMat(c, n);
}
int main()
{
// Size of vectors
int n = 100;
// Host input vectors
float* h_a = 0;
float* h_ma = 0;
mulVecMat(h_a, h_ma, n);
return 0;
}
| 1f300e41ba897eff1f04a4dc075bd67f08ae32b8.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <iostream>
using namespace std;
void initArray(float* vec, int n) {
int i;
for(i=0; i<n; i++)
vec[i] = rand() % 9 + 1;
}
void initMat(float* mat, int n) {
int i, j;
for(i=0; i<n; i++)
for(j=0; j<n; j++)
mat[i*n+j] = rand() % 9 + 1;
}
void printVec(float* vector, int size)
{
for(int i=0; i<size; i++)
cout << vector[i] << " ";
cout<<endl;
}
void printMat(float *a, int n) {
for(int i=0; i<n; i++){
for (int j=0; j<n; j++)
cout<< a[i*n+j] << " ";
cout<<endl;
}
}
__global__
void mulKernel(float *vec, float *mat, float* c, int n)
{
int i = threadIdx.x + blockDim.x * blockIdx.x ;
float sum=0;
if(i < n){
for(int j=0; j<n; j++)
sum += vec[j]*mat[(j*n) + i];
c[i]=sum;
}
}
void mulVecMat(float* vec, float* mat, int n){
float* c;
float* dev_a, * dev_b, * dev_c;
cout<<"is oke"<<endl;
vec = (float*)malloc(sizeof(float)*n);
mat = (float*)malloc(sizeof(float)*n*n);
c = (float*)malloc(sizeof(float)*n);
cout<<"is oke"<<endl;
initArray(vec, n);
cout<<"array is okey"<<endl;
initMat(mat, n); // initMat(ptr, k) fills k*k entries, so pass n (not n*n) for the n x n matrix
cout<<"init mat is okey"<<endl;
initArray(c, n); // c holds only n floats, so initialize it with initArray rather than initMat
cout<<"init result vector is okey"<<endl;
// printVec(vec, n);
// printMat(mat, n*n);
// printVec(c, n);
cudaMalloc((void**)&dev_a, sizeof(float)*n);
cudaMemcpy(dev_a, vec, sizeof(float)*n, cudaMemcpyHostToDevice);
cudaMalloc((void**)&dev_b, sizeof(float)*n*n);
cudaMemcpy(dev_b, mat, sizeof(float)*n*n, cudaMemcpyHostToDevice);
cudaMalloc((void**)&dev_c, sizeof(float)*n);
mulKernel<<<ceil(n/256.0), 256>>>(dev_a, dev_b, dev_c, n);
cudaMemcpy(c, dev_c, sizeof(float)*n, cudaMemcpyDeviceToHost);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
// printMat(c, n);
}
int main()
{
// Size of vectors
int n = 100;
// Host input vectors
float* h_a = 0;
float* h_ma = 0;
mulVecMat(h_a, h_ma, n);
return 0;
}
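// --- Illustrative sketch, not part of the original file ---
// A host-side reference for mulKernel, handy for validating the GPU result on small n.
// The function name and tolerance are arbitrary choices; it assumes the same layout
// mat[j*n + i] that the kernel reads.
bool checkVecMat(const float* vec, const float* mat, const float* c, int n) {
for (int i = 0; i < n; i++) {
float ref = 0.0f;
for (int j = 0; j < n; j++)
ref += vec[j] * mat[j*n + i]; // same column walk as mulKernel
if (fabs(ref - c[i]) > 1e-3f * (fabs(ref) + 1.0f))
return false; // mismatch beyond a loose relative tolerance
}
return true;
}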
|
0c9ef241b130dfc639063a0ac355ca7d60d02936.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017, The OctNet authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL OCTNET AUTHORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "octnet/gpu/col2oc.h"
#include "octnet/gpu/gpu.h"
#include "octnet/gpu/buffer.h"
#include "octnet/core/z_curve.h"
#define SHARED_N_FEAT 8
__device__
inline bool col2oc_in_vol(const octree* in, const int d, const int h, const int w) {
return d >= 0 && h >= 0 && w >= 0 && d < 8 * in->grid_depth && h < 8 * in->grid_height && w < 8 * in->grid_width;
}
__device__
inline void col2oc_leaf(const ot_data_t* out, const ot_tree_t* leaf_tree, const int leaf_leaf_idx, const int leaf_grid_idx, const int leaf_bit_idx,
const int n, const int ds, const int hs, const int ws, const int size,
ot_data_t* shared, octree* in) {
ot_data_t factor;
int d,h,w, kidx, grid_idx, bit_idx, data_idx, leaf_idx, data_cnt, data_cnt_e1, data_cnt_e2;
ot_tree_t* tree;
ot_data_t* data_in;
ot_data_t val;
data_idx = tree_data_idx(leaf_tree, leaf_bit_idx, in->feature_size);
// data_in = in->data_ptrs[leaf_grid_idx] + data_idx;
data_in = octree_get_data(in, leaf_grid_idx) + data_idx;
for(int rep_f = 0; rep_f < (in->feature_size + SHARED_N_FEAT - 1) / SHARED_N_FEAT; ++rep_f) {
int from_f = rep_f * SHARED_N_FEAT;
int to_f = IMIN(SHARED_N_FEAT, in->feature_size - rep_f * SHARED_N_FEAT);
for(int f = 0; f < to_f; ++f) {
leaf_idx = leaf_leaf_idx;
factor = 1.f / (size * size * size);
//leaf data
//1-center
// (1,1,1)=13
val = size*size*size * factor;
shared[f] = val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 13];
//6
val = (size-1)*size*size * factor;
//(0,1,1)=4, (2,1,1)=22, (1,0,1)=10, (1,2,1)=16, (1,1,0)=12, (1,1,2)=14
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 4];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 10];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 12];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 14];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 16];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 22];
//8
val = (size-1)*(size-1)*(size-1) * factor;
//(0,0,0)=0, (0,0,2)=2, (0,2,0)=6, (0,2,2)=8,
//(2,0,0)=18, (2,0,2)=20, (2,2,0)=24, (2,2,2)=26
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 0];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 2];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 6];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 8];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 18];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 20];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 24];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 26];
//12
val = (size-1)*(size-1)*(size) * factor;
//(0,0,1)=1, (0,1,0)=3, (0,1,2)=5, (0,2,1)=7
//(1,0,0)=9, (1,0,2)=11, (1,2,0)=15, (1,2,2)=17
//(2,0,1)=19, (2,1,0)=21, (2,1,2)=23, (2,2,1)=25
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 1];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 3];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 5];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 7];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 9];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 11];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 15];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 17];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 19];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 21];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 23];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 25];
}
//corner data
for(int cd = 0; cd < 2; ++cd) {
for(int ch = 0; ch < 2; ++ch) {
for(int cw = 0; cw < 2; ++cw) {
d = ds + (cd*(size+1)-1); h = hs + (ch*(size+1)-1); w = ws + (cw*(size+1)-1);
if(col2oc_in_vol(in, d,h,w)) {
grid_idx = octree_grid_idx(in, n, d / 8, h / 8, w / 8);
tree = octree_get_tree(in, grid_idx);
bit_idx = tree_bit_idx(tree, d % 8, h % 8, w % 8);
data_idx = tree_data_idx(tree, bit_idx, 1);
// leaf_idx = n_leafs_upto(in, grid_idx) + data_idx;
leaf_idx = in->prefix_leafs[grid_idx] + data_idx;
factor = 1.f / (bit_idx == 0 ? 512 : (bit_idx < 9 ? 64 : (bit_idx < 73 ? 8 : 1)));
kidx = ((1-cd)*2*3 + (1-ch)*2)*3 + (1-cw)*2;
for(int f = 0; f < to_f; ++f) {
shared[f] += factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
}
}
}
}
}
//along the edges
//d
for(int ch = 0; ch < 2; ++ch) {
for(int cw = 0; cw < 2; ++cw) {
d = ds; h = hs + (ch*(size+1)-1); w = ws + (cw*(size+1)-1);
if(col2oc_in_vol(in, d,h,w)) {
grid_idx = octree_grid_idx(in, n, d / 8, h / 8, w / 8);
tree = octree_get_tree(in, grid_idx);
int e = 0;
while(e < size) {
d = ds + e;
bit_idx = tree_bit_idx(tree, d % 8, h % 8, w % 8);
data_cnt = bit_idx == 0 ? 8 : (bit_idx < 9 ? 4 : (bit_idx < 73 ? 2 : 1));
factor = 1.f / (data_cnt * data_cnt * data_cnt);
data_cnt = IMIN(size - e, data_cnt);
data_idx = tree_data_idx(tree, bit_idx, 1);
// leaf_idx = n_leafs_upto(in, grid_idx) + data_idx;
leaf_idx = in->prefix_leafs[grid_idx] + data_idx;
for(int f = 0; f < to_f; ++f) {
kidx = ((2) * 3 + ((1-ch)*2)) * 3 + ((1-cw)*2);
shared[f] += (data_cnt - (e+data_cnt >= size)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((1) * 3 + ((1-ch)*2)) * 3 + ((1-cw)*2);
shared[f] += data_cnt * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((0) * 3 + ((1-ch)*2)) * 3 + ((1-cw)*2);
shared[f] += (data_cnt - (e == 0)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
}
e += data_cnt;
}
}
}
}
//h
for(int cd = 0; cd < 2; ++cd) {
for(int cw = 0; cw < 2; ++cw) {
d = ds + (cd*(size+1)-1); h = hs; w = ws + (cw*(size+1)-1);
if(col2oc_in_vol(in, d,h,w)) {
grid_idx = octree_grid_idx(in, n, d / 8, h / 8, w / 8);
tree = octree_get_tree(in, grid_idx);
int e = 0;
while(e < size) {
h = hs + e;
bit_idx = tree_bit_idx(tree, d % 8, h % 8, w % 8);
data_cnt = bit_idx == 0 ? 8 : (bit_idx < 9 ? 4 : (bit_idx < 73 ? 2 : 1));
factor = 1.f / (data_cnt * data_cnt * data_cnt);
data_cnt = IMIN(size - e, data_cnt);
data_idx = tree_data_idx(tree, bit_idx, 1);
// leaf_idx = n_leafs_upto(in, grid_idx) + data_idx;
leaf_idx = in->prefix_leafs[grid_idx] + data_idx;
for(int f = 0; f < to_f; ++f) {
kidx = (((1-cd)*2) * 3 + (2)) * 3 + ((1-cw)*2);
shared[f] += (data_cnt - (e+data_cnt >= size)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = (((1-cd)*2) * 3 + (1)) * 3 + ((1-cw)*2);
shared[f] += data_cnt * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = (((1-cd)*2) * 3 + (0)) * 3 + ((1-cw)*2);
shared[f] += (data_cnt - (e == 0)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
}
e += data_cnt;
}
}
}
}
//w
for(int cd = 0; cd < 2; ++cd) {
for(int ch = 0; ch < 2; ++ch) {
d = ds + (cd*(size+1)-1); h = hs + (ch*(size+1)-1); w = ws;
if(col2oc_in_vol(in, d,h,w)) {
grid_idx = octree_grid_idx(in, n, d / 8, h / 8, w / 8);
tree = octree_get_tree(in, grid_idx);
int e = 0;
while(e < size) {
w = ws + e;
bit_idx = tree_bit_idx(tree, d % 8, h % 8, w % 8);
data_cnt = bit_idx == 0 ? 8 : (bit_idx < 9 ? 4 : (bit_idx < 73 ? 2 : 1));
factor = 1.f / (data_cnt * data_cnt * data_cnt);
data_cnt = IMIN(size - e, data_cnt);
data_idx = tree_data_idx(tree, bit_idx, 1);
// leaf_idx = n_leafs_upto(in, grid_idx) + data_idx;
leaf_idx = in->prefix_leafs[grid_idx] + data_idx;
for(int f = 0; f < to_f; ++f) {
kidx = (((1-cd)*2) * 3 + ((1-ch)*2)) * 3 + (2);
shared[f] += (data_cnt - (e+data_cnt >= size)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = (((1-cd)*2) * 3 + ((1-ch)*2)) * 3 + (1);
shared[f] += data_cnt * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = (((1-cd)*2) * 3 + ((1-ch)*2)) * 3 + (0);
shared[f] += (data_cnt - (e == 0)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
}
e += data_cnt;
}
}
}
}
//along the faces
//d
for(int fd = 0; fd < 2; ++fd) {
d = ds + (fd*(size+1)-1); h = hs; w = ws;
if(col2oc_in_vol(in, d,h,w)) {
grid_idx = octree_grid_idx(in, n, d / 8, h / 8, w / 8);
tree = octree_get_tree(in, grid_idx);
int z = 0;
while(z < size * size) {
const int e1 = z_curve_x(z);
const int e2 = z_curve_y(z);
h = hs + e1;
w = ws + e2;
bit_idx = tree_bit_idx(tree, d % 8, h % 8, w % 8);
data_idx = tree_data_idx(tree, bit_idx, 1);
// leaf_idx = n_leafs_upto(in, grid_idx) + data_idx;
leaf_idx = in->prefix_leafs[grid_idx] + data_idx;
data_cnt = bit_idx == 0 ? 8 : (bit_idx < 9 ? 4 : (bit_idx < 73 ? 2 : 1));
data_cnt_e1 = IMIN(size - e1, data_cnt);
data_cnt_e2 = IMIN(size - e2, data_cnt);
factor = 1.f / (data_cnt * data_cnt * data_cnt);
data_cnt = IMIN(size * size - z, data_cnt * data_cnt);
for(int f = 0; f < to_f; ++f) {
kidx = (((1-fd)*2) * 3 + (2)) * 3 + (2);
shared[f] += (data_cnt_e1 - (e1+data_cnt_e1 >= size)) * (data_cnt_e2 - (e2+data_cnt_e2 >= size)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = (((1-fd)*2) * 3 + (2)) * 3 + (1);
shared[f] += (data_cnt_e1 - (e1+data_cnt_e1 >= size)) * (data_cnt_e2) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = (((1-fd)*2) * 3 + (2)) * 3 + (0);
shared[f] += (data_cnt_e1 - (e1+data_cnt_e1 >= size)) * (data_cnt_e2 - (e2 == 0)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = (((1-fd)*2) * 3 + (1)) * 3 + (2);
shared[f] += (data_cnt_e1) * (data_cnt_e2 - (e2+data_cnt_e2 >= size)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = (((1-fd)*2) * 3 + (1)) * 3 + (1);
shared[f] += data_cnt * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = (((1-fd)*2) * 3 + (1)) * 3 + (0);
shared[f] += (data_cnt_e1) * (data_cnt_e2 - (e2 == 0)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = (((1-fd)*2) * 3 + (0)) * 3 + (2);
shared[f] += (data_cnt_e1 - (e1 == 0)) * (data_cnt_e2 - (e2+data_cnt_e2 >= size)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = (((1-fd)*2) * 3 + (0)) * 3 + (1);
shared[f] += (data_cnt_e1 - (e1 == 0)) * (data_cnt_e2) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = (((1-fd)*2) * 3 + (0)) * 3 + (0);
shared[f] += (data_cnt_e1 - (e1 == 0)) * (data_cnt_e2 - (e2 == 0)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
}
z += data_cnt;
}
}
}
//h
for(int fh = 0; fh < 2; ++fh) {
d = ds; h = hs + (fh*(size+1)-1); w = ws;
if(col2oc_in_vol(in, d,h,w)) {
grid_idx = octree_grid_idx(in, n, d / 8, h / 8, w / 8);
tree = octree_get_tree(in, grid_idx);
int z = 0;
while(z < size * size) {
const int e1 = z_curve_x(z);
const int e2 = z_curve_y(z);
d = ds + e1;
w = ws + e2;
bit_idx = tree_bit_idx(tree, d % 8, h % 8, w % 8);
data_idx = tree_data_idx(tree, bit_idx, 1);
// leaf_idx = n_leafs_upto(in, grid_idx) + data_idx;
leaf_idx = in->prefix_leafs[grid_idx] + data_idx;
data_cnt = bit_idx == 0 ? 8 : (bit_idx < 9 ? 4 : (bit_idx < 73 ? 2 : 1));
data_cnt_e1 = IMIN(size - e1, data_cnt);
data_cnt_e2 = IMIN(size - e2, data_cnt);
factor = 1.f / (data_cnt * data_cnt * data_cnt);
data_cnt = IMIN(size * size - z, data_cnt * data_cnt);
for(int f = 0; f < to_f; ++f) {
kidx = ((2) * 3 + ((1-fh)*2)) * 3 + (2);
shared[f] += (data_cnt_e1 - (e1+data_cnt_e1 >= size)) * (data_cnt_e2 - (e2+data_cnt_e2 >= size)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((2) * 3 + ((1-fh)*2)) * 3 + (1);
shared[f] += (data_cnt_e1 - (e1+data_cnt_e1 >= size)) * (data_cnt_e2) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((2) * 3 + ((1-fh)*2)) * 3 + (0);
shared[f] += (data_cnt_e1 - (e1+data_cnt_e1 >= size)) * (data_cnt_e2 - (e2 == 0)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((1) * 3 + ((1-fh)*2)) * 3 + (2);
shared[f] += (data_cnt_e1) * (data_cnt_e2 - (e2+data_cnt_e2 >= size)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((1) * 3 + ((1-fh)*2)) * 3 + (1);
shared[f] += data_cnt * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((1) * 3 + ((1-fh)*2)) * 3 + (0);
shared[f] += (data_cnt_e1) * (data_cnt_e2 - (e2 == 0)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((0) * 3 + ((1-fh)*2)) * 3 + (2);
shared[f] += (data_cnt_e1 - (e1 == 0)) * (data_cnt_e2 - (e2+data_cnt_e2 >= size)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((0) * 3 + ((1-fh)*2)) * 3 + (1);
shared[f] += (data_cnt_e1 - (e1 == 0)) * (data_cnt_e2) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((0) * 3 + ((1-fh)*2)) * 3 + (0);
shared[f] += (data_cnt_e1 - (e1 == 0)) * (data_cnt_e2 - (e2 == 0)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
}
z += data_cnt;
}
}
}
//w
for(int fw = 0; fw < 2; ++fw) {
d = ds; h = hs; w = ws + (fw*(size+1)-1);
if(col2oc_in_vol(in, d,h,w)) {
grid_idx = octree_grid_idx(in, n, d / 8, h / 8, w / 8);
tree = octree_get_tree(in, grid_idx);
int z = 0;
while(z < size * size) {
const int e1 = z_curve_x(z);
const int e2 = z_curve_y(z);
d = ds + e1;
h = hs + e2;
bit_idx = tree_bit_idx(tree, d % 8, h % 8, w % 8);
data_idx = tree_data_idx(tree, bit_idx, 1);
// leaf_idx = n_leafs_upto(in, grid_idx) + data_idx;
leaf_idx = in->prefix_leafs[grid_idx] + data_idx;
data_cnt = bit_idx == 0 ? 8 : (bit_idx < 9 ? 4 : (bit_idx < 73 ? 2 : 1));
data_cnt_e1 = IMIN(size - e1, data_cnt);
data_cnt_e2 = IMIN(size - e2, data_cnt);
factor = 1.f / (data_cnt * data_cnt * data_cnt);
data_cnt = IMIN(size * size - z, data_cnt * data_cnt);
for(int f = 0; f < to_f; ++f) {
kidx = ((2) * 3 + (2)) * 3 + ((1-fw)*2);
shared[f] += (data_cnt_e1 - (e1+data_cnt_e1 >= size)) * (data_cnt_e2 - (e2+data_cnt_e2 >= size)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((2) * 3 + (1)) * 3 + ((1-fw)*2);
shared[f] += (data_cnt_e1 - (e1+data_cnt_e1 >= size)) * (data_cnt_e2) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((2) * 3 + (0)) * 3 + ((1-fw)*2);
shared[f] += (data_cnt_e1 - (e1+data_cnt_e1 >= size)) * (data_cnt_e2 - (e2 == 0)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((1) * 3 + (2)) * 3 + ((1-fw)*2);
shared[f] += (data_cnt_e1) * (data_cnt_e2 - (e2+data_cnt_e2 >= size)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((1) * 3 + (1)) * 3 + ((1-fw)*2);
shared[f] += data_cnt * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((1) * 3 + (0)) * 3 + ((1-fw)*2);
shared[f] += (data_cnt_e1) * (data_cnt_e2 - (e2 == 0)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((0) * 3 + (2)) * 3 + ((1-fw)*2);
shared[f] += (data_cnt_e1 - (e1 == 0)) * (data_cnt_e2 - (e2+data_cnt_e2 >= size)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((0) * 3 + (1)) * 3 + ((1-fw)*2);
shared[f] += (data_cnt_e1 - (e1 == 0)) * (data_cnt_e2) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((0) * 3 + (0)) * 3 + ((1-fw)*2);
shared[f] += (data_cnt_e1 - (e1 == 0)) * (data_cnt_e2 - (e2 == 0)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
}
z += data_cnt;
}
}
}
//copy shared mem
for(int f = 0; f < to_f; ++f) {
data_in[from_f + f] = shared[f];
}
}
}
__global__ void kernel_col2oc_leafs(octree in, int n_leafs, const ot_data_t* col_buffer) {
extern __shared__ ot_data_t out_shared[];
CUDA_KERNEL_LOOP(leaf_idx, n_leafs) {
const int grid_idx = in.data[leaf_idx * in.feature_size];
const ot_tree_t* tree = octree_get_tree(&in, grid_idx);
// const int cum_n_leafs = n_leafs_upto(&in, grid_idx);
const int cum_n_leafs = in.prefix_leafs[grid_idx];
const int data_idx = leaf_idx - cum_n_leafs;
const int bit_idx = data_idx_to_bit_idx(tree, data_idx);
int n,d,h,w;
const int depth = octree_ind_to_dense_ind(&in, grid_idx, bit_idx, &n, &d,&h,&w);
const int size = width_from_depth(depth);
col2oc_leaf(col_buffer, tree, leaf_idx, grid_idx, bit_idx, n,d,h,w,size, out_shared + threadIdx.x * SHARED_N_FEAT, &in);
}
}
void col2oc_gpu(const ot_data_t* col_buffer, octree* in) {
const int n_blocks = octree_num_blocks(in);
octree_leaf_idx_to_grid_idx_gpu(in, in->feature_size, in->data_capacity, in->data);
// CUDA_CHECK( hipFuncSetCacheConfig(&kernel_oc2col_leafs, hipFuncCachePreferShared); );
hipLaunchKernelGGL(( kernel_col2oc_leafs), dim3(GET_BLOCKS_T(in->n_leafs, 256)), dim3(256), 256 * SHARED_N_FEAT * sizeof(ot_data_t), 0, *in, in->n_leafs, col_buffer);
CUDA_POST_KERNEL_CHECK;
}
| 0c9ef241b130dfc639063a0ac355ca7d60d02936.cu | // Copyright (c) 2017, The OctNet authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of the <organization> nor the
// names of its contributors may be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL OCTNET AUTHORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "octnet/gpu/col2oc.h"
#include "octnet/gpu/gpu.h"
#include "octnet/gpu/buffer.h"
#include "octnet/core/z_curve.h"
#define SHARED_N_FEAT 8
__device__
inline bool col2oc_in_vol(const octree* in, const int d, const int h, const int w) {
return d >= 0 && h >= 0 && w >= 0 && d < 8 * in->grid_depth && h < 8 * in->grid_height && w < 8 * in->grid_width;
}
__device__
inline void col2oc_leaf(const ot_data_t* out, const ot_tree_t* leaf_tree, const int leaf_leaf_idx, const int leaf_grid_idx, const int leaf_bit_idx,
const int n, const int ds, const int hs, const int ws, const int size,
ot_data_t* shared, octree* in) {
ot_data_t factor;
int d,h,w, kidx, grid_idx, bit_idx, data_idx, leaf_idx, data_cnt, data_cnt_e1, data_cnt_e2;
ot_tree_t* tree;
ot_data_t* data_in;
ot_data_t val;
data_idx = tree_data_idx(leaf_tree, leaf_bit_idx, in->feature_size);
// data_in = in->data_ptrs[leaf_grid_idx] + data_idx;
data_in = octree_get_data(in, leaf_grid_idx) + data_idx;
for(int rep_f = 0; rep_f < (in->feature_size + SHARED_N_FEAT - 1) / SHARED_N_FEAT; ++rep_f) {
int from_f = rep_f * SHARED_N_FEAT;
int to_f = IMIN(SHARED_N_FEAT, in->feature_size - rep_f * SHARED_N_FEAT);
for(int f = 0; f < to_f; ++f) {
leaf_idx = leaf_leaf_idx;
factor = 1.f / (size * size * size);
//leaf data
//1-center
// (1,1,1)=13
val = size*size*size * factor;
shared[f] = val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 13];
//6
val = (size-1)*size*size * factor;
//(0,1,1)=4, (2,1,1)=22, (1,0,1)=10, (1,2,1)=16, (1,1,0)=12, (1,1,2)=14
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 4];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 10];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 12];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 14];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 16];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 22];
//8
val = (size-1)*(size-1)*(size-1) * factor;
//(0,0,0)=0, (0,0,2)=2, (0,2,0)=6, (0,2,2)=8,
//(2,0,0)=18, (2,0,2)=20, (2,2,0)=24, (2,2,2)=26
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 0];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 2];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 6];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 8];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 18];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 20];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 24];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 26];
//12
val = (size-1)*(size-1)*(size) * factor;
//(0,0,1)=1, (0,1,0)=3, (0,1,2)=5, (0,2,1)=7
//(1,0,0)=9, (1,0,2)=11, (1,2,0)=15, (1,2,2)=17
//(2,0,1)=19, (2,1,0)=21, (2,1,2)=23, (2,2,1)=25
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 1];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 3];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 5];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 7];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 9];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 11];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 15];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 17];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 19];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 21];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 23];
shared[f] += val * out[(leaf_idx * in->feature_size + from_f + f) * K333 + 25];
}
//corner data
for(int cd = 0; cd < 2; ++cd) {
for(int ch = 0; ch < 2; ++ch) {
for(int cw = 0; cw < 2; ++cw) {
d = ds + (cd*(size+1)-1); h = hs + (ch*(size+1)-1); w = ws + (cw*(size+1)-1);
if(col2oc_in_vol(in, d,h,w)) {
grid_idx = octree_grid_idx(in, n, d / 8, h / 8, w / 8);
tree = octree_get_tree(in, grid_idx);
bit_idx = tree_bit_idx(tree, d % 8, h % 8, w % 8);
data_idx = tree_data_idx(tree, bit_idx, 1);
// leaf_idx = n_leafs_upto(in, grid_idx) + data_idx;
leaf_idx = in->prefix_leafs[grid_idx] + data_idx;
factor = 1.f / (bit_idx == 0 ? 512 : (bit_idx < 9 ? 64 : (bit_idx < 73 ? 8 : 1)));
kidx = ((1-cd)*2*3 + (1-ch)*2)*3 + (1-cw)*2;
for(int f = 0; f < to_f; ++f) {
shared[f] += factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
}
}
}
}
}
//along the edges
//d
for(int ch = 0; ch < 2; ++ch) {
for(int cw = 0; cw < 2; ++cw) {
d = ds; h = hs + (ch*(size+1)-1); w = ws + (cw*(size+1)-1);
if(col2oc_in_vol(in, d,h,w)) {
grid_idx = octree_grid_idx(in, n, d / 8, h / 8, w / 8);
tree = octree_get_tree(in, grid_idx);
int e = 0;
while(e < size) {
d = ds + e;
bit_idx = tree_bit_idx(tree, d % 8, h % 8, w % 8);
data_cnt = bit_idx == 0 ? 8 : (bit_idx < 9 ? 4 : (bit_idx < 73 ? 2 : 1));
factor = 1.f / (data_cnt * data_cnt * data_cnt);
data_cnt = IMIN(size - e, data_cnt);
data_idx = tree_data_idx(tree, bit_idx, 1);
// leaf_idx = n_leafs_upto(in, grid_idx) + data_idx;
leaf_idx = in->prefix_leafs[grid_idx] + data_idx;
for(int f = 0; f < to_f; ++f) {
kidx = ((2) * 3 + ((1-ch)*2)) * 3 + ((1-cw)*2);
shared[f] += (data_cnt - (e+data_cnt >= size)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((1) * 3 + ((1-ch)*2)) * 3 + ((1-cw)*2);
shared[f] += data_cnt * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((0) * 3 + ((1-ch)*2)) * 3 + ((1-cw)*2);
shared[f] += (data_cnt - (e == 0)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
}
e += data_cnt;
}
}
}
}
//h
for(int cd = 0; cd < 2; ++cd) {
for(int cw = 0; cw < 2; ++cw) {
d = ds + (cd*(size+1)-1); h = hs; w = ws + (cw*(size+1)-1);
if(col2oc_in_vol(in, d,h,w)) {
grid_idx = octree_grid_idx(in, n, d / 8, h / 8, w / 8);
tree = octree_get_tree(in, grid_idx);
int e = 0;
while(e < size) {
h = hs + e;
bit_idx = tree_bit_idx(tree, d % 8, h % 8, w % 8);
data_cnt = bit_idx == 0 ? 8 : (bit_idx < 9 ? 4 : (bit_idx < 73 ? 2 : 1));
factor = 1.f / (data_cnt * data_cnt * data_cnt);
data_cnt = IMIN(size - e, data_cnt);
data_idx = tree_data_idx(tree, bit_idx, 1);
// leaf_idx = n_leafs_upto(in, grid_idx) + data_idx;
leaf_idx = in->prefix_leafs[grid_idx] + data_idx;
for(int f = 0; f < to_f; ++f) {
kidx = (((1-cd)*2) * 3 + (2)) * 3 + ((1-cw)*2);
shared[f] += (data_cnt - (e+data_cnt >= size)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = (((1-cd)*2) * 3 + (1)) * 3 + ((1-cw)*2);
shared[f] += data_cnt * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = (((1-cd)*2) * 3 + (0)) * 3 + ((1-cw)*2);
shared[f] += (data_cnt - (e == 0)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
}
e += data_cnt;
}
}
}
}
//w
for(int cd = 0; cd < 2; ++cd) {
for(int ch = 0; ch < 2; ++ch) {
d = ds + (cd*(size+1)-1); h = hs + (ch*(size+1)-1); w = ws;
if(col2oc_in_vol(in, d,h,w)) {
grid_idx = octree_grid_idx(in, n, d / 8, h / 8, w / 8);
tree = octree_get_tree(in, grid_idx);
int e = 0;
while(e < size) {
w = ws + e;
bit_idx = tree_bit_idx(tree, d % 8, h % 8, w % 8);
data_cnt = bit_idx == 0 ? 8 : (bit_idx < 9 ? 4 : (bit_idx < 73 ? 2 : 1));
factor = 1.f / (data_cnt * data_cnt * data_cnt);
data_cnt = IMIN(size - e, data_cnt);
data_idx = tree_data_idx(tree, bit_idx, 1);
// leaf_idx = n_leafs_upto(in, grid_idx) + data_idx;
leaf_idx = in->prefix_leafs[grid_idx] + data_idx;
for(int f = 0; f < to_f; ++f) {
kidx = (((1-cd)*2) * 3 + ((1-ch)*2)) * 3 + (2);
shared[f] += (data_cnt - (e+data_cnt >= size)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = (((1-cd)*2) * 3 + ((1-ch)*2)) * 3 + (1);
shared[f] += data_cnt * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = (((1-cd)*2) * 3 + ((1-ch)*2)) * 3 + (0);
shared[f] += (data_cnt - (e == 0)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
}
e += data_cnt;
}
}
}
}
//along the faces
//d
for(int fd = 0; fd < 2; ++fd) {
d = ds + (fd*(size+1)-1); h = hs; w = ws;
if(col2oc_in_vol(in, d,h,w)) {
grid_idx = octree_grid_idx(in, n, d / 8, h / 8, w / 8);
tree = octree_get_tree(in, grid_idx);
int z = 0;
while(z < size * size) {
const int e1 = z_curve_x(z);
const int e2 = z_curve_y(z);
h = hs + e1;
w = ws + e2;
bit_idx = tree_bit_idx(tree, d % 8, h % 8, w % 8);
data_idx = tree_data_idx(tree, bit_idx, 1);
// leaf_idx = n_leafs_upto(in, grid_idx) + data_idx;
leaf_idx = in->prefix_leafs[grid_idx] + data_idx;
data_cnt = bit_idx == 0 ? 8 : (bit_idx < 9 ? 4 : (bit_idx < 73 ? 2 : 1));
data_cnt_e1 = IMIN(size - e1, data_cnt);
data_cnt_e2 = IMIN(size - e2, data_cnt);
factor = 1.f / (data_cnt * data_cnt * data_cnt);
data_cnt = IMIN(size * size - z, data_cnt * data_cnt);
for(int f = 0; f < to_f; ++f) {
kidx = (((1-fd)*2) * 3 + (2)) * 3 + (2);
shared[f] += (data_cnt_e1 - (e1+data_cnt_e1 >= size)) * (data_cnt_e2 - (e2+data_cnt_e2 >= size)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = (((1-fd)*2) * 3 + (2)) * 3 + (1);
shared[f] += (data_cnt_e1 - (e1+data_cnt_e1 >= size)) * (data_cnt_e2) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = (((1-fd)*2) * 3 + (2)) * 3 + (0);
shared[f] += (data_cnt_e1 - (e1+data_cnt_e1 >= size)) * (data_cnt_e2 - (e2 == 0)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = (((1-fd)*2) * 3 + (1)) * 3 + (2);
shared[f] += (data_cnt_e1) * (data_cnt_e2 - (e2+data_cnt_e2 >= size)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = (((1-fd)*2) * 3 + (1)) * 3 + (1);
shared[f] += data_cnt * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = (((1-fd)*2) * 3 + (1)) * 3 + (0);
shared[f] += (data_cnt_e1) * (data_cnt_e2 - (e2 == 0)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = (((1-fd)*2) * 3 + (0)) * 3 + (2);
shared[f] += (data_cnt_e1 - (e1 == 0)) * (data_cnt_e2 - (e2+data_cnt_e2 >= size)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = (((1-fd)*2) * 3 + (0)) * 3 + (1);
shared[f] += (data_cnt_e1 - (e1 == 0)) * (data_cnt_e2) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = (((1-fd)*2) * 3 + (0)) * 3 + (0);
shared[f] += (data_cnt_e1 - (e1 == 0)) * (data_cnt_e2 - (e2 == 0)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
}
z += data_cnt;
}
}
}
//h
for(int fh = 0; fh < 2; ++fh) {
d = ds; h = hs + (fh*(size+1)-1); w = ws;
if(col2oc_in_vol(in, d,h,w)) {
grid_idx = octree_grid_idx(in, n, d / 8, h / 8, w / 8);
tree = octree_get_tree(in, grid_idx);
int z = 0;
while(z < size * size) {
const int e1 = z_curve_x(z);
const int e2 = z_curve_y(z);
d = ds + e1;
w = ws + e2;
bit_idx = tree_bit_idx(tree, d % 8, h % 8, w % 8);
data_idx = tree_data_idx(tree, bit_idx, 1);
// leaf_idx = n_leafs_upto(in, grid_idx) + data_idx;
leaf_idx = in->prefix_leafs[grid_idx] + data_idx;
data_cnt = bit_idx == 0 ? 8 : (bit_idx < 9 ? 4 : (bit_idx < 73 ? 2 : 1));
data_cnt_e1 = IMIN(size - e1, data_cnt);
data_cnt_e2 = IMIN(size - e2, data_cnt);
factor = 1.f / (data_cnt * data_cnt * data_cnt);
data_cnt = IMIN(size * size - z, data_cnt * data_cnt);
for(int f = 0; f < to_f; ++f) {
kidx = ((2) * 3 + ((1-fh)*2)) * 3 + (2);
shared[f] += (data_cnt_e1 - (e1+data_cnt_e1 >= size)) * (data_cnt_e2 - (e2+data_cnt_e2 >= size)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((2) * 3 + ((1-fh)*2)) * 3 + (1);
shared[f] += (data_cnt_e1 - (e1+data_cnt_e1 >= size)) * (data_cnt_e2) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((2) * 3 + ((1-fh)*2)) * 3 + (0);
shared[f] += (data_cnt_e1 - (e1+data_cnt_e1 >= size)) * (data_cnt_e2 - (e2 == 0)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((1) * 3 + ((1-fh)*2)) * 3 + (2);
shared[f] += (data_cnt_e1) * (data_cnt_e2 - (e2+data_cnt_e2 >= size)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((1) * 3 + ((1-fh)*2)) * 3 + (1);
shared[f] += data_cnt * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((1) * 3 + ((1-fh)*2)) * 3 + (0);
shared[f] += (data_cnt_e1) * (data_cnt_e2 - (e2 == 0)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((0) * 3 + ((1-fh)*2)) * 3 + (2);
shared[f] += (data_cnt_e1 - (e1 == 0)) * (data_cnt_e2 - (e2+data_cnt_e2 >= size)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((0) * 3 + ((1-fh)*2)) * 3 + (1);
shared[f] += (data_cnt_e1 - (e1 == 0)) * (data_cnt_e2) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((0) * 3 + ((1-fh)*2)) * 3 + (0);
shared[f] += (data_cnt_e1 - (e1 == 0)) * (data_cnt_e2 - (e2 == 0)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
}
z += data_cnt;
}
}
}
//w
for(int fw = 0; fw < 2; ++fw) {
d = ds; h = hs; w = ws + (fw*(size+1)-1);
if(col2oc_in_vol(in, d,h,w)) {
grid_idx = octree_grid_idx(in, n, d / 8, h / 8, w / 8);
tree = octree_get_tree(in, grid_idx);
int z = 0;
while(z < size * size) {
const int e1 = z_curve_x(z);
const int e2 = z_curve_y(z);
d = ds + e1;
h = hs + e2;
bit_idx = tree_bit_idx(tree, d % 8, h % 8, w % 8);
data_idx = tree_data_idx(tree, bit_idx, 1);
// leaf_idx = n_leafs_upto(in, grid_idx) + data_idx;
leaf_idx = in->prefix_leafs[grid_idx] + data_idx;
data_cnt = bit_idx == 0 ? 8 : (bit_idx < 9 ? 4 : (bit_idx < 73 ? 2 : 1));
data_cnt_e1 = IMIN(size - e1, data_cnt);
data_cnt_e2 = IMIN(size - e2, data_cnt);
factor = 1.f / (data_cnt * data_cnt * data_cnt);
data_cnt = IMIN(size * size - z, data_cnt * data_cnt);
for(int f = 0; f < to_f; ++f) {
kidx = ((2) * 3 + (2)) * 3 + ((1-fw)*2);
shared[f] += (data_cnt_e1 - (e1+data_cnt_e1 >= size)) * (data_cnt_e2 - (e2+data_cnt_e2 >= size)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((2) * 3 + (1)) * 3 + ((1-fw)*2);
shared[f] += (data_cnt_e1 - (e1+data_cnt_e1 >= size)) * (data_cnt_e2) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((2) * 3 + (0)) * 3 + ((1-fw)*2);
shared[f] += (data_cnt_e1 - (e1+data_cnt_e1 >= size)) * (data_cnt_e2 - (e2 == 0)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((1) * 3 + (2)) * 3 + ((1-fw)*2);
shared[f] += (data_cnt_e1) * (data_cnt_e2 - (e2+data_cnt_e2 >= size)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((1) * 3 + (1)) * 3 + ((1-fw)*2);
shared[f] += data_cnt * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((1) * 3 + (0)) * 3 + ((1-fw)*2);
shared[f] += (data_cnt_e1) * (data_cnt_e2 - (e2 == 0)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((0) * 3 + (2)) * 3 + ((1-fw)*2);
shared[f] += (data_cnt_e1 - (e1 == 0)) * (data_cnt_e2 - (e2+data_cnt_e2 >= size)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((0) * 3 + (1)) * 3 + ((1-fw)*2);
shared[f] += (data_cnt_e1 - (e1 == 0)) * (data_cnt_e2) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
kidx = ((0) * 3 + (0)) * 3 + ((1-fw)*2);
shared[f] += (data_cnt_e1 - (e1 == 0)) * (data_cnt_e2 - (e2 == 0)) * factor * out[(leaf_idx * in->feature_size + from_f + f) * K333 + kidx];
}
z += data_cnt;
}
}
}
//copy shared mem
for(int f = 0; f < to_f; ++f) {
data_in[from_f + f] = shared[f];
}
}
}
__global__ void kernel_col2oc_leafs(octree in, int n_leafs, const ot_data_t* col_buffer) {
extern __shared__ ot_data_t out_shared[];
CUDA_KERNEL_LOOP(leaf_idx, n_leafs) {
const int grid_idx = in.data[leaf_idx * in.feature_size];
const ot_tree_t* tree = octree_get_tree(&in, grid_idx);
// const int cum_n_leafs = n_leafs_upto(&in, grid_idx);
const int cum_n_leafs = in.prefix_leafs[grid_idx];
const int data_idx = leaf_idx - cum_n_leafs;
const int bit_idx = data_idx_to_bit_idx(tree, data_idx);
int n,d,h,w;
const int depth = octree_ind_to_dense_ind(&in, grid_idx, bit_idx, &n, &d,&h,&w);
const int size = width_from_depth(depth);
col2oc_leaf(col_buffer, tree, leaf_idx, grid_idx, bit_idx, n,d,h,w,size, out_shared + threadIdx.x * SHARED_N_FEAT, &in);
}
}
void col2oc_gpu(const ot_data_t* col_buffer, octree* in) {
const int n_blocks = octree_num_blocks(in);
octree_leaf_idx_to_grid_idx_gpu(in, in->feature_size, in->data_capacity, in->data);
// CUDA_CHECK( cudaFuncSetCacheConfig(&kernel_oc2col_leafs, cudaFuncCachePreferShared); );
kernel_col2oc_leafs<<<GET_BLOCKS_T(in->n_leafs, 256), 256, 256 * SHARED_N_FEAT * sizeof(ot_data_t)>>>(*in, in->n_leafs, col_buffer);
CUDA_POST_KERNEL_CHECK;
}
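// --- Illustrative sketch, not part of the original file ---
// The expression "bit_idx == 0 ? 8 : (bit_idx < 9 ? 4 : (bit_idx < 73 ? 2 : 1))" repeated in
// col2oc_leaf maps a shallow-octree bit index to the side length (in voxels) of the matching
// leaf cell inside its 8x8x8 grid block. A helper like this (the name is an assumption) makes
// that intent explicit:
__device__ __forceinline__ int col2oc_leaf_width(int bit_idx) {
// bit 0 is the root cell (8 voxels per side); bits 1..8 are depth-1 cells (4 per side);
// bits 9..72 are depth-2 cells (2 per side); deeper bits are single voxels.
return bit_idx == 0 ? 8 : (bit_idx < 9 ? 4 : (bit_idx < 73 ? 2 : 1));
}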
|
0f4958caf2971218b0d5b34c5351da9033f81f31.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pointwise_hist2_one_byte_templ.cuh"
#include <hip/hip_cooperative_groups.h>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <library/cuda/wrappers/arch.cuh>
using namespace cooperative_groups;
namespace NKernel
{
template <>
struct TLoadEntriesTrait<3, false> {
constexpr static ELoadType LoadType() {
return ELoadType::OneElement;
}
};
template <>
struct TLoadEntriesTrait<3, true> {
constexpr static ELoadType LoadType() {
#if __CUDA_ARCH__ < 520
return ELoadType::OneElement;
#else
return ELoadType::TwoElements;
#endif
}
};
template <>
struct TDeclarePassInnerOuterBitsTrait<3> {
constexpr static int Inner() {
return 1;
}
constexpr static int Outer() {
return 2;
}
};
template <int BLOCK_SIZE>
struct TPointHist<2, 1, BLOCK_SIZE> {
constexpr static int OUTER_HIST_BITS_COUNT = 2;
constexpr static int INNER_HIST_BITS_COUNT = 1;
float* __restrict__ Buffer;
float mostRecentStat1[4];
float mostRecentStat2[4];
uchar mostRecentBin[4];
__forceinline__ __device__ int SliceOffset() {
const int maxBlocks = BLOCK_SIZE * 32 / (1024 << OUTER_HIST_BITS_COUNT);
static_assert(OUTER_HIST_BITS_COUNT <= 2, "Error: assume 12 warps, so limited by 128-bin histogram per warp");
static_assert(OUTER_HIST_BITS_COUNT > 0 && INNER_HIST_BITS_COUNT > 0, "This histogram is specialized for 255 bin count");
const int warpId = (threadIdx.x / 32) % maxBlocks;
const int warpOffset = (1024 << OUTER_HIST_BITS_COUNT) * warpId;
const int blocks = 4 >> INNER_HIST_BITS_COUNT;
const int innerHistStart = (threadIdx.x & ((blocks - 1) << (INNER_HIST_BITS_COUNT + 3)));
return warpOffset + innerHistStart;
}
__forceinline__ __device__ TPointHist(float* buff) {
const int HIST_SIZE = 32 * BLOCK_SIZE;
#pragma unroll 8
for (int i = threadIdx.x; i < HIST_SIZE; i += BLOCK_SIZE) {
buff[i] = 0;
}
Buffer = buff + SliceOffset();
__syncthreads();
#pragma unroll
for (int f = 0; f < 4; ++f) {
mostRecentBin[f] = 0;
mostRecentStat1[f] = 0;
mostRecentStat2[f] = 0;
}
}
__forceinline__ __device__ void Add(float val, float* dst) {
atomicAdd(dst, val);
}
__forceinline__ __device__ void AddPoint(ui32 ci, const float t, const float w) {
const bool flag = threadIdx.x & 1;
const float stat1 = flag ? t : w;
const float stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
const int f = ((2 * i + threadIdx.x) & 6);
const uchar bin = bfe(ci, 24 - (f << 2), 8);
if (bin != mostRecentBin[i]) {
int offset = f;
const uchar mask = (1 << INNER_HIST_BITS_COUNT) - 1;
offset += 8 * (mostRecentBin[i] & mask);
offset += 32 * ((mostRecentBin[i] >> INNER_HIST_BITS_COUNT));
offset += flag;
Add(mostRecentStat1[i], Buffer + offset);
offset = flag ? offset - 1 : offset + 1;
Add(mostRecentStat2[i], Buffer + offset);
mostRecentBin[i] = bin;
mostRecentStat1[i] = 0;
mostRecentStat2[i] = 0;
}
{
mostRecentStat1[i] += stat1;
mostRecentStat2[i] += stat2;
}
}
}
__forceinline__ __device__ void AddPoint2(uint2 bin, const float2 t, const float2 w) {
AddPoint(bin.x, t.x, w.x);
AddPoint(bin.y, t.y, w.y);
}
__forceinline__ __device__ void AddPoint4(uint4 ci, const float4 t, const float4 w) {
AddPoint(ci.x, t.x, w.x);
AddPoint(ci.y, t.y, w.y);
AddPoint(ci.z, t.z, w.z);
AddPoint(ci.w, t.w, w.w);
}
//After reduce we store histograms by blocks: 256 floats (4 x 2 x 32)
// for first 32 bins; than 256 floats for second 32 bins, etc
__forceinline__ __device__ void Reduce() {
{
const bool flag = threadIdx.x & 1;
#pragma unroll
for (int i = 0; i < 4; i++) {
const short f = ((2 * i + threadIdx.x) & 6);
int offset = f;
const int mask = (1 << INNER_HIST_BITS_COUNT) - 1;
offset += 8 * (mostRecentBin[i] & mask);
offset += 32 * ((mostRecentBin[i] >> INNER_HIST_BITS_COUNT));
Add(mostRecentStat1[i], Buffer + offset + flag);
Add(mostRecentStat2[i], Buffer + offset + !flag);
}
}
Buffer -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024 << OUTER_HIST_BITS_COUNT;
const int maxBlocks = BLOCK_SIZE * 32 / (1024 << OUTER_HIST_BITS_COUNT);
for (int start = threadIdx.x; start < warpHistSize; start += BLOCK_SIZE) {
float sum = 0;
// 12 iterations at 32-bin
#pragma unroll maxBlocks
for (int i = start; i < 32 * BLOCK_SIZE; i += warpHistSize) {
sum += Buffer[i];
}
Buffer[warpHistSize + start] = sum;
}
}
__syncthreads();
if (threadIdx.x < 256) {
const int w = threadIdx.x & 1;
float sum[4];
const int maxFoldCount = (1 << (5 + INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT));
for (int fold = (threadIdx.x >> 1); fold < maxFoldCount; fold += 128) {
#pragma unroll
for (int f = 0; f < 4; ++f) {
sum[f] = 0;
}
const int innerHistCount = 4 >> INNER_HIST_BITS_COUNT;
const int lowBitMask = (1 << INNER_HIST_BITS_COUNT) - 1;
const float* __restrict__ src = Buffer
+ (1024 << OUTER_HIST_BITS_COUNT) //warpHistSize
+ 8 * (fold & lowBitMask)
+ 32 * (fold >> INNER_HIST_BITS_COUNT)
+ w;
#pragma unroll
for (int inWarpHist = 0; inWarpHist < innerHistCount; ++inWarpHist) {
#pragma unroll
for (int f = 0; f < 4; ++f) {
sum[f] += src[2 * f + (inWarpHist << (3 + INNER_HIST_BITS_COUNT))];
}
}
#pragma unroll
for (int f = 0; f < 4; ++f) {
Buffer[2 * (maxFoldCount * f + fold) + w] = sum[f];
}
}
}
__syncthreads();
}
};
DEFINE_NON_BINARY(8)
}
| 0f4958caf2971218b0d5b34c5351da9033f81f31.cu | #include "pointwise_hist2_one_byte_templ.cuh"
#include <cooperative_groups.h>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <library/cuda/wrappers/arch.cuh>
using namespace cooperative_groups;
namespace NKernel
{
template <>
struct TLoadEntriesTrait<3, false> {
constexpr static ELoadType LoadType() {
return ELoadType::OneElement;
}
};
template <>
struct TLoadEntriesTrait<3, true> {
constexpr static ELoadType LoadType() {
#if __CUDA_ARCH__ < 520
return ELoadType::OneElement;
#else
return ELoadType::TwoElements;
#endif
}
};
template <>
struct TDeclarePassInnerOuterBitsTrait<3> {
constexpr static int Inner() {
return 1;
}
constexpr static int Outer() {
return 2;
}
};
template <int BLOCK_SIZE>
struct TPointHist<2, 1, BLOCK_SIZE> {
constexpr static int OUTER_HIST_BITS_COUNT = 2;
constexpr static int INNER_HIST_BITS_COUNT = 1;
float* __restrict__ Buffer;
float mostRecentStat1[4];
float mostRecentStat2[4];
uchar mostRecentBin[4];
__forceinline__ __device__ int SliceOffset() {
const int maxBlocks = BLOCK_SIZE * 32 / (1024 << OUTER_HIST_BITS_COUNT);
static_assert(OUTER_HIST_BITS_COUNT <= 2, "Error: assume 12 warps, so limited by 128-bin histogram per warp");
static_assert(OUTER_HIST_BITS_COUNT > 0 && INNER_HIST_BITS_COUNT > 0, "This histogram is specialized for 255 bin count");
const int warpId = (threadIdx.x / 32) % maxBlocks;
const int warpOffset = (1024 << OUTER_HIST_BITS_COUNT) * warpId;
const int blocks = 4 >> INNER_HIST_BITS_COUNT;
const int innerHistStart = (threadIdx.x & ((blocks - 1) << (INNER_HIST_BITS_COUNT + 3)));
return warpOffset + innerHistStart;
}
__forceinline__ __device__ TPointHist(float* buff) {
const int HIST_SIZE = 32 * BLOCK_SIZE;
#pragma unroll 8
for (int i = threadIdx.x; i < HIST_SIZE; i += BLOCK_SIZE) {
buff[i] = 0;
}
Buffer = buff + SliceOffset();
__syncthreads();
#pragma unroll
for (int f = 0; f < 4; ++f) {
mostRecentBin[f] = 0;
mostRecentStat1[f] = 0;
mostRecentStat2[f] = 0;
}
}
__forceinline__ __device__ void Add(float val, float* dst) {
atomicAdd(dst, val);
}
__forceinline__ __device__ void AddPoint(ui32 ci, const float t, const float w) {
const bool flag = threadIdx.x & 1;
const float stat1 = flag ? t : w;
const float stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
const int f = ((2 * i + threadIdx.x) & 6);
const uchar bin = bfe(ci, 24 - (f << 2), 8);
if (bin != mostRecentBin[i]) {
int offset = f;
const uchar mask = (1 << INNER_HIST_BITS_COUNT) - 1;
offset += 8 * (mostRecentBin[i] & mask);
offset += 32 * ((mostRecentBin[i] >> INNER_HIST_BITS_COUNT));
offset += flag;
Add(mostRecentStat1[i], Buffer + offset);
offset = flag ? offset - 1 : offset + 1;
Add(mostRecentStat2[i], Buffer + offset);
mostRecentBin[i] = bin;
mostRecentStat1[i] = 0;
mostRecentStat2[i] = 0;
}
{
mostRecentStat1[i] += stat1;
mostRecentStat2[i] += stat2;
}
}
}
__forceinline__ __device__ void AddPoint2(uint2 bin, const float2 t, const float2 w) {
AddPoint(bin.x, t.x, w.x);
AddPoint(bin.y, t.y, w.y);
}
__forceinline__ __device__ void AddPoint4(uint4 ci, const float4 t, const float4 w) {
AddPoint(ci.x, t.x, w.x);
AddPoint(ci.y, t.y, w.y);
AddPoint(ci.z, t.z, w.z);
AddPoint(ci.w, t.w, w.w);
}
//After reduce we store histograms by blocks: 256 floats (4 x 2 x 32)
// for first 32 bins; than 256 floats for second 32 bins, etc
__forceinline__ __device__ void Reduce() {
{
const bool flag = threadIdx.x & 1;
#pragma unroll
for (int i = 0; i < 4; i++) {
const short f = ((2 * i + threadIdx.x) & 6);
int offset = f;
const int mask = (1 << INNER_HIST_BITS_COUNT) - 1;
offset += 8 * (mostRecentBin[i] & mask);
offset += 32 * ((mostRecentBin[i] >> INNER_HIST_BITS_COUNT));
Add(mostRecentStat1[i], Buffer + offset + flag);
Add(mostRecentStat2[i], Buffer + offset + !flag);
}
}
Buffer -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024 << OUTER_HIST_BITS_COUNT;
const int maxBlocks = BLOCK_SIZE * 32 / (1024 << OUTER_HIST_BITS_COUNT);
for (int start = threadIdx.x; start < warpHistSize; start += BLOCK_SIZE) {
float sum = 0;
// 12 iterations at 32-bin
#pragma unroll maxBlocks
for (int i = start; i < 32 * BLOCK_SIZE; i += warpHistSize) {
sum += Buffer[i];
}
Buffer[warpHistSize + start] = sum;
}
}
__syncthreads();
if (threadIdx.x < 256) {
const int w = threadIdx.x & 1;
float sum[4];
const int maxFoldCount = (1 << (5 + INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT));
for (int fold = (threadIdx.x >> 1); fold < maxFoldCount; fold += 128) {
#pragma unroll
for (int f = 0; f < 4; ++f) {
sum[f] = 0;
}
const int innerHistCount = 4 >> INNER_HIST_BITS_COUNT;
const int lowBitMask = (1 << INNER_HIST_BITS_COUNT) - 1;
const float* __restrict__ src = Buffer
+ (1024 << OUTER_HIST_BITS_COUNT) //warpHistSize
+ 8 * (fold & lowBitMask)
+ 32 * (fold >> INNER_HIST_BITS_COUNT)
+ w;
#pragma unroll
for (int inWarpHist = 0; inWarpHist < innerHistCount; ++inWarpHist) {
#pragma unroll
for (int f = 0; f < 4; ++f) {
sum[f] += src[2 * f + (inWarpHist << (3 + INNER_HIST_BITS_COUNT))];
}
}
#pragma unroll
for (int f = 0; f < 4; ++f) {
Buffer[2 * (maxFoldCount * f + fold) + w] = sum[f];
}
}
}
__syncthreads();
}
};
DEFINE_NON_BINARY(8)
}
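// --- Illustrative sketch, not part of the original file ---
// bfe(ci, 24 - (f << 2), 8) in AddPoint above is a bit-field extract: with f in {0,2,4,6} it
// pulls one of the four 8-bit feature bins packed into the 32-bit index ci. In plain CUDA C it
// is equivalent to the helper below (the name is an assumption):
__device__ __forceinline__ unsigned char extract_packed_bin(unsigned int ci, int f) {
return (unsigned char)((ci >> (24 - (f << 2))) & 0xFFu); // f=0 reads the top byte, f=6 the lowest
}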
|
1418edcedef28cce8d4f50a0519bee03b872a6bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <rocblas.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/execution_policy.h>
#define pow_2(x) ( ((x) * (x)) )
__global__
void atoms_difference(sMolecule A, sMolecule B,
float * d_result,
int i)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
// printf("processing (%d, %d)\n", i, j);
if (j < i) {
float da = sqrt(pow_2(A.x[i] - A.x[j])
+ pow_2(A.y[i] - A.y[j])
+ pow_2(A.z[i] - A.z[j]));
float db = sqrt(pow_2(B.x[i] - B.x[j])
+ pow_2(B.y[i] - B.y[j])
+ pow_2(B.z[i] - B.z[j]));
// printf("Ax diff [%f, %f, %f]\n",
// pow_2(A.x[i] - A.x[j]),
// pow_2(A.y[i] - A.y[j]),
// pow_2(A.z[i] - A.z[j]));
// printf("Da: %f db: %f\n", da, db);
// printf("saving result: %f\n", pow_2(da - db));
d_result[j] += pow_2(da - db);
}
}
float solveGPU(sMolecule d_A, sMolecule d_B, int n) {
int BLOCK_SIZE_X = 256;
int GRID_SIZE_X = (n / BLOCK_SIZE_X) + 1;
dim3 dimBlock(BLOCK_SIZE_X);
dim3 dimGrid(GRID_SIZE_X);
float *d_result;
int result_size = GRID_SIZE_X * BLOCK_SIZE_X;
hipError_t err = hipMalloc(&d_result, result_size * sizeof(float));
if ( hipSuccess != err ) {
fprintf( stderr, "Cuda error in file '%s' in line %i : %s.\n",
__FILE__, __LINE__, hipGetErrorString(err) );
return 0.0f;
}
err = hipMemset(d_result, 0, result_size * sizeof(float));
if ( hipSuccess != err ) {
fprintf( stderr, "Cuda error in file '%s' in line %i : %s.\n",
__FILE__, __LINE__, hipGetErrorString(err) );
return 0.0f;
}
for (int i = n - 1; i >= 0; --i) {
GRID_SIZE_X = (i / BLOCK_SIZE_X) + 1;
hipLaunchKernelGGL(( atoms_difference), dim3(dim3(GRID_SIZE_X)), dim3(dimBlock), 0, 0, d_A, d_B, d_result, i);
}
float RMSD = 0;
thrust::device_ptr<float> dptr(d_result);
RMSD = thrust::reduce(thrust::device, dptr, dptr + result_size);
hipFree(d_result);
return sqrt(1 / ((float)n * ((float)n - 1)) * RMSD);
}
| 1418edcedef28cce8d4f50a0519bee03b872a6bb.cu | #include <cublas_v2.h>
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/execution_policy.h>
#define pow_2(x) ( ((x) * (x)) )
__global__
void atoms_difference(sMolecule A, sMolecule B,
float * d_result,
int i)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
// printf("processing (%d, %d)\n", i, j);
if (j < i) {
float da = sqrt(pow_2(A.x[i] - A.x[j])
+ pow_2(A.y[i] - A.y[j])
+ pow_2(A.z[i] - A.z[j]));
float db = sqrt(pow_2(B.x[i] - B.x[j])
+ pow_2(B.y[i] - B.y[j])
+ pow_2(B.z[i] - B.z[j]));
// printf("Ax diff [%f, %f, %f]\n",
// pow_2(A.x[i] - A.x[j]),
// pow_2(A.y[i] - A.y[j]),
// pow_2(A.z[i] - A.z[j]));
// printf("Da: %f db: %f\n", da, db);
// printf("saving result: %f\n", pow_2(da - db));
d_result[j] += pow_2(da - db);
}
}
float solveGPU(sMolecule d_A, sMolecule d_B, int n) {
int BLOCK_SIZE_X = 256;
int GRID_SIZE_X = (n / BLOCK_SIZE_X) + 1;
dim3 dimBlock(BLOCK_SIZE_X);
dim3 dimGrid(GRID_SIZE_X);
float *d_result;
int result_size = GRID_SIZE_X * BLOCK_SIZE_X;
cudaError err = cudaMalloc(&d_result, result_size * sizeof(float));
if ( cudaSuccess != err ) {
fprintf( stderr, "Cuda error in file '%s' in line %i : %s.\n",
__FILE__, __LINE__, cudaGetErrorString(err) );
return 0.0f;
}
err = cudaMemset(d_result, 0, result_size * sizeof(float));
if ( cudaSuccess != err ) {
fprintf( stderr, "Cuda error in file '%s' in line %i : %s.\n",
__FILE__, __LINE__, cudaGetErrorString(err) );
return 0.0f;
}
for (int i = n - 1; i >= 0; --i) {
GRID_SIZE_X = (i / BLOCK_SIZE_X) + 1;
atoms_difference<<<dim3(GRID_SIZE_X), dimBlock>>> (d_A, d_B, d_result, i);
}
float RMSD = 0;
thrust::device_ptr<float> dptr(d_result);
RMSD = thrust::reduce(thrust::device, dptr, dptr + result_size);
cudaFree(d_result);
return sqrt(1 / ((float)n * ((float)n - 1)) * RMSD);
}
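// --- Illustrative sketch, not part of the original file ---
// A plain CPU reference for the same quantity, useful for validating solveGPU on small n.
// The function name is an assumption, and it expects an sMolecule whose x/y/z arrays are
// readable from the host (solveGPU above receives device-resident arrays instead).
float solveCPU(sMolecule A, sMolecule B, int n) {
double acc = 0.0;
for (int i = 1; i < n; ++i) {
for (int j = 0; j < i; ++j) { // same ordered pairs j < i as atoms_difference
double da = sqrt(pow_2(A.x[i] - A.x[j]) + pow_2(A.y[i] - A.y[j]) + pow_2(A.z[i] - A.z[j]));
double db = sqrt(pow_2(B.x[i] - B.x[j]) + pow_2(B.y[i] - B.y[j]) + pow_2(B.z[i] - B.z[j]));
acc += pow_2(da - db);
}
}
return (float)sqrt(1.0 / ((double)n * ((double)n - 1)) * acc);
}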
|
6d1ec781c25615bb9f409d0e48b976d6b09cc0a8.hip | // !!! This is a file automatically generated by hipify!!!
//pass
//--blockDim=512 --gridDim=1 --no-inline
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <assert.h>
#include <hip/hip_runtime_api.h>
#include <hiprand/hiprand_kernel.h> // device-side hipRAND API for hiprandState_t and hiprand() (older ROCm exposes it as <hiprand_kernel.h>)
#define N 2 //512
__global__ void curand_test(hiprandState_t *state, float *A) {
A[threadIdx.x] = hiprand(&state[threadIdx.x]); // the pseudo random number returned by 'hiprand' is an unsigned int
}
int main() {
float *a;
float *dev_a;
hiprandState_t *dev_state; // is not necessary to initialize dev_state because it is a receptor in the function.
int size = N*sizeof(float);
a = (float*)malloc(size);
hipMalloc ((void**) &dev_a, size);
printf("old a: ");
for (int i = 0; i < N; i++)
printf("%f ", a[i]);
hipMalloc ( (void**) &dev_state, N*sizeof( hiprandState_t ) );
hipLaunchKernelGGL(( curand_test), dim3(1),dim3(N), 0, 0, dev_state, dev_a);
//ESBMC_verify_kernel(curand_test,1,N,dev_state,dev_a);
hipMemcpy(a,dev_a,size,hipMemcpyDeviceToHost);
printf("\nnew a: ");
for (int i = 0; i < N; i++) {
printf("%f ", a[i]);
//assert((a[i] == 0 || a[i] == 1)); // we can't put assert() here because we get random numbers
// maybe we can check if they are > 0 or not NULL... ?
}
free(a);
hipFree(dev_a);
hipFree(dev_state);
return 0;
}
| 6d1ec781c25615bb9f409d0e48b976d6b09cc0a8.cu | //pass
//--blockDim=512 --gridDim=1 --no-inline
#include <cuda.h>
#include <stdio.h>
#include <assert.h>
#include <cuda_runtime_api.h>
#include <curand_kernel.h> // for curandState and curand()
#define N 2 //512
__global__ void curand_test(curandState *state, float *A) {
A[threadIdx.x] = curand(&state[threadIdx.x]); // the pseudo random number returned by 'curand' is an unsigned int
}
int main() {
float *a;
float *dev_a;
curandState *dev_state; // no host-side initialization of dev_state is needed here; it is only handed to the kernel as working state.
int size = N*sizeof(float);
a = (float*)malloc(size);
cudaMalloc ((void**) &dev_a, size);
printf("old a: ");
for (int i = 0; i < N; i++)
printf("%f ", a[i]);
cudaMalloc ( (void**) &dev_state, N*sizeof( curandState ) );
curand_test<<<1,N>>>(dev_state, dev_a);
//ESBMC_verify_kernel(curand_test,1,N,dev_state,dev_a);
cudaMemcpy(a,dev_a,size,cudaMemcpyDeviceToHost);
printf("\nnew a: ");
for (int i = 0; i < N; i++) {
printf("%f ", a[i]);
//assert((a[i] == 0 || a[i] == 1)); // we can't put assert() here because we get random numbers
// maybe we can check if they are > 0 or not NULL... ?
}
free(a);
cudaFree(dev_a);
cudaFree(dev_state);
return 0;
}
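// A minimal seeding sketch (illustrative, assuming <curand_kernel.h> is available):
// curand() advances the state in-place, so for a reproducible sequence the states are
// normally seeded once with curand_init() before curand_test runs, e.g. with
// curand_setup<<<1, N>>>(dev_state, 1234ULL);
__global__ void curand_setup(curandState *state, unsigned long long seed) {
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    curand_init(seed, id, 0, &state[id]); // seed, subsequence, offset, state
}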
|
4fb84d8d748d067ce16f552a25fc264ef479af55.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include "time.h"
constexpr int segment_size = 1024;
constexpr int threads = 512;
__device__ char *pool;
void __global__ alloc(int **pointers) {
auto index = blockIdx.x * blockDim.x + threadIdx.x;
// pointers[index] = (int *)malloc(segment_size);
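  // bump allocation: atomically advance the shared pool pointer by segment_size and
  // keep the returned (previous) value as this thread's private segment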
pointers[index] = (int *)atomicAdd((unsigned long long *)&pool, segment_size);
}
void __global__ fill(int **pointers) {
auto index = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = 0; i < segment_size / sizeof(int); i++) {
pointers[index][i] = i;
}
}
void __global__ free(int **pointers) {
auto index = blockIdx.x * blockDim.x + threadIdx.x;
// free(pointers[index]);
}
int main() {
int **pointers;
hipMalloc(&pointers, threads * sizeof(int *));
int bd = 32;
for (int i = 0; i < 10; i++) {
char *pool_;
hipMallocManaged(&pool_, segment_size * threads);
hipMemcpyToSymbol(pool, &pool_, sizeof(void *));
hipLaunchKernelGGL(( alloc), dim3(threads / bd), dim3(bd), 0, 0, pointers);
hipLaunchKernelGGL(( fill), dim3(threads / bd), dim3(bd), 0, 0, pointers);
hipLaunchKernelGGL(( free), dim3(threads / bd), dim3(bd), 0, 0, pointers);
}
hipDeviceSynchronize();
}
| 4fb84d8d748d067ce16f552a25fc264ef479af55.cu | #include "cuda_runtime.h"
#include <cstdio>
#include "time.h"
constexpr int segment_size = 1024;
constexpr int threads = 512;
__device__ char *pool;
void __global__ alloc(int **pointers) {
auto index = blockIdx.x * blockDim.x + threadIdx.x;
// pointers[index] = (int *)malloc(segment_size);
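  // bump allocation: atomically advance the shared pool pointer by segment_size and
  // keep the returned (previous) value as this thread's private segment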
pointers[index] = (int *)atomicAdd((unsigned long long *)&pool, segment_size);
}
void __global__ fill(int **pointers) {
auto index = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = 0; i < segment_size / sizeof(int); i++) {
pointers[index][i] = i;
}
}
void __global__ free(int **pointers) {
auto index = blockIdx.x * blockDim.x + threadIdx.x;
// free(pointers[index]);
}
int main() {
int **pointers;
cudaMalloc(&pointers, threads * sizeof(int *));
int bd = 32;
for (int i = 0; i < 10; i++) {
char *pool_;
cudaMallocManaged(&pool_, segment_size * threads);
cudaMemcpyToSymbol(pool, &pool_, sizeof(void *));
alloc<<<threads / bd, bd>>>(pointers);
fill<<<threads / bd, bd>>>(pointers);
free<<<threads / bd, bd>>>(pointers);
}
cudaDeviceSynchronize();
}
|
39da68d0e15a5b79e9737080f59d998049c010d9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../../../include/camera/cuda/cuPCU3f.h"
// To use the memory that is defined in cuDeviceMemory/
// If this flag is not set, the device memory that is locally allocated is used.
#define GLOBAL_CUDA_MEMORY
// local
#include "cutil_math.h"
#include "cuDeviceMemory3f.h"
#include "cuPCUImpl3f.h"
#include "cuFilter.h" // bilateral filter
// stl
#include <conio.h>
using namespace texpert;
// To read image data out from the device.
//#define DEBUG_CUPCU
namespace texpert_cuPCU3f
{
// The input image on the device
float* image_dev;
float* image_temp_dev;
// debug images to visualize the output
float* image_output_dev = NULL;
float* image_normals_out_dev = NULL;
// the points and normal vectors
float3* point_output_dev = NULL;
float3* normals_output_dev = NULL;
// the points and normal vectors with all NAN and [0,0,0] points removed.
float3* point_output_clean_dev = NULL;
float3* normals_output_clean_dev = NULL;
int* count_dev = NULL; // returns the number of NaN elements that were removed.
int* dev_index_list = NULL;
// Memory for sampling pattern, for the uniform sample operation.
// It stores the sample pattern with 0 and 1.
unsigned short* g_cu_sampling_dev = NULL;
// The number of random patterns that should be used.
// The patterns are generated before the frame reader is started and
// changed to simulate "random".
const int max_number_of_random_patterns = 10;
// the index of the current random pattern in use
int g_current_random_pattern_index = 0;
// Memory for a random sampling pattern
unsigned short* g_cu_random_sampling_dev[max_number_of_random_patterns];
// 16 threads appears to be a few ms faster than 32 for a 640x480 image
// due to a few simple runtime tests.
const int THREADS_PER_BLOCK = 32;
}
using namespace texpert_cuPCU3f;
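/*
	Compact the point set: copy every point that is not [0,0,0], together with its normal
	vector, from the gridded src arrays into densely packed dst arrays starting at index
	'start', and report the running count of valid points through good_points.
*/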
__global__ void pcu_cleanup_points(float3* src_points, float3* src_normals, int width, int height, int start, float3* dst_points, float3* dst_normals, int* good_points )
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
int j = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = (j * width) + (i);
__shared__ int count;
if (count <= 0) count = start;
if (src_points[index].x == 0 && src_points[index].y == 0 && src_points[index].z == 0) return;
int local_id = count; count++;
dst_points[local_id] = src_points[index];
dst_normals[local_id] = src_normals[index];
(*good_points) = count;
}
/*
Project an image element at position [x, y] from image space into 3D space using the focal length of the camera.
@param x, y - the x and y position of the image element in the image grid in pixels.
@param pixel_depth - the depth of this particular pixel as float value.
@param focal_length_x - the focal length of the camera in pixels. x is the horizontal axis.
@param focal_length_y - the focal length of the camera in pixels. y is the vertical axis.
@param float cx, cy - the principal point
@param px, py, pz - pointers to the point values for x, y, z.
*/
__device__ void pcu_project_point(float x, float y, float pixel_depth, float focal_length_x, float focal_length_y, float cx, float cy, float* px, float* py, float* pz)
{
const float fl_const_x = 1.0f / focal_length_x;
const float fl_const_y = 1.0f / focal_length_y;
// Conversion from millimeters to meters
static const float conv_fac = 0.001f;
float x_val = 0.0f;
float y_val = 0.0f;
float z_val = 0.0f;
if(!isnan(pixel_depth)){
x_val = -(float) x * pixel_depth * fl_const_x * conv_fac + cx;
y_val = -(float) y * pixel_depth * fl_const_y * conv_fac + cy;
z_val = pixel_depth *conv_fac;
}
(*px) = x_val;
(*py) = y_val;
(*pz) = z_val;
}
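// Worked example (illustrative focal length): a pixel 100 columns right of the image center
// at a depth of 800 mm, with focal_length_x = 525 px and cx = 0, projects to
// x = -(100 * 800 / 525) * 0.001, which is about -0.152 m, and z = 800 * 0.001 = 0.8 m.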
/*
Calculates the normal vectors for points. The points must be organized in a grid.
@param src_points - the points organized in a float grid of size [width x height] and stored as float3 array.
@param width - the width of the image
@param height - the height of the image
@param step_size - the number of points that should be stepped over for the normal vector calculation
@param flip_normal - parameter is multiplied with normal vector. Set to 1 to keep the normal vector, set to -1 to flip it.
@param dst_normals - a float3 array to store the normal vectors as float3 [nx, ny, nz]
@param dst_image - the normal vector data is stored in an image as RGB values. For debug purpose.
*/
__global__ void pcu_calculate_normal_vector( float3* src_points, int width, int height, int step_size, float flip_normal, float3* dst_normals, float* dst_image)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
int j = (blockIdx.y * blockDim.y) + threadIdx.y;
int index_center = (j * width ) + (i );
int size = width * height;
// for visualization
int index_out = (j * width * 3) + (i * 3); // three channels for the output image
// for the src_points of type float, the channel size is 1
int index_north = ((j - step_size) * width ) + (i );
int index_east = (j * width ) + ((i + step_size ));
int index_south = ((j + step_size) * width ) + (i);
int index_west = ( j * width ) + ((i- step_size));
float3 reset_point; reset_point.x = 0.0f; reset_point.y = 0.0f; reset_point.z =0.0f;
float3 aveNormal; aveNormal.x = 0.0f; aveNormal.y = 0.0f; aveNormal.z = 0.0f;
float3 center = src_points[index_center];
int pointsUsed = 0;
const float max_dist = 0.2f;
//if (i >= step_size && i < width - step_size && j >= step_size && j < height - step_size)
//cross product quadrant 1
if (i < width-step_size && j >= step_size)
{
float3 north = src_points[index_north];
float3 east = src_points[index_east];
float3 temp = cross(east-center, north - center);
if (isfinite(temp.x) && fabs(east.z - center.z) < max_dist && fabs(north.z - center.z) < max_dist)
{
temp = normalize(temp);
if(!isnan(temp.x)){
aveNormal += temp;
pointsUsed++;
}
}
}
//cross product quadrant 2
if (i >= step_size && j >= step_size)
{
float3 north = src_points[index_north];
float3 west = src_points[index_west];
float3 temp = cross( north - center, west - center);
if (isfinite(temp.x) && abs(west.z - center.z) < max_dist && abs(north.z - center.z) < max_dist )
{
temp = normalize(temp);
if(!isnan(temp.x)){
aveNormal += temp;
pointsUsed++;
}
}
}
//cross product quadrant 3
if (i >= step_size && j < height - step_size)
{
float3 south = src_points[index_south];
float3 west = src_points[index_west];
float3 temp = cross( west - center, south - center);
if (isfinite(temp.x) && abs(west.z - center.z) < max_dist && abs(south.z - center.z) < max_dist)
{
temp = normalize(temp);
if(!isnan(temp.x)){
aveNormal += temp;
pointsUsed++;
}
}
}
//cross product quadrant 4
if ( i < width - step_size && j < height - step_size)
{
float3 south = src_points[index_south];
float3 east= src_points[index_east];
float3 temp = cross( south - center, east - center);
if (isfinite(temp.x) && abs(east.z - center.z) < max_dist && abs(south.z - center.z) < max_dist)
{
temp = normalize(temp);
if(!isnan(temp.x)){
aveNormal += temp;
pointsUsed++;
}
}
}
// check whether a normal vector exists
if(pointsUsed > 0){
aveNormal /= (pointsUsed);
//make unit vector
aveNormal = flip_normal * normalize( aveNormal);
src_points[index_center] = center;
}else
{
// we do not want a point if we do not have a normal vector
src_points[index_center] = reset_point;
}
dst_normals[index_center] = aveNormal;
// for visualization
dst_image[index_out] = aveNormal.z * 255.0;
dst_image[index_out+1] = aveNormal.y * 255.0;
dst_image[index_out+2] = aveNormal.x * 255.0;
}
/*
Project all points into 3D space
@param image - the input image of type float. One channel (for depth images) of size width x height.
@param width - the width of the image in pixels
@param height - the height of the image in pixels
@param channels - the channels. Should be only one.
@param focal_length_x, focal_length_y - the focal length of the camera.
@param dst_point - pointer to the array with float3 that stores all points.
@param dst_image - image with three channels that stores the [x, y, z] position of each point in an image frame [i, j]
The image is for debug purposes or visualization
*/
__global__ void pcu_project_image_points(float *image, int width, int height, int channels, float focal_length_x, float focal_length_y, float cx, float cy, float3* dst_point, float* dst_image) {
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
int j = (blockIdx.y * blockDim.y) + threadIdx.y;
//int k = (blockIdx.z * blockDim.z) + threadIdx.z;
int index = (j * width * channels) + (i * channels);
int index_out = (j * width * 3) + (i * 3); // three channels for the output image
float depth = (float)image[index];
float px = 0.0;
float py = 0.0;
float pz = 0.0;
//------------------------------------------------------------------------------
// projection
pcu_project_point((float)i-width/2, (float)j-height/2, (float)image[index], focal_length_x, focal_length_y, cx, cy, &px, &py, &pz);
// RR, May 5, 2018
// Adopted to get a mean value from cuPositionFromRect
const float scale = 1.0; // to see some colors, scale to something different from 1.
// The coordinates are flipped in pcu_project_point, thus, negative
dst_image[index_out] = (px * scale);
dst_image[index_out + 1] = (py * scale);
dst_image[index_out + 2] = (pz * scale);
dst_point[index].x = px;
dst_point[index].y = py;
dst_point[index].z = pz;
}
/*********************************************************************************************************************************************************************************************
Create a point cloud from a depth image
@param src_image_ptr - a pointer to the image of size [width x height x channels] stored as an array of type float which stores the depth values as
A(i) = {d0, d1, d2, ..., dN} in mm.
@param width - the width of the image in pixels
@param height - the height of the image in pixels
@param channels - the number of channels. A depth image should have only 1 channel.
@param focal_length_x, focal_length_y - the focal length of the camera in pixels
@param float cx, cy - the principal point
@param step_size - for normal vector calculations. The integer specifies how many steps away a neighbor should be to obtain a vector for the normal vector calculation.
Minimum step_size is 1.
@param points - a vector A(i) = {p0, p1, p2, ..., pN} with all points p_i = {px, py, pz} as float3.
@param normals - a vector A(i) = {n0, n1, n2, ..., nN} with all normal vectors n_i = {nx, ny, nz} as float3
@param to_host - if true, the device normals and points are copied back to the host. If false, this is skipped and the data in points and normals remains empty.
NOTE, COPYING DATA REQUIRES A SIGNIFICANT AMOUNT OF TIME AND SHOULD ONLY BE EXECUTED AT THE VERY LAST STEP
**********************************************************************************************************************************************************************************************/
//static
int cuPCU3f::CreatePointCloud(float* src_image_ptr, int width, int height, int channels, float focal_length_x, float focal_length_y, float cx, float cy, int step_size, float normal_flip, vector<float3>& points, vector<float3>& normals, bool to_host)
{
/*
std::ofstream off("test_cu.csv", std::ofstream::out);
cv::Mat img(480,640,CV_32FC1, src_image_ptr);
for (int i = 0; i < 480; i++) {
for (int j = 0; j < 640; j++) {
off << img.at<float>(i,j) << ",";
}
off << "\n";
}
off.close();
*/
step_size = (step_size <= 0) ? 1 : step_size;
int input_size = width* height* channels * sizeof( float);
int output_size = width* height* 3 * sizeof(float); // three channels
dim3 threads_per_block(THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1);
dim3 blocks(width / threads_per_block.x,
height / threads_per_block.y,
1);
points.resize(width*height);
normals.resize(width*height);
//---------------------------------------------------------------------------------
// Allocating memory
// Moved to cuPCU::AllocateDeviceMemory()
//
// Allocate memory with AllocateDeviceMemory(.....)
//---------------------------------------------------------------------------------
// Copy memory
hipError_t err = hipMemcpy(image_dev, (float*)src_image_ptr, input_size, hipMemcpyHostToDevice);
if (err != 0) {
std::cout << "\n[KNN] - hipMemcpy error.\n";
}
err = hipGetLastError();
if (err != 0) {
std::cout << "\n[KNN] - hipMemcpy error (2).\n";
}
//---------------------------------------------------------------------------------
// Process the image
// compute the points
pcu_project_image_points << <blocks, threads_per_block >> > (image_dev, width, height, channels, focal_length_x, focal_length_y, cx, cy, point_output_dev, image_output_dev);
err = hipGetLastError();
if (err != 0) { std::cout << "\n[KNN] - points processing error.\n"; }
hipDeviceSynchronize();
// compute normal vectors
pcu_calculate_normal_vector << <blocks, threads_per_block >> > (point_output_dev, width, height, step_size, normal_flip, normals_output_dev, image_normals_out_dev);
err = hipGetLastError();
if (err != 0) { std::cout << "\n[KNN] - normals points processing error.\n"; }
hipDeviceSynchronize();
if (!to_host) return 1;
//---------------------------------------------------------------------------------
// Return the data
hipMemcpy(&points[0], point_output_dev, output_size, hipMemcpyDeviceToHost);
hipMemcpy(&normals[0], normals_output_dev, output_size, hipMemcpyDeviceToHost);
//int removed = 0;
//hipMemcpy(&removed, count_dev, sizeof(int), hipMemcpyDeviceToHost);
//cout << "Removed " << removed << " points" << endl;
#ifdef DEBUG_CUPCU
// renders an image of the resulting dataset on screen.
// Note. INCREASE THE SCALE IN pcu_project_image_points(...) TO SEE SOME COLOR.
// The values are scaled to meters.
cv::Mat output_points = cv::Mat::zeros(height, width, CV_32FC3);
cv::Mat output_normals = cv::Mat::zeros(height, width, CV_32FC3);
hipMemcpy((float*)output_points.data, image_output_dev, output_size, hipMemcpyDeviceToHost);
hipMemcpy((float*)output_normals.data, image_normals_out_dev, output_size, hipMemcpyDeviceToHost);
cv::Mat test_out, test_out_2, test_outf, test_out_2f;
output_points.convertTo(test_out, CV_8UC3);
output_normals.convertTo(test_out_2, CV_8UC3);
cv::flip(test_out, test_outf,1);
cv::flip(test_out_2, test_out_2f,1);
cv::imshow("Range image out", test_outf);
cv::imshow("Normal image out", test_out_2f);
cv::waitKey();
#endif
hipDeviceSynchronize();
return 1;
}
/*
Create a point cloud from a depth image with all points from device images as source. There is no copy operation involved.
@param src_device_image_ptr - a pointer to the image of size [width x height x channels] stored as an array of type float which stores the depth values as
A(i) = {d0, d1, d2, ..., dN} in mm. The image pointer points to DEVICE MEMORY.
@param width - the width of the image in pixels
@param height - the height of the image in pixels
@param channels - the number of channels. A depth image should have only 1 channel.
@param focal_length_x, focal_length_y - the focal length of the camera in pixels
@param step_size - for normal vector calculations. The integer specifies how many steps away a neighbor should be to obtain a vector for the normal vector calculation.
Minimum step_size is 1.
@param normal_flip - flip the normal vector with normal_flip = -1.0. The value is multiplied with the normal vector.
@param points - a vector A(i) = {p0, p1, p2, ..., pN} with all points p_i = {px, py, pz} as float3.
@param normals - a vector A(i) = {n0, n1, n2, ..., nN} with all normal vectors n_i = {nx, ny, nz} as float3
@param to_host - if true, the device normals and points are copied back to the host. If false, this is skipped and the data in points and normals remains empty.
NOTE, COPYING DATA REQUIRES A SIGNIFICANT AMOUNT OF TIME AND SHOULD ONLY BE EXECUTED AT THE VERY LAST STEP
*/
//static
int cuPCU3f::CreatePointCloudDev(float* src_device_image_ptr, int width, int height, int channels, float focal_length_x, float focal_length_y, float cx, float cy, int step_size, float normal_flip, vector<float3>& points, vector<float3>& normals, bool to_host )
{
/*
std::ofstream off("test_cu.csv", std::ofstream::out);
cv::Mat img(480,640,CV_32FC1, src_image_ptr);
for (int i = 0; i < 480; i++) {
for (int j = 0; j < 640; j++) {
off << img.at<float>(i,j) << ",";
}
off << "\n";
}
off.close();
*/
step_size = (step_size <= 0) ? 1 : step_size;
int input_size = width* height* channels * sizeof( float);
int output_size = width* height* 3 * sizeof(float); // three channels
dim3 threads_per_block(THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1);
dim3 blocks(width / threads_per_block.x,
height / threads_per_block.y,
1);
points.resize(width*height);
normals.resize(width*height);
//---------------------------------------------------------------------------------
// Allocating memory
// Moved to cuPCU::AllocateDeviceMemory()
//
// Allocate memory with AllocateDeviceMemory(.....)
//---------------------------------------------------------------------------------
// Copy memory
/*hipError_t err = hipMemcpy(image_dev, (float*)src_image_ptr, input_size, hipMemcpyHostToDevice);
if (err != 0) {
std::cout << "\n[KNN] - hipMemcpy error.\n";
}
err = hipGetLastError();
if (err != 0) {
std::cout << "\n[KNN] - hipMemcpy error (2).\n";
}*/
//---------------------------------------------------------------------------------
// Process the image
// compute the points
pcu_project_image_points << <blocks, threads_per_block >> > (src_device_image_ptr, width, height, channels, focal_length_x, focal_length_y, cx, cy, point_output_dev, image_output_dev);
hipError_t err = hipGetLastError();
if (err != 0) { std::cout << "\n[KNN] - points processing error.\n"; }
hipDeviceSynchronize();
// compute normal vectors
pcu_calculate_normal_vector << <blocks, threads_per_block >> > (point_output_dev, width, height, step_size, normal_flip, normals_output_dev, image_normals_out_dev);
err = hipGetLastError();
if (err != 0) { std::cout << "\n[KNN] - normals points processing error.\n"; }
hipDeviceSynchronize();
if (!to_host) return 1;
//---------------------------------------------------------------------------------
// Return the data
hipMemcpy(&points[0], point_output_dev, output_size, hipMemcpyDeviceToHost);
hipMemcpy(&normals[0], normals_output_dev, output_size, hipMemcpyDeviceToHost);
//int removed = 0;
//hipMemcpy(&removed, count_dev, sizeof(int), hipMemcpyDeviceToHost);
//cout << "Removed " << removed << " points" << endl;
#ifdef DEBUG_CUPCU
// renders an image of the resulting dataset on screen.
// Note. INCREASE THE SCALE IN pcu_project_image_points(...) TO SEE SOME COLOR.
// The values are scaled to meters.
cv::Mat output_points = cv::Mat::zeros(height, width, CV_32FC3);
cv::Mat output_normals = cv::Mat::zeros(height, width, CV_32FC3);
hipMemcpy((float*)output_points.data, image_output_dev, output_size, hipMemcpyDeviceToHost);
hipMemcpy((float*)output_normals.data, image_normals_out_dev, output_size, hipMemcpyDeviceToHost);
cv::Mat test_out, test_out_2, test_outf, test_out_2f;
output_points.convertTo(test_out, CV_8UC3);
output_normals.convertTo(test_out_2, CV_8UC3);
cv::flip(test_out, test_outf,1);
cv::flip(test_out_2, test_out_2f,1);
cv::imshow("Range image out", test_outf);
cv::imshow("Normal image out", test_out_2f);
cv::waitKey();
#endif
hipDeviceSynchronize();
return 1;
}
/*
Init the device memory. The device memory can be re-used. So no need to always create new memory.
@param width - the width of the image in pixels
@param height - the height of the image in pixels
@param channels - the number of channels. A depth image should have only 1 channel.
*/
//static
void cuPCU3f::AllocateDeviceMemory(int width, int height, int channels)
{
//---------------------------------------------------------------------------------
// Allocating memory
cuDevMem3f::AllocateDeviceMemory(width, height, channels);
image_dev = cuDevMem3f::DevInImagePtr();
image_output_dev = cuDevMem3f::DevPointImagePtr();
image_normals_out_dev = cuDevMem3f::DevNormalsImagePtr();
point_output_dev = cuDevMem3f::DevPointPtr();
normals_output_dev = cuDevMem3f::DevNormalsPtr();
// allocate memory for the bilateral filter
cuFilter::AllocateDeviceMemory(width, height, channels);
image_temp_dev = cuDevMem3f::DevTempImagePtr();
}
/*
Free all device memory
*/
//static
void cuPCU3f::FreeDeviceMemory(void)
{
hipFree(image_dev);
hipFree(image_output_dev);
hipFree(image_normals_out_dev);
hipFree(point_output_dev);
hipFree(normals_output_dev);
hipFree(normals_output_clean_dev);
hipFree(point_output_clean_dev);
cuFilter::FreeDeviceMemory();
}
/*
Copy the depth image from the device and return it as an openCV mat.
@param depth_image - reference to the depth image as OpenCV Mat of type CV_32FC3
*/
//static
void cuPCU3f::GetDepthImage(cv::Mat& depth_image)
{
int output_size = depth_image.rows* depth_image.cols * 3 * sizeof(float);
cv::Mat output_points = cv::Mat::zeros(depth_image.rows, depth_image.cols, CV_32FC3);
hipMemcpy((float*)output_points.data, image_output_dev, output_size, hipMemcpyDeviceToHost);
cv::flip(output_points, depth_image, 1);
}
/*
Copy the normal vectors encoded into an RGB image from the device and return it as an OpenCV Mat.
@param normal_image - reference to the normal vector image as OpenCV Mat of type CV_32FC3
*/
//static
void cuPCU3f::GetNormalImage(cv::Mat& normal_image)
{
int output_size = normal_image.rows* normal_image.cols * 3 * sizeof(float);
cv::Mat output_normals = cv::Mat::zeros(normal_image.rows, normal_image.cols, CV_32FC3);
hipMemcpy((float*)output_normals.data, image_normals_out_dev, output_size, hipMemcpyDeviceToHost);
cv::flip(output_normals, normal_image, 1);
}
/*
Create a sample pattern to uniformly remove points from the point set
@param width - the width of the image
@param height - the height of the image
@param sampling_steps - the number of pixels the pattern should step over in each frame
*/
//static
void cuSample3f::CreateUniformSamplePattern(int width, int height, int sampling_steps)
{
//------------------------------------------------------------------------------
// create the sample pattern
if (g_cu_sampling_dev != NULL) hipFree(g_cu_sampling_dev);
int pattern_size = width* height * 1 * sizeof(float);
// image memory on device. It stores the input image, the depth values as array A(i) = {d0, d1, d2, ...., dN} as float
hipError_t err = hipMalloc((void **)&g_cu_sampling_dev, (unsigned int)(pattern_size));
if (err != 0) { std::cout << "\n[KNN] - hipMalloc error.\n"; }
dim3 threads_per_block(THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1);
dim3 blocks(width / threads_per_block.x,
height / threads_per_block.y,
1);
pcu_create_uniform_sample_pattern << <blocks, threads_per_block >> > ( width, height, sampling_steps, g_cu_sampling_dev);
err = hipGetLastError();
if (err != 0) { std::cout << "\n[KNN] - points processing error.\n"; }
/*
// For debugging only
hipDeviceSynchronize();
int output_size = width* height * 1 * sizeof(unsigned short); // three channels
int size = sizeof(unsigned short);
cv::Mat output_pattern = cv::Mat::zeros(height, width, CV_16UC1);
int t = output_pattern.type();
size_t sizeInBytes = output_pattern.total() * output_pattern.elemSize();
hipMemcpy((unsigned short*)output_pattern.data, g_cu_sampling_dev, output_size, hipMemcpyDeviceToHost);
cv::Mat test_out;
output_pattern.convertTo(test_out, CV_8UC3, 255);
cv::imshow("Pattern image out", test_out);
cv::waitKey();
*/
}
/*
Create a sample pattern to randomly remove points from the point set
@param width - the width of the image
@param height - the height of the image
@param percentage - a percentage value between 0 and 1 with 1 = 100%
*/
//static
void cuSample3f::CreateRandomSamplePattern(int width, int height, int max_points, float percentage)
{
if (g_cu_random_sampling_dev[0] != NULL) for(auto mem: g_cu_random_sampling_dev) hipFree(mem);
int pattern_size = width* height * sizeof(float);
// image memory on device. It stores the input image, the depth values as array A(i) = {d0, d1, d2, ...., dN} as float
for (auto i = 0; i < max_number_of_random_patterns; i++) {
hipError_t err = hipMalloc((void **)&g_cu_random_sampling_dev[i], (unsigned int)(pattern_size));
if (err != 0) { _cprintf("\n[cuSample] - hipMalloc error.\n"); }
}
dim3 threads_per_block(THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1);
dim3 blocks(width / threads_per_block.x,
height / threads_per_block.y,
1);
// allocate memory
hipError_t err = hipMalloc((void **)&dev_index_list, (unsigned int)(width * height * sizeof(int)));
if (err != 0) { _cprintf("\n[cuSample] - hipMalloc error.\n"); }
srand(time(NULL));
for (auto i = 0; i < max_number_of_random_patterns; i++) {
vector<int> index_list(width * height, 0);
//vector<int> numbers;
for (auto i = 0; i < max_points;i++) {
// bool found = false;
int x = -1; //int count = 0;
//while (!found)
{
x = (rand()*(RAND_MAX + 1) + rand()) % (width * height);
//vector<int>::iterator p = std::find(numbers.begin(), numbers.end(), x);
//if (p == numbers.end()) {
// found = true;
//numbers.push_back(x);
//}
//if (count++ > 100000) break; // deadlock prevention
}
index_list[x] = 1;
}
//_cprintf("\nFound %i samples", numbers.size());
err = hipMemcpy(dev_index_list, (int*)index_list.data(), (unsigned int)(width * height * sizeof(int)), hipMemcpyHostToDevice);
if (err != 0) { _cprintf("\n[cuSample] - hipMemcpy error.\n"); }
pcu_create_random_sample_pattern << <blocks, threads_per_block >> > (dev_index_list, width, height, max_points, g_cu_random_sampling_dev[i]);
err = hipGetLastError();
if (err != 0) { _cprintf("\n[cuSample] - pcu_create_random_sample_pattern error.\n"); }
/*
// For debugging only
hipDeviceSynchronize();
int output_size = width* height * 1 * sizeof(unsigned short); // three channels
int size = sizeof(unsigned short);
cv::Mat output_pattern = cv::Mat::zeros(height, width, CV_16UC1);
int t = output_pattern.type();
size_t sizeInBytes = output_pattern.total() * output_pattern.elemSize();
hipMemcpy((unsigned short*)output_pattern.data, g_cu_random_sampling_dev[i], output_size, hipMemcpyDeviceToHost);
cv::Mat test_out;
output_pattern.convertTo(test_out, CV_8UC3, 255);
cv::imshow("Pattern image out", test_out);
cv::waitKey();
*/
}
}
/**********************************************************************************************************************************************************************************************
Create a point cloud from a depth image. The point set is uniformly sampled.
NOTE, use the function CreateSamplePattern() to create the pattern for sampling
@param src_image_ptr - a pointer to the image of size [width x height x channels] stored as an array of type float which stores the depth values as
A(i) = {d0, d1, d2, ..., dN}
@param width - the width of the image in pixels
@param height - the height of the image in pixels
@param channels - the number of channels. A depth image should have only 1 channel.
@param focal_length_x, focal_length_y - the focal length of the camera in pixels
@param normal_radius - for normal vector calculations. The integer specifies how many steps away a neighbor should be to obtain a vector for the normal vector calculation.
Minimum normal_radius is 1.
@param normal_flip - set this value to -1 to flip the normal vectors, otherwise to 1. Ignore all other values. normal_flip is multiplied with the normal vector.
@param points - a vector A(i) = {p0, p1, p2, ..., pN} with all points p_i = {px, py, pz} as float3.
@param normals - a vector A(i) = {n0, n1, n2, ..., nN} with all normal vectors n_i = {nx, ny, nz} as float3
@param to_host - if true, the device normals and points are copied back to the host. If false, this is skipped and the data in points and normals remains empty.
NOTE, COPYING DATA REQUIRES A SIGNIFICANT AMOUNT OF TIME AND SHOULD ONLY BE EXECUTED AT THE VERY LAST STEP
**********************************************************************************************************************************************************************************************/
//static
void cuSample3f::UniformSampling(float* src_image_ptr, int width, int height, float focal_length_x, float focal_length_y, float cx, float cy, int normal_radius, float normal_flip, bool cp_enabled, vector<float3>& points, vector<float3>& normals, bool to_host)
{
// Uniform sample pattern must be initialized in advance.
assert(g_cu_sampling_dev != NULL);
dim3 threads_per_block(THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1);
dim3 blocks(width / threads_per_block.x,
height / threads_per_block.y,
1);
//-----------------------------------------------------------
// Filter the point cloud
// image_temp_dev is device memory.
cuFilter::ApplyBilateralFilter((float*)src_image_ptr, width, height, 1, (float*)image_temp_dev, false);
//-----------------------------------------------------------
// Create the point cloud
cuPCU3f::CreatePointCloudDev((float*)image_temp_dev, width, height, 1, focal_length_x, focal_length_y, cx, cy, normal_radius, normal_flip, points, normals, false);
//-----------------------------------------------------------
// Sampling
hipLaunchKernelGGL(( pcu_uniform_sampling) , dim3(blocks), dim3(threads_per_block) , 0, 0, point_output_dev, normals_output_dev, width, height, cp_enabled, cuDevMem3f::DevParamsPtr(), g_cu_sampling_dev, point_output_dev, normals_output_dev);
hipError_t err = hipGetLastError();
if (err != 0) { std::cout << "\n[KNN] - points processing error.\n"; }
hipDeviceSynchronize();
//---------------------------------------------------------------------------------
// Return the data
if (!to_host) return;
int output_size = width* height * 3 * sizeof(float); // three channels
hipMemcpy(&points[0], point_output_dev, output_size, hipMemcpyDeviceToHost);
hipMemcpy(&normals[0], normals_output_dev, output_size, hipMemcpyDeviceToHost);
}
/**********************************************************************************************************************************************************************************************
Create a point cloud from a depth image. The point set is randomly sampled.
NOTE, use the function CreateRandomSamplePattern() to create the pattern for sampling
@param src_image_ptr - a pointer to the image of size [width x height x channels] stored as an array of type float which stores the depth values as
A(i) = {d0, d1, d2, ..., dN}
@param width - the width of the image in pixels
@param height - the height of the image in pixels
@param channels - the number of channels. A depth image should have only 1 channel.
@param focal_length - the focal length of the camera in pixels
@param normal_radius - for normal vector calculations. The integer specifies how many steps away a neighbor should be to obtain a vector for the normal vector calculation.
Minimum normal_radius is 1.
@param normal_flip - set this value to -1 to flip the normal vectors, otherwise to 1. Ignore all other values. normal_flip is multiplied with the normal vector.
@param points - a vector A(i) = {p0, p1, p2, ..., pN} with all points p_i = {px, py, pz} as float3.
@param normals - a vector A(i) = {n0, n1, n2, ..., nN} with all normal vectors n_i = {nx, ny, nz} as float3
@param to_host - if true, the device normals and points are copied back to the host. If false, this is skipped and the data in points and normals remains empty.
NOTE, COPYING DATA REQUIRES A SIGNIFICANT AMOUNT OF TIME AND SHOULD ONLY BE EXECUTED AT THE VERY LAST STEP
**********************************************************************************************************************************************************************************************/
//static
void cuSample3f::RandomSampling(float* src_image_ptr, int width, int height, float focal_length, int normal_radius, float normal_flip, bool cp_enabled, vector<float3>& points, vector<float3>& normals, bool to_host)
{
// Uniform sample pattern must be initialized in advance.
assert(g_cu_sampling_dev != NULL);
dim3 threads_per_block(THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1);
dim3 blocks(width / threads_per_block.x,
height / threads_per_block.y,
1);
//-----------------------------------------------------------
// Create the point cloud
cuPCU3f::CreatePointCloud((float*)src_image_ptr, width, height, 1, focal_length, focal_length, 0.0 ,0.0, normal_radius, normal_flip, points, normals, false);
//-----------------------------------------------------------
// Sampling
pcu_uniform_sampling << < blocks, threads_per_block >> > (point_output_dev, normals_output_dev, width, height, cp_enabled, cuDevMem3f::DevParamsPtr(),
g_cu_random_sampling_dev[(g_current_random_pattern_index++)% max_number_of_random_patterns], point_output_dev, normals_output_dev);
hipError_t err = hipGetLastError();
if (err != 0) { std::cout << "\n[KNN] - points processing error.\n"; }
hipDeviceSynchronize();
//---------------------------------------------------------------------------------
// Return the data
if (!to_host) return;
int output_size = width* height * 3 * sizeof(float); // three channels
hipMemcpy(&points[0], point_output_dev, output_size, hipMemcpyDeviceToHost);
hipMemcpy(&normals[0], normals_output_dev, output_size, hipMemcpyDeviceToHost);
}
/*
Set parameters for a cutting plane that removes points from the point set.
The plane is defined by A * x + B * y + C * z = D
// where a point gets removed if D - current_D > CP_THRESHOLD
*/
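// Illustrative call (example values only): SetCuttingPlaneParams(0.0f, 0.0f, 1.0f, 1.2f, 0.05f)
// describes the plane z = 1.2 with a threshold of 0.05 for the removal rule above.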
//static
void cuSample3f::SetCuttingPlaneParams(float a, float b, float c, float d, float threshold)
{
float* params = cuDevMem3f::DevParamsPtr();
float host_params[5];
host_params[0] = a;
host_params[1] = b;
host_params[2] = c;
host_params[3] = d;
host_params[4] = threshold;
hipError_t err = hipMemcpy(params, (float*)host_params, 5 * sizeof(float), hipMemcpyHostToDevice);
if (err != 0) { std::cout << "\n[cuSample] - hipMemcpy error.\n"; }
}
/*!
Set the point cloud filter method.
@param method - can be NONE or BILATERAL
@param param - the parameters for the filter
*/
//static
void cuFilter3f::SetFilterMethod(FilterMethod method, FilterParams param)
{
cuFilter::Params p;
p.kernel_size = param.kernel_size;
p.sigmaI = param.sigmaI;
p.sigmaS = param.sigmaS;
cuFilter::SetBilateralFilterParams(p);
if(method == FilterMethod::NONE)
cuFilter::Enable(false);
else
cuFilter::Enable(true);
} | 39da68d0e15a5b79e9737080f59d998049c010d9.cu | #include "../../../include/camera/cuda/cuPCU3f.h"
// To use the memory that is defined in cuDeviceMemory/
// If this flag is not set, the device memory that is locally allocated is used.
#define GLOBAL_CUDA_MEMORY
// local
#include "cutil_math.h"
#include "cuDeviceMemory3f.h"
#include "cuPCUImpl3f.h"
#include "cuFilter.h" // bilateral filter
// stl
#include <conio.h>
using namespace texpert;
// To read image data out from the device.
//#define DEBUG_CUPCU
namespace texpert_cuPCU3f
{
// The input image on the device
float* image_dev;
float* image_temp_dev;
// debug images to visualize the output
float* image_output_dev = NULL;
float* image_normals_out_dev = NULL;
// the points and normal vectors
float3* point_output_dev = NULL;
float3* normals_output_dev = NULL;
// the points and normal vectors with all NAN and [0,0,0] points removed.
float3* point_output_clean_dev = NULL;
float3* normals_output_clean_dev = NULL;
int* count_dev = NULL; // returns the number of NaN elements that were removed.
int* dev_index_list = NULL;
// Memory for sampling pattern, for the uniform sample operation.
// It stores the sample pattern with 0 and 1.
unsigned short* g_cu_sampling_dev = NULL;
// The number of random patterns that should be used.
// The patterns are generated before the frame reader is started and
// changed to simulate "random".
const int max_number_of_random_patterns = 10;
// the index of the current random pattern in use
int g_current_random_pattern_index = 0;
// Memory for a random sampling pattern
unsigned short* g_cu_random_sampling_dev[max_number_of_random_patterns];
// 16 threads appears to be a few ms faster than 32 for a 640x480 image
// due to a few simple runtime tests.
const int THREADS_PER_BLOCK = 32;
}
using namespace texpert_cuPCU3f;
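/*
	Compact the point set: copy every point that is not [0,0,0], together with its normal
	vector, from the gridded src arrays into densely packed dst arrays starting at index
	'start', and report the running count of valid points through good_points.
*/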
__global__ void pcu_cleanup_points(float3* src_points, float3* src_normals, int width, int height, int start, float3* dst_points, float3* dst_normals, int* good_points )
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
int j = (blockIdx.y * blockDim.y) + threadIdx.y;
int index = (j * width) + (i);
__shared__ int count;
if (count <= 0) count = start;
if (src_points[index].x == 0 && src_points[index].y == 0 && src_points[index].z == 0) return;
int local_id = count; count++;
dst_points[local_id] = src_points[index];
dst_normals[local_id] = src_normals[index];
(*good_points) = count;
}
/*
Project an image element at position [x, y] from image space into 3D space using the focal length of the camera.
@param x, y - the x and y position of the image element in the image grid in pixels.
@param pixel_depth - the depth of this particular pixel as float value.
@param focal_length_x - the focal length of the camera in pixels. x is the horizontal axis.
@param focal_length_y - the focal length of the camera in pixels. y is the vertical axis.
@param float cx, cy - the principal point
@param px, py, pz - pointers to the point values for x, y, z.
*/
__device__ void pcu_project_point(float x, float y, float pixel_depth, float focal_length_x, float focal_length_y, float cx, float cy, float* px, float* py, float* pz)
{
const float fl_const_x = 1.0f / focal_length_x;
const float fl_const_y = 1.0f / focal_length_y;
// Conversion from millimeters to meters
static const float conv_fac = 0.001f;
float x_val = 0.0f;
float y_val = 0.0f;
float z_val = 0.0f;
if(!isnan(pixel_depth)){
x_val = -(float) x * pixel_depth * fl_const_x * conv_fac + cx;
y_val = -(float) y * pixel_depth * fl_const_y * conv_fac + cy;
z_val = pixel_depth *conv_fac;
}
(*px) = x_val;
(*py) = y_val;
(*pz) = z_val;
}
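// Worked example (illustrative focal length): a pixel 100 columns right of the image center
// at a depth of 800 mm, with focal_length_x = 525 px and cx = 0, projects to
// x = -(100 * 800 / 525) * 0.001, which is about -0.152 m, and z = 800 * 0.001 = 0.8 m.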
/*
Calculates the normal vectors for points. The points must be organized in a grid.
@param src_points - the points organized in a float grid of size [width x height] and stored as float3 array.
@param width - the width of the image
@param height - the height of the image
@param step_size - the number of points that should be stepped over for the normal vector calculation
@param flip_normal - parameter is multiplied with normal vector. Set to 1 to keep the normal vector, set to -1 to flip it.
@param dst_normals - a float3 array to store the normal vectors as float3 [nx, ny, nz]
@param dst_image - the normal vector data is stored in an image as RGB values. For debug purpose.
*/
__global__ void pcu_calculate_normal_vector( float3* src_points, int width, int height, int step_size, float flip_normal, float3* dst_normals, float* dst_image)
{
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
int j = (blockIdx.y * blockDim.y) + threadIdx.y;
int index_center = (j * width ) + (i );
int size = width * height;
// for visualization
int index_out = (j * width * 3) + (i * 3); // three channels for the output image
// for the src_points of type float, the channel size is 1
int index_north = ((j - step_size) * width ) + (i );
int index_east = (j * width ) + ((i + step_size ));
int index_south = ((j + step_size) * width ) + (i);
int index_west = ( j * width ) + ((i- step_size));
float3 reset_point; reset_point.x = 0.0f; reset_point.y = 0.0f; reset_point.z =0.0f;
float3 aveNormal; aveNormal.x = 0.0f; aveNormal.y = 0.0f; aveNormal.z = 0.0f;
float3 center = src_points[index_center];
int pointsUsed = 0;
const float max_dist = 0.2f;
//if (i >= step_size && i < width - step_size && j >= step_size && j < height - step_size)
//cross product quadrant 1
if (i < width-step_size && j >= step_size)
{
float3 north = src_points[index_north];
float3 east = src_points[index_east];
float3 temp = cross(east-center, north - center);
if (isfinite(temp.x) && fabs(east.z - center.z) < max_dist && fabs(north.z - center.z) < max_dist)
{
temp = normalize(temp);
if(!isnan(temp.x)){
aveNormal += temp;
pointsUsed++;
}
}
}
//cross product quadrant 2
if (i >= step_size && j >= step_size)
{
float3 north = src_points[index_north];
float3 west = src_points[index_west];
float3 temp = cross( north - center, west - center);
if (isfinite(temp.x) && abs(west.z - center.z) < max_dist && abs(north.z - center.z) < max_dist )
{
temp = normalize(temp);
if(!isnan(temp.x)){
aveNormal += temp;
pointsUsed++;
}
}
}
//cross product quadrant 3
if (i >= step_size && j < height - step_size)
{
float3 south = src_points[index_south];
float3 west = src_points[index_west];
float3 temp = cross( west - center, south - center);
if (isfinite(temp.x) && abs(west.z - center.z) < max_dist && abs(south.z - center.z) < max_dist)
{
temp = normalize(temp);
if(!isnan(temp.x)){
aveNormal += temp;
pointsUsed++;
}
}
}
//cross product quadrant 4
if ( i < width - step_size && j < height - step_size)
{
float3 south = src_points[index_south];
float3 east= src_points[index_east];
float3 temp = cross( south - center, east - center);
if (isfinite(temp.x) && abs(east.z - center.z) < max_dist && abs(south.z - center.z) < max_dist)
{
temp = normalize(temp);
if(!isnan(temp.x)){
aveNormal += temp;
pointsUsed++;
}
}
}
// check whether a normal vector exists
if(pointsUsed > 0){
aveNormal /= (pointsUsed);
//make unit vector
aveNormal = flip_normal * normalize( aveNormal);
src_points[index_center] = center;
}else
{
// we do not want a point if we do not have a normal vector
src_points[index_center] = reset_point;
}
dst_normals[index_center] = aveNormal;
// for visualization
dst_image[index_out] = aveNormal.z * 255.0;
dst_image[index_out+1] = aveNormal.y * 255.0;
dst_image[index_out+2] = aveNormal.x * 255.0;
}
/*
Project all points into 3D space
@param image - the input image of type float. One channel (for depth images) of size width x height.
@param width - the width of the image in pixels
@param height - the height of the image in pixels
@param channels - the channels. Should be only one.
@param focal_length_x, focal_length_y - the focal length of the camera.
@param dst_point - pointer to the array with float3 that stores all points.
@param dst_image - image with three channels that stores the [x, y, z] position of each point in an image frame [i, j]
The image is for debug purposes or visualization
*/
__global__ void pcu_project_image_points(float *image, int width, int height, int channels, float focal_length_x, float focal_length_y, float cx, float cy, float3* dst_point, float* dst_image) {
int i = (blockIdx.x * blockDim.x) + threadIdx.x;
int j = (blockIdx.y * blockDim.y) + threadIdx.y;
//int k = (blockIdx.z * blockDim.z) + threadIdx.z;
int index = (j * width * channels) + (i * channels);
int index_out = (j * width * 3) + (i * 3); // three channels for the output image
float depth = (float)image[index];
float px = 0.0;
float py = 0.0;
float pz = 0.0;
//------------------------------------------------------------------------------
// projection
pcu_project_point((float)i-width/2, (float)j-height/2, (float)image[index], focal_length_x, focal_length_y, cx, cy, &px, &py, &pz);
// RR, May 5, 2018
// Adopted to get a mean value from cuPositionFromRect
const float scale = 1.0; // to see some colors, scale to something different from 1.
// The coordinates are flipped in pcu_project_point, thus, negative
dst_image[index_out] = (px * scale);
dst_image[index_out + 1] = (py * scale);
dst_image[index_out + 2] = (pz * scale);
dst_point[index].x = px;
dst_point[index].y = py;
dst_point[index].z = pz;
}
/*********************************************************************************************************************************************************************************************
Create a point cloud from a depth image
@param src_image_ptr - a pointer to the image of size [width x height x channels] stored as an array of type float which stores the depth values as
A(i) = {d0, d1, d2, ..., dN} in mm.
@param width - the width of the image in pixels
@param height - the height of the image in pixels
@param channels - the number of channels. A depth image should have only 1 channel.
@param focal_length_x, focal_length_y - the focal length of the camera in pixels
@param float cx, cy - the principal point
@param step_size - for normal vector calculations. The integer specifies how many steps away a neighbor should be to obtain a vector for the normal vector calculation.
Minimum step_size is 1.
@param points - a vector A(i) = {p0, p1, p2, ..., pN} with all points p_i = {px, py, pz} as float3.
@param normals - a vector A(i) = {n0, n1, n2, ..., nN} with all normal vectors n_i = {nx, ny, nz} as float3
@param to_host - if true, the device normals and points are copied back to the host. If false, this is skipped and the data in points and normals remains empty.
NOTE, COPYING DATA REQUIRES A SIGNIFICANT AMOUNT OF TIME AND SHOULD ONLY BE EXECUTED AT THE VERY LAST STEP
**********************************************************************************************************************************************************************************************/
//static
int cuPCU3f::CreatePointCloud(float* src_image_ptr, int width, int height, int channels, float focal_length_x, float focal_length_y, float cx, float cy, int step_size, float normal_flip, vector<float3>& points, vector<float3>& normals, bool to_host)
{
/*
std::ofstream off("test_cu.csv", std::ofstream::out);
cv::Mat img(480,640,CV_32FC1, src_image_ptr);
for (int i = 0; i < 480; i++) {
for (int j = 0; j < 640; j++) {
off << img.at<float>(i,j) << ",";
}
off << "\n";
}
off.close();
*/
step_size = (step_size <= 0) ? 1 : step_size;
int input_size = width* height* channels * sizeof( float);
int output_size = width* height* 3 * sizeof(float); // three channels
dim3 threads_per_block(THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1);
dim3 blocks(width / threads_per_block.x,
height / threads_per_block.y,
1);
points.resize(width*height);
normals.resize(width*height);
//---------------------------------------------------------------------------------
// Allocating memory
// Moved to cuPCU::AllocateDeviceMemory()
//
// Allocate memory with AllocateDeviceMemory(.....)
//---------------------------------------------------------------------------------
// Copy memory
cudaError err = cudaMemcpy(image_dev, (float*)src_image_ptr, input_size, cudaMemcpyHostToDevice);
if (err != 0) {
std::cout << "\n[KNN] - cudaMemcpy error.\n";
}
err = cudaGetLastError();
if (err != 0) {
std::cout << "\n[KNN] - cudaMemcpy error (2).\n";
}
//---------------------------------------------------------------------------------
// Process the image
// compute the points
pcu_project_image_points << <blocks, threads_per_block >> > (image_dev, width, height, channels, focal_length_x, focal_length_y, cx, cy, point_output_dev, image_output_dev);
err = cudaGetLastError();
if (err != 0) { std::cout << "\n[KNN] - points processing error.\n"; }
cudaDeviceSynchronize();
// compute normal vectors
pcu_calculate_normal_vector << <blocks, threads_per_block >> > (point_output_dev, width, height, step_size, normal_flip, normals_output_dev, image_normals_out_dev);
err = cudaGetLastError();
if (err != 0) { std::cout << "\n[KNN] - normals points processing error.\n"; }
cudaDeviceSynchronize();
if (!to_host) return 1;
//---------------------------------------------------------------------------------
// Return the data
cudaMemcpy(&points[0], point_output_dev, output_size, cudaMemcpyDeviceToHost);
cudaMemcpy(&normals[0], normals_output_dev, output_size, cudaMemcpyDeviceToHost);
//int removed = 0;
//cudaMemcpy(&removed, count_dev, sizeof(int), cudaMemcpyDeviceToHost);
//cout << "Removed " << removed << " points" << endl;
#ifdef DEBUG_CUPCU
// renders an image of the resulting dataset on screen.
// Note. INCREASE THE SCALE IN pcu_project_image_points(...) TO SEE SOME COLOR.
// The values are scaled to meters.
cv::Mat output_points = cv::Mat::zeros(height, width, CV_32FC3);
cv::Mat output_normals = cv::Mat::zeros(height, width, CV_32FC3);
cudaMemcpy((float*)output_points.data, image_output_dev, output_size, cudaMemcpyDeviceToHost);
cudaMemcpy((float*)output_normals.data, image_normals_out_dev, output_size, cudaMemcpyDeviceToHost);
cv::Mat test_out, test_out_2, test_outf, test_out_2f;
output_points.convertTo(test_out, CV_8UC3);
output_normals.convertTo(test_out_2, CV_8UC3);
cv::flip(test_out, test_outf,1);
cv::flip(test_out_2, test_out_2f,1);
cv::imshow("Range image out", test_outf);
cv::imshow("Normal image out", test_out_2f);
cv::waitKey();
#endif
cudaDeviceSynchronize();
return 1;
}
/*
Create a point cloud from a depth image with all points from device images as source. There is no copy operation involved.
@param src_device_image_ptr - a pointer to the image of size [width x height x channels] stored as an array of type float which stores the depth values as
A(i) = {d0, d1, d2, ..., dN} in mm. The image pointer points to DEVICE MEMORY.
@param width - the width of the image in pixels
@param height - the height of the image in pixels
@param channels - the number of channels. A depth image should have only 1 channel.
@param focal_length_x, focal_length_y - the focal length of the camera in pixels
@param step_size - for normal vector calculations. The integer specifies how many steps away a neighbor should be to obtain a vector for the normal vector calculation.
Minimum step_size is 1.
@param normal_flip - flip the normal vector with normal_flip = -1.0. The value is multiplied with the normal vector.
@param points - a vector A(i) = {p0, p1, p2, ..., pN} with all points p_i = {px, py, pz} as float3.
@param normals - a vector A(i) = {n0, n1, n2, ..., nN} with all normal vectors n_i = {nx, ny, nz} as float3
@param to_host - if true, the device normals and points are copied back to the host. If false, this is skipped and the data in points and normals remains empty.
NOTE, COPYING DATA REQUIRES A SIGNIFICANT AMOUNT OF TIME AND SHOULD ONLY BE EXECUTED AT THE VERY LAST STEP
*/
//static
int cuPCU3f::CreatePointCloudDev(float* src_device_image_ptr, int width, int height, int channels, float focal_length_x, float focal_length_y, float cx, float cy, int step_size, float normal_flip, vector<float3>& points, vector<float3>& normals, bool to_host )
{
/*
std::ofstream off("test_cu.csv", std::ofstream::out);
cv::Mat img(480,640,CV_32FC1, src_image_ptr);
for (int i = 0; i < 480; i++) {
for (int j = 0; j < 640; j++) {
off << img.at<float>(i,j) << ",";
}
off << "\n";
}
off.close();
*/
step_size = (step_size <= 0) ? 1 : step_size;
int input_size = width* height* channels * sizeof( float);
int output_size = width* height* 3 * sizeof(float); // three channels
dim3 threads_per_block(THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1);
dim3 blocks(width / threads_per_block.x,
height / threads_per_block.y,
1);
points.resize(width*height);
normals.resize(width*height);
//---------------------------------------------------------------------------------
// Allocating memory
// Moved to cuPCU::AllocateDeviceMemory()
//
// Allocate memory with AllocateDeviceMemory(.....)
//---------------------------------------------------------------------------------
// Copy memory
/*cudaError err = cudaMemcpy(image_dev, (float*)src_image_ptr, input_size, cudaMemcpyHostToDevice);
if (err != 0) {
std::cout << "\n[KNN] - cudaMemcpy error.\n";
}
err = cudaGetLastError();
if (err != 0) {
std::cout << "\n[KNN] - cudaMemcpy error (2).\n";
}*/
//---------------------------------------------------------------------------------
// Process the image
// compute the points
pcu_project_image_points << <blocks, threads_per_block >> > (src_device_image_ptr, width, height, channels, focal_length_x, focal_length_y, cx, cy, point_output_dev, image_output_dev);
cudaError err = cudaGetLastError();
if (err != 0) { std::cout << "\n[KNN] - points processing error.\n"; }
cudaDeviceSynchronize();
// compute normal vectors
pcu_calculate_normal_vector << <blocks, threads_per_block >> > (point_output_dev, width, height, step_size, normal_flip, normals_output_dev, image_normals_out_dev);
err = cudaGetLastError();
if (err != 0) { std::cout << "\n[KNN] - normals points processing error.\n"; }
cudaDeviceSynchronize();
if (!to_host) return 1;
//---------------------------------------------------------------------------------
// Return the data
cudaMemcpy(&points[0], point_output_dev, output_size, cudaMemcpyDeviceToHost);
cudaMemcpy(&normals[0], normals_output_dev, output_size, cudaMemcpyDeviceToHost);
//int removed = 0;
//cudaMemcpy(&removed, count_dev, sizeof(int), cudaMemcpyDeviceToHost);
//cout << "Removed " << removed << " points" << endl;
#ifdef DEBUG_CUPCU
// renders an image of the outcomeing dataset on screen.
// Note. INCREASE THE SCALE IN pcu_project_image_points(...) TO SEE SOME COLOR.
// The values are scaled to meters.
cv::Mat output_points = cv::Mat::zeros(height, width, CV_32FC3);
cv::Mat output_normals = cv::Mat::zeros(height, width, CV_32FC3);
cudaMemcpy((float*)output_points.data, image_output_dev, output_size, cudaMemcpyDeviceToHost);
cudaMemcpy((float*)output_normals.data, image_normals_out_dev, output_size, cudaMemcpyDeviceToHost);
cv::Mat test_out, test_out_2, test_outf, test_out_2f;
output_points.convertTo(test_out, CV_8UC3);
output_normals.convertTo(test_out_2, CV_8UC3);
cv::flip(test_out, test_outf,1);
cv::flip(test_out_2, test_out_2f,1);
cv::imshow("Range image out", test_outf);
cv::imshow("Normal image out", test_out_2f);
cv::waitKey();
#endif
cudaDeviceSynchronize();
return 1;
}
/*
Init the device memory. The device memory can be re-used. So no need to always create new memory.
@param width - the width of the image in pixels
@param height - the height of the image in pixels
@param channels - the number of channels. A depth image should have only 1 channel.
*/
//static
void cuPCU3f::AllocateDeviceMemory(int width, int height, int channels)
{
//---------------------------------------------------------------------------------
// Allocating memory
cuDevMem3f::AllocateDeviceMemory(width, height, channels);
image_dev = cuDevMem3f::DevInImagePtr();
image_output_dev = cuDevMem3f::DevPointImagePtr();
image_normals_out_dev = cuDevMem3f::DevNormalsImagePtr();
point_output_dev = cuDevMem3f::DevPointPtr();
normals_output_dev = cuDevMem3f::DevNormalsPtr();
// allocate memory for the bilateral filter
cuFilter::AllocateDeviceMemory(width, height, channels);
image_temp_dev = cuDevMem3f::DevTempImagePtr();
}
/*
Free all device memory
*/
//static
void cuPCU3f::FreeDeviceMemory(void)
{
cudaFree(image_dev);
cudaFree(image_output_dev);
cudaFree(image_normals_out_dev);
cudaFree(point_output_dev);
cudaFree(normals_output_dev);
cudaFree(normals_output_clean_dev);
cudaFree(point_output_clean_dev);
cuFilter::FreeDeviceMemory();
}
/*
Copy the depth image from the device and return it as an OpenCV Mat.
@param depth_image - reference to the depth image as OpenCV Mat of type CV_32FC3
*/
//static
void cuPCU3f::GetDepthImage(cv::Mat& depth_image)
{
int output_size = depth_image.rows* depth_image.cols * 3 * sizeof(float);
cv::Mat output_points = cv::Mat::zeros(depth_image.rows, depth_image.cols, CV_32FC3);
cudaMemcpy((float*)output_points.data, image_output_dev, output_size, cudaMemcpyDeviceToHost);
cv::flip(output_points, depth_image, 1);
}
/*
Copy the normal vectors, encoded into an RGB image, from the device and return it as an OpenCV Mat.
@param normal_image - reference to the normal vector image as OpenCV Mat of type CV_32FC3
*/
//static
void cuPCU3f::GetNormalImage(cv::Mat& normal_image)
{
int output_size = normal_image.rows* normal_image.cols * 3 * sizeof(float);
cv::Mat output_normals = cv::Mat::zeros(normal_image.rows, normal_image.cols, CV_32FC3);
cudaMemcpy((float*)output_normals.data, image_normals_out_dev, output_size, cudaMemcpyDeviceToHost);
cv::flip(output_normals, normal_image, 1);
}
/*
Create a sample pattern to uniformly remove points from the point set
@param width - the width of the image
@param height - the height of the image
@param sampling_steps - the number of pixels the pattern should step over in each frame
*/
//static
void cuSample3f::CreateUniformSamplePattern(int width, int height, int sampling_steps)
{
//------------------------------------------------------------------------------
// create the sample pattern
if (g_cu_sampling_dev != NULL) cudaFree(g_cu_sampling_dev);
int pattern_size = width* height * 1 * sizeof(float);
// image memory on device. It stores the input image, the depth values as array A(i) = {d0, d1, d2, ...., dN} as float
cudaError err = cudaMalloc((void **)&g_cu_sampling_dev, (unsigned int)(pattern_size));
if (err != 0) { std::cout << "\n[KNN] - cudaMalloc error.\n"; }
dim3 threads_per_block(THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1);
dim3 blocks(width / threads_per_block.x,
height / threads_per_block.y,
1);
pcu_create_uniform_sample_pattern <<< blocks, threads_per_block >>> (width, height, sampling_steps, g_cu_sampling_dev);
err = cudaGetLastError();
if (err != 0) { std::cout << "\n[KNN] - points processing error.\n"; }
/*
// For debugging only
cudaDeviceSynchronize();
int output_size = width* height * 1 * sizeof(unsigned short); // three channels
int size = sizeof(unsigned short);
cv::Mat output_pattern = cv::Mat::zeros(height, width, CV_16UC1);
int t = output_pattern.type();
size_t sizeInBytes = output_pattern.total() * output_pattern.elemSize();
cudaMemcpy((unsigned short*)output_pattern.data, g_cu_sampling_dev, output_size, cudaMemcpyDeviceToHost);
cv::Mat test_out;
output_pattern.convertTo(test_out, CV_8UC3, 255);
cv::imshow("Pattern image out", test_out);
cv::waitKey();
*/
}
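// Note: the generated pattern lives in g_cu_sampling_dev on the device (width * height floats) and is
// consumed later by pcu_uniform_sampling inside cuSample3f::UniformSampling. It therefore has to be
// re-created whenever the image resolution changes.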
/*
Create a sample pattern to randomly remove points from the point set
@param width - the width of the image
@param height - the height of the image
@param max_points - the maximum number of points each random pattern marks for keeping
@param percentage - a percentage value between 0 and 1 with 1 = 100%
*/
//static
void cuSample3f::CreateRandomSamplePattern(int width, int height, int max_points, float percentage)
{
if (g_cu_random_sampling_dev[0] != NULL) for(auto mem: g_cu_random_sampling_dev) cudaFree(mem);
int pattern_size = width* height * sizeof(float);
// image memory on device. It stores the input image, the depth values as array A(i) = {d0, d1, d2, ...., dN} as float
for (auto i = 0; i < max_number_of_random_patterns; i++) {
cudaError err = cudaMalloc((void **)&g_cu_random_sampling_dev[i], (unsigned int)(pattern_size));
if (err != 0) { _cprintf("\n[cuSample] - cudaMalloc error.\n"); }
}
dim3 threads_per_block(THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1);
dim3 blocks(width / threads_per_block.x,
height / threads_per_block.y,
1);
// allocate memory
cudaError err = cudaMalloc((void **)&dev_index_list, (unsigned int)(width * height * sizeof(int)));
if (err != 0) { _cprintf("\n[cuSample] - cudaMalloc error.\n"); }
srand(time(NULL));
for (auto i = 0; i < max_number_of_random_patterns; i++) {
vector<int> index_list(width * height, 0);
//vector<int> numbers;
for (auto j = 0; j < max_points; j++) {
// bool found = false;
int x = -1; //int count = 0;
//while (!found)
{
// NOTE: assumes a 16-bit rand() such as MSVC's (RAND_MAX == 0x7fff); with a 31-bit RAND_MAX this product overflows int.
x = (rand()*(RAND_MAX + 1) + rand()) % (width * height);
//vector<int>::iterator p = std::find(numbers.begin(), numbers.end(), x);
//if (p == numbers.end()) {
// found = true;
//numbers.push_back(x);
//}
//if (count++ > 100000) break; // deadlock prevention
}
index_list[x] = 1;
}
//_cprintf("\nFound %i samples", numbers.size());
err = cudaMemcpy(dev_index_list, (int*)index_list.data(), (unsigned int)(width * height * sizeof(int)), cudaMemcpyHostToDevice);
if (err != 0) { _cprintf("\n[cuSample] - cudaMemcpy error.\n"); }
pcu_create_random_sample_pattern <<< blocks, threads_per_block >>> (dev_index_list, width, height, max_points, g_cu_random_sampling_dev[i]);
err = cudaGetLastError();
if (err != 0) { _cprintf("\n[cuSample] - pcu_create_random_sample_pattern error.\n"); }
/*
// For debugging only
cudaDeviceSynchronize();
int output_size = width* height * 1 * sizeof(unsigned short); // three channels
int size = sizeof(unsigned short);
cv::Mat output_pattern = cv::Mat::zeros(height, width, CV_16UC1);
int t = output_pattern.type();
size_t sizeInBytes = output_pattern.total() * output_pattern.elemSize();
cudaMemcpy((unsigned short*)output_pattern.data, g_cu_random_sampling_dev[i], output_size, cudaMemcpyDeviceToHost);
cv::Mat test_out;
output_pattern.convertTo(test_out, CV_8UC3, 255);
cv::imshow("Pattern image out", test_out);
cv::waitKey();
*/
}
}
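// Note: this pre-computes a pool of max_number_of_random_patterns binary masks on the device
// (g_cu_random_sampling_dev). cuSample3f::RandomSampling later cycles through this pool via
// g_current_random_pattern_index, so the per-frame cost is a pattern lookup rather than host-side RNG work.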
/**********************************************************************************************************************************************************************************************
Create a point cloud from a depth image. The point set is uniformly sampled.
NOTE, use the function CreateUniformSamplePattern() to create the pattern for sampling
@param src_image_ptr - a pointer to the image of size [width x height x channels] stored as an array of type float which stores the depth values as
A(i) = {d0, d1, d2, ..., dN}
@param width - the width of the image in pixels
@param height - the height of the image in pixels
@param channels - the number of channels. A depth image should have only 1 channel.
@param focal_length_x, focal_length_y - the focal length of the camera in pixels
@param cx, cy - the position of the principal point in pixels
@param normal_radius - for normal vector calculations. The integer specifies how many steps away a neighbor should be to obtain a vector for the normal vector calculation.
Minimum normal_radius is 1.
@param normal_flip - set this value to -1 to flip the normal vectors, otherwise to 1. Ignore all other values. normal_flip is multiplied with the normal vector.
@param cp_enabled - true to apply the cutting plane configured via SetCuttingPlaneParams() during sampling
@param points - a vector A(i) = {p0, p1, p2, ..., pN} with all points p_i = {px, py, pz} as float3.
@param normals - a vector A(i) = {n0, n1, n2, ..., nN} with all normal vectors n_i = {nx, ny, nz} as float3
@param to_host - if true, the device normals and points are copied back to the host. If false, this is skipped and the data in points and normals remains empty.
NOTE, COPYING DATA REQUIRES A SIGNIFICANT AMOUNT OF TIME AND SHOULD ONLY BE EXECUTED AT THE VERY LAST STEP
**********************************************************************************************************************************************************************************************/
//static
void cuSample3f::UniformSampling(float* src_image_ptr, int width, int height, float focal_length_x, float focal_length_y, float cx, float cy, int normal_radius, float normal_flip, bool cp_enabled, vector<float3>& points, vector<float3>& normals, bool to_host)
{
// Uniform sample pattern must be initialized in advance.
assert(g_cu_sampling_dev != NULL);
dim3 threads_per_block(THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1);
dim3 blocks(width / threads_per_block.x,
height / threads_per_block.y,
1);
//-----------------------------------------------------------
// Filter the point cloud
// image_temp_dev is device memory.
cuFilter::ApplyBilateralFilter((float*)src_image_ptr, width, height, 1, (float*)image_temp_dev, false);
//-----------------------------------------------------------
// Create the point cloud
cuPCU3f::CreatePointCloudDev((float*)image_temp_dev, width, height, 1, focal_length_x, focal_length_y, cx, cy, normal_radius, normal_flip, points, normals, false);
//-----------------------------------------------------------
// Sampling
pcu_uniform_sampling <<< blocks, threads_per_block >>> (point_output_dev, normals_output_dev, width, height, cp_enabled, cuDevMem3f::DevParamsPtr(), g_cu_sampling_dev, point_output_dev, normals_output_dev);
cudaError err = cudaGetLastError();
if (err != 0) { std::cout << "\n[KNN] - points processing error.\n"; }
cudaDeviceSynchronize();
//---------------------------------------------------------------------------------
// Return the data
if (!to_host) return;
int output_size = width* height * 3 * sizeof(float); // three channels
cudaMemcpy(&points[0], point_output_dev, output_size, cudaMemcpyDeviceToHost);
cudaMemcpy(&normals[0], normals_output_dev, output_size, cudaMemcpyDeviceToHost);
}
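//---------------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original pipeline, kept disabled via #if 0).
// It shows the expected call order: AllocateDeviceMemory -> CreateUniformSamplePattern ->
// UniformSampling per frame -> FreeDeviceMemory. The resolution and camera intrinsics below are
// placeholder values for a generic 640x480 depth sensor, not values taken from this project, and
// whether depth_image_ptr must already reside on the GPU depends on cuFilter::ApplyBilateralFilter,
// which is not shown in this file.
#if 0
static void ExampleUniformSampling(float* depth_image_ptr /* 640x480, 1 channel */)
{
const int width = 640, height = 480; // assumed sensor resolution
const float fx = 525.0f, fy = 525.0f; // assumed focal lengths in pixels
const float cx = 319.5f, cy = 239.5f; // assumed principal point
cuPCU3f::AllocateDeviceMemory(width, height, 1); // one-channel depth input
cuSample3f::CreateUniformSamplePattern(width, height, 4); // keep a sparse, uniform subset
std::vector<float3> points, normals;
// normal_radius = 2, normal_flip = 1.0f, cutting plane disabled, copy the result back to the host
cuSample3f::UniformSampling(depth_image_ptr, width, height, fx, fy, cx, cy, 2, 1.0f, false, points, normals, true);
cuPCU3f::FreeDeviceMemory();
}
#endif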
/**********************************************************************************************************************************************************************************************
Create a point cloud from a depth image. The point set is randomly sampled.
NOTE, use the function CreateRandomSamplePattern() to create the pattern for sampling
@param src_image_ptr - a pointer to the image of size [width x height x channels] stored as an array of type float which stores the depth values as
A(i) = {d0, d1, d2, ..., dN}
@param width - the width of the image in pixels
@param height - the height of the image in pixels
@param channels - the number of channels. A depth image should have only 1 channel.
@param focal_length - the focal length of the camera in pixels
@param normal_radius - for normal vector calculations. The integer specifies how many steps away a neighbor should be to obtain a vector for the normal vector calculation.
Minimum normal_radius is 1.
@param normal_flip - set this value to -1 to flip the normal vectors, otherwise to 1. Ignore all other values. normal_flip is multiplied with the normal vector.
@param cp_enabled - true to apply the cutting plane configured via SetCuttingPlaneParams() during sampling
@param points - a vector A(i) = {p0, p1, p2, ..., pN} with all points p_i = {px, py, pz} as float3.
@param normals - a vector A(i) = {n0, n1, n2, ..., nN} with all normal vectors n_i = {nx, ny, nz} as float3
@param to_host - if true, the device normals and points are copied back to the host. If false, this is skipped and the data in points and normals remains empty.
NOTE, COPYING DATA REQUIRES A SIGNIFICANT AMOUNT OF TIME AND SHOULD ONLY BE EXECUTED AT THE VERY LAST STEP
**********************************************************************************************************************************************************************************************/
//static
void cuSample3f::RandomSampling(float* src_image_ptr, int width, int height, float focal_length, int normal_radius, float normal_flip, bool cp_enabled, vector<float3>& points, vector<float3>& normals, bool to_host)
{
// A random sample pattern must be created in advance (CreateRandomSamplePattern).
assert(g_cu_random_sampling_dev[0] != NULL);
dim3 threads_per_block(THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1);
dim3 blocks(width / threads_per_block.x,
height / threads_per_block.y,
1);
//-----------------------------------------------------------
// Create the point cloud
cuPCU3f::CreatePointCloud((float*)src_image_ptr, width, height, 1, focal_length, focal_length, 0.0 ,0.0, normal_radius, normal_flip, points, normals, false);
//-----------------------------------------------------------
// Sampling
pcu_uniform_sampling <<< blocks, threads_per_block >>> (point_output_dev, normals_output_dev, width, height, cp_enabled, cuDevMem3f::DevParamsPtr(),
g_cu_random_sampling_dev[(g_current_random_pattern_index++)% max_number_of_random_patterns], point_output_dev, normals_output_dev);
cudaError err = cudaGetLastError();
if (err != 0) { std::cout << "\n[KNN] - points processing error.\n"; }
cudaDeviceSynchronize();
//---------------------------------------------------------------------------------
// Return the data
if (!to_host) return;
int output_size = width* height * 3 * sizeof(float); // three channels
cudaMemcpy(&points[0], point_output_dev, output_size, cudaMemcpyDeviceToHost);
cudaMemcpy(&normals[0], normals_output_dev, output_size, cudaMemcpyDeviceToHost);
}
/*
Set parameters for a cutting plane that removes points from the point set.
The plane is defined by A * x + B * y + C * z = D
// where a point gets removed if D - current_D > CP_THRESHOLD
*/
//static
void cuSample3f::SetCuttingPlaneParams(float a, float b, float c, float d, float threshold)
{
float* params = cuDevMem3f::DevParamsPtr();
float host_params[5];
host_params[0] = a;
host_params[1] = b;
host_params[2] = c;
host_params[3] = d;
host_params[4] = threshold;
cudaError err = cudaMemcpy(params, (float*)host_params, 5 * sizeof(float), cudaMemcpyHostToDevice);
if (err != 0) { std::cout << "\n[cuSample] - cudaMemcpy error.\n"; }
}
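// Example (illustrative only): to keep points closer than roughly 1.2 m along the camera z-axis one
// could set the plane 0*x + 0*y + 1*z = 1.2 with a small tolerance:
// cuSample3f::SetCuttingPlaneParams(0.0f, 0.0f, 1.0f, 1.2f, 0.005f);
// The exact removal test lives in pcu_uniform_sampling (not shown here), so the sign convention of D
// and the threshold should be verified against that kernel before relying on this interpretation.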
/*!
Set the point cloud filter method.
@param method - can be NONE or BILATERAL
@param param - the parameters for the filter
*/
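// Illustrative call (parameter values are placeholders, not project defaults; it assumes the enum
// exposes a BILATERAL value as the doc above states):
// FilterParams p; p.kernel_size = 9; p.sigmaI = 12.0f; p.sigmaS = 6.0f;
// cuFilter3f::SetFilterMethod(FilterMethod::BILATERAL, p);
// Passing FilterMethod::NONE disables the bilateral pre-filter entirely.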
//static
void cuFilter3f::SetFilterMethod(FilterMethod method, FilterParams param)
{
cuFilter::Params p;
p.kernel_size = param.kernel_size;
p.sigmaI = param.sigmaI;
p.sigmaS = param.sigmaS;
cuFilter::SetBilateralFilterParams(p);
if(method == FilterMethod::NONE)
cuFilter::Enable(false);
else
cuFilter::Enable(true);
} |
20eecd851ccd67ef3ee8c43dc523ed14f1a404c1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@author Stan Tomov
@generated from magmablas/zgemv_conj.cu, normal z -> c, Mon Jun 25 18:24:11 2018
*/
#include "magma_internal.h"
#include "commonblas_c.h"
#define num_threads 256
__global__ void
cgemv_conj_kernel(
int m, int n, magmaFloatComplex alpha,
const magmaFloatComplex * __restrict__ A, int lda,
const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, int incy)
{
int ind = blockIdx.x*num_threads + threadIdx.x;
A += ind;
if ( ind < m ) {
magmaFloatComplex res = MAGMA_C_ZERO;
#pragma unroll
for( int i=0; i < n; i ++ ) {
res += A[0] * MAGMA_C_CONJ(x[0]);
A += lda;
x += incx;
}
y[ind*incy] = alpha * res + beta * y[ind*incy];
}
}
/***************************************************************************//**
Purpose
-------
CGEMV_CONJ performs the matrix-vector operation
y := alpha*A*conj(x) + beta*y,
where alpha and beta are scalars, x and y are vectors and A is an
m by n matrix.
Arguments
----------
@param[in]
m INTEGER
On entry, m specifies the number of rows of the matrix A.
@param[in]
n INTEGER
On entry, n specifies the number of columns of the matrix A
@param[in]
alpha COMPLEX
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX array of dimension ( LDDA, n ) on the GPU.
@param[in]
ldda INTEGER
LDDA specifies the leading dimension of A.
@param[in]
dx COMPLEX array of dimension n
@param[in]
incx Specifies the increment for the elements of X.
INCX must not be zero.
@param[in]
beta COMPLEX
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[out]
dy COMPLEX array of dimension m
@param[in]
incy Specifies the increment for the elements of Y.
INCY must not be zero.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_gemv
*******************************************************************************/
extern "C" void
magmablas_cgemv_conj(
magma_int_t m, magma_int_t n, magmaFloatComplex alpha,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_const_ptr dx, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy, magma_int_t incy,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < m )
info = -5;
else if ( incx == 0 )
info = -7;
else if ( incy == 0 )
info = -10;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t blocks = magma_ceildiv( m, num_threads );
dim3 grid(blocks, 1, 1);
dim3 threads(num_threads, 1, 1);
hipLaunchKernelGGL(( cgemv_conj_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, n, alpha, dA, ldda, dx, incx, beta, dy, incy);
}
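/*
Illustrative host-side call (sketch only; it assumes the standard MAGMA 2.x host API, which is not
part of this file). After allocating dA (m x n, ldda >= m), dx (length n) and dy (length m) on the
device and uploading the data, the conjugated GEMV is issued as:
magmaFloatComplex alpha = MAGMA_C_MAKE( 1.f, 0.f );
magmaFloatComplex beta = MAGMA_C_MAKE( 0.f, 0.f );
magmablas_cgemv_conj( m, n, alpha, dA, ldda, dx, 1, beta, dy, 1, queue );
magma_queue_sync( queue );
With beta == 0 this leaves dy[i] = alpha * sum_j A(i,j) * conj(x[j]), exactly the accumulation done
per row by cgemv_conj_kernel above.
*/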
| 20eecd851ccd67ef3ee8c43dc523ed14f1a404c1.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@author Stan Tomov
@generated from magmablas/zgemv_conj.cu, normal z -> c, Mon Jun 25 18:24:11 2018
*/
#include "magma_internal.h"
#include "commonblas_c.h"
#define num_threads 256
__global__ void
cgemv_conj_kernel(
int m, int n, magmaFloatComplex alpha,
const magmaFloatComplex * __restrict__ A, int lda,
const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta,
magmaFloatComplex * __restrict__ y, int incy)
{
int ind = blockIdx.x*num_threads + threadIdx.x;
A += ind;
if ( ind < m ) {
magmaFloatComplex res = MAGMA_C_ZERO;
#pragma unroll
for( int i=0; i < n; i ++ ) {
res += A[0] * MAGMA_C_CONJ(x[0]);
A += lda;
x += incx;
}
y[ind*incy] = alpha * res + beta * y[ind*incy];
}
}
/***************************************************************************//**
Purpose
-------
CGEMV_CONJ performs the matrix-vector operation
y := alpha*A*conj(x) + beta*y,
where alpha and beta are scalars, x and y are vectors and A is an
m by n matrix.
Arguments
----------
@param[in]
m INTEGER
On entry, m specifies the number of rows of the matrix A.
@param[in]
n INTEGER
On entry, n specifies the number of columns of the matrix A
@param[in]
alpha COMPLEX
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA COMPLEX array of dimension ( LDDA, n ) on the GPU.
@param[in]
ldda INTEGER
LDDA specifies the leading dimension of A.
@param[in]
dx COMPLEX array of dimension n
@param[in]
incx Specifies the increment for the elements of X.
INCX must not be zero.
@param[in]
beta COMPLEX
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[out]
dy COMPLEX array of dimension m
@param[in]
incy Specifies the increment for the elements of Y.
INCY must not be zero.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_gemv
*******************************************************************************/
extern "C" void
magmablas_cgemv_conj(
magma_int_t m, magma_int_t n, magmaFloatComplex alpha,
magmaFloatComplex_const_ptr dA, magma_int_t ldda,
magmaFloatComplex_const_ptr dx, magma_int_t incx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy, magma_int_t incy,
magma_queue_t queue )
{
magma_int_t info = 0;
if ( m < 0 )
info = -1;
else if ( n < 0 )
info = -2;
else if ( ldda < m )
info = -5;
else if ( incx == 0 )
info = -7;
else if ( incy == 0 )
info = -10;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
magma_int_t blocks = magma_ceildiv( m, num_threads );
dim3 grid(blocks, 1, 1);
dim3 threads(num_threads, 1, 1);
cgemv_conj_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
(m, n, alpha, dA, ldda, dx, incx, beta, dy, incy);
}
|
33a06a381bb9a3e08b3ab9c1f95a31e425dbe685.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "internal_shared.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/vec_distance.hpp"
#include "opencv2/gpu/device/datamov_utils.hpp"
namespace cv { namespace gpu { namespace device
{
namespace bf_knnmatch
{
///////////////////////////////////////////////////////////////////////////////
// Reduction
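// The two helpers below keep the two smallest distances seen by a block row. Every thread first
// publishes its candidate (distance, train index[, image index]) to shared memory; thread 0 of the
// row then scans the BLOCK_SIZE candidates twice, extracting the best match in the first pass and
// the second-best in the second pass. Only thread 0 ends up with meaningful results, and it is also
// the thread that later writes them to global memory in the match kernels.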
template <int BLOCK_SIZE>
__device__ void findBestMatch(float& bestDistance1, float& bestDistance2,
int& bestTrainIdx1, int& bestTrainIdx2,
float* s_distance, int* s_trainIdx)
{
float myBestDistance1 = numeric_limits<float>::max();
float myBestDistance2 = numeric_limits<float>::max();
int myBestTrainIdx1 = -1;
int myBestTrainIdx2 = -1;
s_distance += threadIdx.y * BLOCK_SIZE;
s_trainIdx += threadIdx.y * BLOCK_SIZE;
s_distance[threadIdx.x] = bestDistance1;
s_trainIdx[threadIdx.x] = bestTrainIdx1;
__syncthreads();
if (threadIdx.x == 0)
{
#pragma unroll
for (int i = 0; i < BLOCK_SIZE; ++i)
{
float val = s_distance[i];
if (val < myBestDistance1)
{
myBestDistance2 = myBestDistance1;
myBestTrainIdx2 = myBestTrainIdx1;
myBestDistance1 = val;
myBestTrainIdx1 = s_trainIdx[i];
}
else if (val < myBestDistance2)
{
myBestDistance2 = val;
myBestTrainIdx2 = s_trainIdx[i];
}
}
}
__syncthreads();
s_distance[threadIdx.x] = bestDistance2;
s_trainIdx[threadIdx.x] = bestTrainIdx2;
__syncthreads();
if (threadIdx.x == 0)
{
#pragma unroll
for (int i = 0; i < BLOCK_SIZE; ++i)
{
float val = s_distance[i];
if (val < myBestDistance2)
{
myBestDistance2 = val;
myBestTrainIdx2 = s_trainIdx[i];
}
}
}
bestDistance1 = myBestDistance1;
bestDistance2 = myBestDistance2;
bestTrainIdx1 = myBestTrainIdx1;
bestTrainIdx2 = myBestTrainIdx2;
}
template <int BLOCK_SIZE>
__device__ void findBestMatch(float& bestDistance1, float& bestDistance2,
int& bestTrainIdx1, int& bestTrainIdx2,
int& bestImgIdx1, int& bestImgIdx2,
float* s_distance, int* s_trainIdx, int* s_imgIdx)
{
float myBestDistance1 = numeric_limits<float>::max();
float myBestDistance2 = numeric_limits<float>::max();
int myBestTrainIdx1 = -1;
int myBestTrainIdx2 = -1;
int myBestImgIdx1 = -1;
int myBestImgIdx2 = -1;
s_distance += threadIdx.y * BLOCK_SIZE;
s_trainIdx += threadIdx.y * BLOCK_SIZE;
s_imgIdx += threadIdx.y * BLOCK_SIZE;
s_distance[threadIdx.x] = bestDistance1;
s_trainIdx[threadIdx.x] = bestTrainIdx1;
s_imgIdx[threadIdx.x] = bestImgIdx1;
__syncthreads();
if (threadIdx.x == 0)
{
#pragma unroll
for (int i = 0; i < BLOCK_SIZE; ++i)
{
float val = s_distance[i];
if (val < myBestDistance1)
{
myBestDistance2 = myBestDistance1;
myBestTrainIdx2 = myBestTrainIdx1;
myBestImgIdx2 = myBestImgIdx1;
myBestDistance1 = val;
myBestTrainIdx1 = s_trainIdx[i];
myBestImgIdx1 = s_imgIdx[i];
}
else if (val < myBestDistance2)
{
myBestDistance2 = val;
myBestTrainIdx2 = s_trainIdx[i];
myBestImgIdx2 = s_imgIdx[i];
}
}
}
__syncthreads();
s_distance[threadIdx.x] = bestDistance2;
s_trainIdx[threadIdx.x] = bestTrainIdx2;
s_imgIdx[threadIdx.x] = bestImgIdx2;
__syncthreads();
if (threadIdx.x == 0)
{
#pragma unroll
for (int i = 0; i < BLOCK_SIZE; ++i)
{
float val = s_distance[i];
if (val < myBestDistance2)
{
myBestDistance2 = val;
myBestTrainIdx2 = s_trainIdx[i];
myBestImgIdx2 = s_imgIdx[i];
}
}
}
bestDistance1 = myBestDistance1;
bestDistance2 = myBestDistance2;
bestTrainIdx1 = myBestTrainIdx1;
bestTrainIdx2 = myBestTrainIdx2;
bestImgIdx1 = myBestImgIdx1;
bestImgIdx2 = myBestImgIdx2;
}
///////////////////////////////////////////////////////////////////////////////
// Match Unrolled Cached
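// "Cached" means the query descriptors handled by this block are staged into shared memory once
// (loadQueryToSmem) and reused for every train tile, while the train descriptors are streamed
// through shared memory tile by tile in loopUnrolledCached. MAX_DESC_LEN has to be a multiple of
// BLOCK_SIZE for the unrolled loads to cover the whole descriptor, which the dispatcher guarantees
// by only instantiating 64 and 128 with a 16x16 block.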
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename T, typename U>
__device__ void loadQueryToSmem(int queryIdx, const PtrStepSz<T>& query, U* s_query)
{
#pragma unroll
for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
s_query[threadIdx.y * MAX_DESC_LEN + loadX] = loadX < query.cols ? query.ptr(::min(queryIdx, query.rows - 1))[loadX] : 0;
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__device__ void loopUnrolledCached(int queryIdx, const PtrStepSz<T>& query, int imgIdx, const PtrStepSz<T>& train, const Mask& mask,
typename Dist::value_type* s_query, typename Dist::value_type* s_train,
float& bestDistance1, float& bestDistance2,
int& bestTrainIdx1, int& bestTrainIdx2,
int& bestImgIdx1, int& bestImgIdx2)
{
for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)
{
Dist dist;
#pragma unroll
for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;
if (loadX < train.cols)
{
T val;
ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val);
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; ++j)
dist.reduceIter(s_query[threadIdx.y * MAX_DESC_LEN + i * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);
__syncthreads();
}
typename Dist::result_type distVal = dist;
const int trainIdx = t * BLOCK_SIZE + threadIdx.x;
if (queryIdx < query.rows && trainIdx < train.rows && mask(queryIdx, trainIdx))
{
if (distVal < bestDistance1)
{
bestImgIdx2 = bestImgIdx1;
bestDistance2 = bestDistance1;
bestTrainIdx2 = bestTrainIdx1;
bestImgIdx1 = imgIdx;
bestDistance1 = distVal;
bestTrainIdx1 = trainIdx;
}
else if (distVal < bestDistance2)
{
bestImgIdx2 = imgIdx;
bestDistance2 = distVal;
bestTrainIdx2 = trainIdx;
}
}
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void matchUnrolledCached(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int2* bestTrainIdx, float2* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN);
loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query);
float myBestDistance1 = numeric_limits<float>::max();
float myBestDistance2 = numeric_limits<float>::max();
int myBestTrainIdx1 = -1;
int myBestTrainIdx2 = -1;
loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestTrainIdx1, myBestTrainIdx2);
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, s_distance, s_trainIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2);
bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2);
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolledCached(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask,
const PtrStepSz<int2>& trainIdx, const PtrStepSz<float2>& distance,
hipStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= BLOCK_SIZE ? MAX_DESC_LEN : BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
hipLaunchKernelGGL(( matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>), dim3(grid), dim3(block), smemSize, stream, query, train, mask, trainIdx.data, distance.data);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void matchUnrolledCached(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask, int2* bestTrainIdx, int2* bestImgIdx, float2* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN);
loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query);
float myBestDistance1 = numeric_limits<float>::max();
float myBestDistance2 = numeric_limits<float>::max();
int myBestTrainIdx1 = -1;
int myBestTrainIdx2 = -1;
int myBestImgIdx1 = -1;
int myBestImgIdx2 = -1;
Mask m = mask;
for (int imgIdx = 0; imgIdx < n; ++imgIdx)
{
const PtrStepSz<T> train = trains[imgIdx];
m.next();
loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2);
}
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2, s_distance, s_trainIdx, s_imgIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2);
bestImgIdx[queryIdx] = make_int2(myBestImgIdx1, myBestImgIdx2);
bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2);
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolledCached(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask,
const PtrStepSz<int2>& trainIdx, const PtrStepSz<int2>& imgIdx, const PtrStepSz<float2>& distance,
hipStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= 2 * BLOCK_SIZE ? MAX_DESC_LEN : 2 * BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
hipLaunchKernelGGL(( matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>), dim3(grid), dim3(block), smemSize, stream, query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// Match Unrolled
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__device__ void loopUnrolled(int queryIdx, const PtrStepSz<T>& query, int imgIdx, const PtrStepSz<T>& train, const Mask& mask,
typename Dist::value_type* s_query, typename Dist::value_type* s_train,
float& bestDistance1, float& bestDistance2,
int& bestTrainIdx1, int& bestTrainIdx2,
int& bestImgIdx1, int& bestImgIdx2)
{
for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)
{
Dist dist;
#pragma unroll
for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;
if (loadX < query.cols)
{
T val;
ForceGlob<T>::Load(query.ptr(::min(queryIdx, query.rows - 1)), loadX, val);
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = val;
ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val);
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; ++j)
dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);
__syncthreads();
}
typename Dist::result_type distVal = dist;
const int trainIdx = t * BLOCK_SIZE + threadIdx.x;
if (queryIdx < query.rows && trainIdx < train.rows && mask(queryIdx, trainIdx))
{
if (distVal < bestDistance1)
{
bestImgIdx2 = bestImgIdx1;
bestDistance2 = bestDistance1;
bestTrainIdx2 = bestTrainIdx1;
bestImgIdx1 = imgIdx;
bestDistance1 = distVal;
bestTrainIdx1 = trainIdx;
}
else if (distVal < bestDistance2)
{
bestImgIdx2 = imgIdx;
bestDistance2 = distVal;
bestTrainIdx2 = trainIdx;
}
}
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void matchUnrolled(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int2* bestTrainIdx, float2* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
float myBestDistance1 = numeric_limits<float>::max();
float myBestDistance2 = numeric_limits<float>::max();
int myBestTrainIdx1 = -1;
int myBestTrainIdx2 = -1;
loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestTrainIdx1, myBestTrainIdx2);
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, s_distance, s_trainIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2);
bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2);
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolled(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask,
const PtrStepSz<int2>& trainIdx, const PtrStepSz<float2>& distance,
hipStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
hipLaunchKernelGGL(( matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>), dim3(grid), dim3(block), smemSize, stream, query, train, mask, trainIdx.data, distance.data);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void matchUnrolled(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask, int2* bestTrainIdx, int2* bestImgIdx, float2* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
float myBestDistance1 = numeric_limits<float>::max();
float myBestDistance2 = numeric_limits<float>::max();
int myBestTrainIdx1 = -1;
int myBestTrainIdx2 = -1;
int myBestImgIdx1 = -1;
int myBestImgIdx2 = -1;
Mask m = mask;
for (int imgIdx = 0; imgIdx < n; ++imgIdx)
{
const PtrStepSz<T> train = trains[imgIdx];
m.next();
loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2);
}
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2, s_distance, s_trainIdx, s_imgIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2);
bestImgIdx[queryIdx] = make_int2(myBestImgIdx1, myBestImgIdx2);
bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2);
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolled(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask,
const PtrStepSz<int2>& trainIdx, const PtrStepSz<int2>& imgIdx, const PtrStepSz<float2>& distance,
hipStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
hipLaunchKernelGGL(( matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>), dim3(grid), dim3(block), smemSize, stream, query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// Match
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
__device__ void loop(int queryIdx, const PtrStepSz<T>& query, int imgIdx, const PtrStepSz<T>& train, const Mask& mask,
typename Dist::value_type* s_query, typename Dist::value_type* s_train,
float& bestDistance1, float& bestDistance2,
int& bestTrainIdx1, int& bestTrainIdx2,
int& bestImgIdx1, int& bestImgIdx2)
{
for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)
{
Dist dist;
for (int i = 0, endi = (query.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; i < endi; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;
if (loadX < query.cols)
{
T val;
ForceGlob<T>::Load(query.ptr(::min(queryIdx, query.rows - 1)), loadX, val);
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = val;
ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val);
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; ++j)
dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);
__syncthreads();
}
typename Dist::result_type distVal = dist;
const int trainIdx = t * BLOCK_SIZE + threadIdx.x;
if (queryIdx < query.rows && trainIdx < train.rows && mask(queryIdx, trainIdx))
{
if (distVal < bestDistance1)
{
bestImgIdx2 = bestImgIdx1;
bestDistance2 = bestDistance1;
bestTrainIdx2 = bestTrainIdx1;
bestImgIdx1 = imgIdx;
bestDistance1 = distVal;
bestTrainIdx1 = trainIdx;
}
else if (distVal < bestDistance2)
{
bestImgIdx2 = imgIdx;
bestDistance2 = distVal;
bestTrainIdx2 = trainIdx;
}
}
}
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
__global__ void match(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int2* bestTrainIdx, float2* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
float myBestDistance1 = numeric_limits<float>::max();
float myBestDistance2 = numeric_limits<float>::max();
int myBestTrainIdx1 = -1;
int myBestTrainIdx2 = -1;
loop<BLOCK_SIZE, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestTrainIdx1, myBestTrainIdx2);
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, s_distance, s_trainIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2);
bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2);
}
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
void match(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask,
const PtrStepSz<int2>& trainIdx, const PtrStepSz<float2>& distance,
hipStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
hipLaunchKernelGGL(( match<BLOCK_SIZE, Dist>), dim3(grid), dim3(block), smemSize, stream, query, train, mask, trainIdx.data, distance.data);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
__global__ void match(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask, int2* bestTrainIdx, int2* bestImgIdx, float2* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
float myBestDistance1 = numeric_limits<float>::max();
float myBestDistance2 = numeric_limits<float>::max();
int myBestTrainIdx1 = -1;
int myBestTrainIdx2 = -1;
int myBestImgIdx1 = -1;
int myBestImgIdx2 = -1;
Mask m = mask;
for (int imgIdx = 0; imgIdx < n; ++imgIdx)
{
const PtrStepSz<T> train = trains[imgIdx];
m.next();
loop<BLOCK_SIZE, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2);
}
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2, s_distance, s_trainIdx, s_imgIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2);
bestImgIdx[queryIdx] = make_int2(myBestImgIdx1, myBestImgIdx2);
bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2);
}
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
void match(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask,
const PtrStepSz<int2>& trainIdx, const PtrStepSz<int2>& imgIdx, const PtrStepSz<float2>& distance,
hipStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
hipLaunchKernelGGL(( match<BLOCK_SIZE, Dist>), dim3(grid), dim3(block), smemSize, stream, query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// knnMatch 2 dispatcher
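// For k == 2 the best and second-best matches are tracked directly in registers, so no allDist
// buffer is needed. The dispatcher selects a kernel by descriptor length: up to 64 or 128 elements
// use the fully unrolled, query-cached kernels with a 16x16 block; longer descriptors fall back to
// the generic match kernel that tiles the descriptor at runtime. The wider unrolled variants
// (256/512/1024) are present but commented out.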
template <typename Dist, typename T, typename Mask>
void match2Dispatcher(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask,
const PtrStepSzb& trainIdx, const PtrStepSzb& distance,
int cc, hipStream_t stream)
{
(void)cc;
if (query.cols <= 64)
{
matchUnrolledCached<16, 64, Dist>(query, train, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<float2> > (distance), stream);
}
else if (query.cols <= 128)
{
matchUnrolledCached<16, 128, Dist>(query, train, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<float2> > (distance), stream);
}
/*else if (query.cols <= 256)
{
matchUnrolled<16, 256, Dist>(query, train, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<float2> > (distance), stream);
}
else if (query.cols <= 512)
{
matchUnrolled<16, 512, Dist>(query, train, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<float2> > (distance), stream);
}
else if (query.cols <= 1024)
{
matchUnrolled<16, 1024, Dist>(query, train, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<float2> > (distance), stream);
}*/
else
{
match<16, Dist>(query, train, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<float2> > (distance), stream);
}
}
template <typename Dist, typename T, typename Mask>
void match2Dispatcher(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask,
const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance,
int cc, hipStream_t stream)
{
(void)cc;
if (query.cols <= 64)
{
matchUnrolledCached<16, 64, Dist>(query, trains, n, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<int2> >(imgIdx), static_cast< PtrStepSz<float2> > (distance), stream);
}
else if (query.cols <= 128)
{
matchUnrolledCached<16, 128, Dist>(query, trains, n, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<int2> >(imgIdx), static_cast< PtrStepSz<float2> > (distance), stream);
}
/*else if (query.cols <= 256)
{
matchUnrolled<16, 256, Dist>(query, trains, n, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<int2> >(imgIdx), static_cast< PtrStepSz<float2> > (distance), stream);
}
else if (query.cols <= 512)
{
matchUnrolled<16, 512, Dist>(query, trains, n, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<int2> >(imgIdx), static_cast< PtrStepSz<float2> > (distance), stream);
}
else if (query.cols <= 1024)
{
matchUnrolled<16, 1024, Dist>(query, trains, n, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<int2> >(imgIdx), static_cast< PtrStepSz<float2> > (distance), stream);
}*/
else
{
match<16, Dist>(query, trains, n, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<int2> >(imgIdx), static_cast< PtrStepSz<float2> > (distance), stream);
}
}
///////////////////////////////////////////////////////////////////////////////
// Calc distance kernel
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void calcDistanceUnrolled(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, PtrStepf allDist)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.y * BLOCK_SIZE + threadIdx.y;
const int trainIdx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
Dist dist;
#pragma unroll
for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
if (loadX < query.cols)
{
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = query.ptr(::min(queryIdx, query.rows - 1))[loadX];
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = train.ptr(::min(blockIdx.x * BLOCK_SIZE + threadIdx.y, train.rows - 1))[loadX];
}
else
{
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; ++j)
dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);
__syncthreads();
}
if (queryIdx < query.rows && trainIdx < train.rows)
{
float distVal = numeric_limits<float>::max();
if (mask(queryIdx, trainIdx))
distVal = (typename Dist::result_type)dist;
allDist.ptr(queryIdx)[trainIdx] = distVal;
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void calcDistanceUnrolled(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask, const PtrStepSzf& allDist, hipStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(train.rows, BLOCK_SIZE), divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
hipLaunchKernelGGL(( calcDistanceUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>), dim3(grid), dim3(block), smemSize, stream, query, train, mask, allDist);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
__global__ void calcDistance(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, PtrStepf allDist)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.y * BLOCK_SIZE + threadIdx.y;
const int trainIdx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
Dist dist;
for (int i = 0, endi = (query.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; i < endi; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
if (loadX < query.cols)
{
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = query.ptr(::min(queryIdx, query.rows - 1))[loadX];
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = train.ptr(::min(blockIdx.x * BLOCK_SIZE + threadIdx.y, train.rows - 1))[loadX];
}
else
{
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; ++j)
dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);
__syncthreads();
}
if (queryIdx < query.rows && trainIdx < train.rows)
{
float distVal = numeric_limits<float>::max();
if (mask(queryIdx, trainIdx))
distVal = (typename Dist::result_type)dist;
allDist.ptr(queryIdx)[trainIdx] = distVal;
}
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
void calcDistance(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask, const PtrStepSzf& allDist, hipStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(train.rows, BLOCK_SIZE), divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
hipLaunchKernelGGL(( calcDistance<BLOCK_SIZE, Dist>), dim3(grid), dim3(block), smemSize, stream, query, train, mask, allDist);
cudaSafeCall( hipGetLastError() );
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// Calc Distance dispatcher
template <typename Dist, typename T, typename Mask>
void calcDistanceDispatcher(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask,
const PtrStepSzf& allDist,
int cc, hipStream_t stream)
{
(void)cc;
if (query.cols <= 64)
{
calcDistanceUnrolled<16, 64, Dist>(query, train, mask, allDist, stream);
}
else if (query.cols <= 128)
{
calcDistanceUnrolled<16, 128, Dist>(query, train, mask, allDist, stream);
}
/*else if (query.cols <= 256)
{
calcDistanceUnrolled<16, 256, Dist>(query, train, mask, allDist, stream);
}
else if (query.cols <= 512)
{
calcDistanceUnrolled<16, 512, Dist>(query, train, mask, allDist, stream);
}
else if (query.cols <= 1024)
{
calcDistanceUnrolled<16, 1024, Dist>(query, train, mask, allDist, stream);
}*/
else
{
calcDistance<16, Dist>(query, train, mask, allDist, stream);
}
}
///////////////////////////////////////////////////////////////////////////////
// find knn match kernel
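// For k != 2 the full distance matrix allDist is materialised first and findBestMatch is then
// launched k times. On iteration i each block scans one query row for the current minimum, writes
// it into column i of trainIdx/distance, and overwrites that entry of allDist with FLT_MAX so the
// next iteration picks the next-best candidate.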
template <int BLOCK_SIZE>
__global__ void findBestMatch(PtrStepSzf allDist, int i, PtrStepi trainIdx, PtrStepf distance)
{
const int SMEM_SIZE = BLOCK_SIZE > 64 ? BLOCK_SIZE : 64;
__shared__ float s_dist[SMEM_SIZE];
__shared__ int s_trainIdx[SMEM_SIZE];
const int queryIdx = blockIdx.x;
float* allDistRow = allDist.ptr(queryIdx);
float dist = numeric_limits<float>::max();
int bestIdx = -1;
for (int i = threadIdx.x; i < allDist.cols; i += BLOCK_SIZE)
{
float reg = allDistRow[i];
if (reg < dist)
{
dist = reg;
bestIdx = i;
}
}
s_dist[threadIdx.x] = dist;
s_trainIdx[threadIdx.x] = bestIdx;
__syncthreads();
reducePredVal<BLOCK_SIZE>(s_dist, dist, s_trainIdx, bestIdx, threadIdx.x, less<volatile float>());
if (threadIdx.x == 0)
{
if (dist < numeric_limits<float>::max())
{
allDistRow[bestIdx] = numeric_limits<float>::max();
trainIdx.ptr(queryIdx)[i] = bestIdx;
distance.ptr(queryIdx)[i] = dist;
}
}
}
template <int BLOCK_SIZE>
void findKnnMatch(int k, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, const PtrStepSzf& allDist, hipStream_t stream)
{
const dim3 block(BLOCK_SIZE, 1, 1);
const dim3 grid(trainIdx.rows, 1, 1);
for (int i = 0; i < k; ++i)
{
hipLaunchKernelGGL(( findBestMatch<BLOCK_SIZE>), dim3(grid), dim3(block), 0, stream, allDist, i, trainIdx, distance);
cudaSafeCall( hipGetLastError() );
}
if (stream == 0)
cudaSafeCall( hipDeviceSynchronize() );
}
void findKnnMatchDispatcher(int k, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, hipStream_t stream)
{
findKnnMatch<256>(k, static_cast<PtrStepSzi>(trainIdx), static_cast<PtrStepSzf>(distance), allDist, stream);
}
///////////////////////////////////////////////////////////////////////////////
// knn match Dispatcher
template <typename Dist, typename T, typename Mask>
void matchDispatcher(const PtrStepSz<T>& query, const PtrStepSz<T>& train, int k, const Mask& mask,
const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist,
int cc, hipStream_t stream)
{
if (k == 2)
{
match2Dispatcher<Dist>(query, train, mask, trainIdx, distance, cc, stream);
}
else
{
calcDistanceDispatcher<Dist>(query, train, mask, allDist, cc, stream);
findKnnMatchDispatcher(k, trainIdx, distance, allDist, cc, stream);
}
}
///////////////////////////////////////////////////////////////////////////////
// knn match caller
template <typename T> void matchL1_gpu(const PtrStepSzb& query, const PtrStepSzb& train, int k, const PtrStepSzb& mask,
const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist,
int cc, hipStream_t stream)
{
if (mask.data)
matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), k, SingleMask(mask), trainIdx, distance, allDist, cc, stream);
else
matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), k, WithOutMask(), trainIdx, distance, allDist, cc, stream);
}
template void matchL1_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, hipStream_t stream);
//template void matchL1_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, hipStream_t stream);
template void matchL1_gpu<ushort>(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, hipStream_t stream);
template void matchL1_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, hipStream_t stream);
template void matchL1_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, hipStream_t stream);
template void matchL1_gpu<float >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, hipStream_t stream);
template <typename T> void matchL2_gpu(const PtrStepSzb& query, const PtrStepSzb& train, int k, const PtrStepSzb& mask,
const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist,
int cc, hipStream_t stream)
{
if (mask.data)
matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), k, SingleMask(mask), trainIdx, distance, allDist, cc, stream);
else
matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), k, WithOutMask(), trainIdx, distance, allDist, cc, stream);
}
//template void matchL2_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, hipStream_t stream);
//template void matchL2_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, hipStream_t stream);
//template void matchL2_gpu<ushort>(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, hipStream_t stream);
//template void matchL2_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, hipStream_t stream);
//template void matchL2_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, hipStream_t stream);
template void matchL2_gpu<float >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, hipStream_t stream);
template <typename T> void matchHamming_gpu(const PtrStepSzb& query, const PtrStepSzb& train, int k, const PtrStepSzb& mask,
const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist,
int cc, hipStream_t stream)
{
if (mask.data)
matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), k, SingleMask(mask), trainIdx, distance, allDist, cc, stream);
else
matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), k, WithOutMask(), trainIdx, distance, allDist, cc, stream);
}
template void matchHamming_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, hipStream_t stream);
//template void matchHamming_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, hipStream_t stream);
template void matchHamming_gpu<ushort>(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, hipStream_t stream);
//template void matchHamming_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, hipStream_t stream);
template void matchHamming_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, hipStream_t stream);
template <typename T> void match2L1_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks,
const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance,
int cc, hipStream_t stream)
{
if (masks.data)
match2Dispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), trainIdx, imgIdx, distance, cc, stream);
else
match2Dispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(), trainIdx, imgIdx, distance, cc, stream);
}
template void match2L1_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, hipStream_t stream);
//template void match2L1_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, hipStream_t stream);
template void match2L1_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, hipStream_t stream);
template void match2L1_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, hipStream_t stream);
template void match2L1_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, hipStream_t stream);
template void match2L1_gpu<float >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, hipStream_t stream);
template <typename T> void match2L2_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks,
const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance,
int cc, hipStream_t stream)
{
if (masks.data)
match2Dispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), trainIdx, imgIdx, distance, cc, stream);
else
match2Dispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(), trainIdx, imgIdx, distance, cc, stream);
}
//template void match2L2_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, hipStream_t stream);
//template void match2L2_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, hipStream_t stream);
//template void match2L2_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, hipStream_t stream);
//template void match2L2_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, hipStream_t stream);
//template void match2L2_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, hipStream_t stream);
template void match2L2_gpu<float >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, hipStream_t stream);
template <typename T> void match2Hamming_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks,
const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance,
int cc, hipStream_t stream)
{
if (masks.data)
match2Dispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), trainIdx, imgIdx, distance, cc, stream);
else
match2Dispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(), trainIdx, imgIdx, distance, cc, stream);
}
template void match2Hamming_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, hipStream_t stream);
//template void match2Hamming_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, hipStream_t stream);
template void match2Hamming_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, hipStream_t stream);
//template void match2Hamming_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, hipStream_t stream);
template void match2Hamming_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, hipStream_t stream);
} // namespace bf_knnmatch
}}} // namespace cv { namespace gpu { namespace device {
#endif /* CUDA_DISABLER */ | 33a06a381bb9a3e08b3ab9c1f95a31e425dbe685.cu | /*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include "internal_shared.hpp"
#include "opencv2/gpu/device/limits.hpp"
#include "opencv2/gpu/device/vec_distance.hpp"
#include "opencv2/gpu/device/datamov_utils.hpp"
namespace cv { namespace gpu { namespace device
{
namespace bf_knnmatch
{
///////////////////////////////////////////////////////////////////////////////
// Reduction
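// Block-wide selection of the two smallest distances (and their train/image indices) per query.
// The reduction runs in two passes over shared memory: the first pass folds in every thread's best
// candidate, the second folds in every thread's second-best candidate. Only thread 0 of each row
// performs the scan, so only its copy of the result is valid, which is why callers guard the global
// write with threadIdx.x == 0.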
template <int BLOCK_SIZE>
__device__ void findBestMatch(float& bestDistance1, float& bestDistance2,
int& bestTrainIdx1, int& bestTrainIdx2,
float* s_distance, int* s_trainIdx)
{
float myBestDistance1 = numeric_limits<float>::max();
float myBestDistance2 = numeric_limits<float>::max();
int myBestTrainIdx1 = -1;
int myBestTrainIdx2 = -1;
s_distance += threadIdx.y * BLOCK_SIZE;
s_trainIdx += threadIdx.y * BLOCK_SIZE;
s_distance[threadIdx.x] = bestDistance1;
s_trainIdx[threadIdx.x] = bestTrainIdx1;
__syncthreads();
if (threadIdx.x == 0)
{
#pragma unroll
for (int i = 0; i < BLOCK_SIZE; ++i)
{
float val = s_distance[i];
if (val < myBestDistance1)
{
myBestDistance2 = myBestDistance1;
myBestTrainIdx2 = myBestTrainIdx1;
myBestDistance1 = val;
myBestTrainIdx1 = s_trainIdx[i];
}
else if (val < myBestDistance2)
{
myBestDistance2 = val;
myBestTrainIdx2 = s_trainIdx[i];
}
}
}
__syncthreads();
s_distance[threadIdx.x] = bestDistance2;
s_trainIdx[threadIdx.x] = bestTrainIdx2;
__syncthreads();
if (threadIdx.x == 0)
{
#pragma unroll
for (int i = 0; i < BLOCK_SIZE; ++i)
{
float val = s_distance[i];
if (val < myBestDistance2)
{
myBestDistance2 = val;
myBestTrainIdx2 = s_trainIdx[i];
}
}
}
bestDistance1 = myBestDistance1;
bestDistance2 = myBestDistance2;
bestTrainIdx1 = myBestTrainIdx1;
bestTrainIdx2 = myBestTrainIdx2;
}
template <int BLOCK_SIZE>
__device__ void findBestMatch(float& bestDistance1, float& bestDistance2,
int& bestTrainIdx1, int& bestTrainIdx2,
int& bestImgIdx1, int& bestImgIdx2,
float* s_distance, int* s_trainIdx, int* s_imgIdx)
{
float myBestDistance1 = numeric_limits<float>::max();
float myBestDistance2 = numeric_limits<float>::max();
int myBestTrainIdx1 = -1;
int myBestTrainIdx2 = -1;
int myBestImgIdx1 = -1;
int myBestImgIdx2 = -1;
s_distance += threadIdx.y * BLOCK_SIZE;
s_trainIdx += threadIdx.y * BLOCK_SIZE;
s_imgIdx += threadIdx.y * BLOCK_SIZE;
s_distance[threadIdx.x] = bestDistance1;
s_trainIdx[threadIdx.x] = bestTrainIdx1;
s_imgIdx[threadIdx.x] = bestImgIdx1;
__syncthreads();
if (threadIdx.x == 0)
{
#pragma unroll
for (int i = 0; i < BLOCK_SIZE; ++i)
{
float val = s_distance[i];
if (val < myBestDistance1)
{
myBestDistance2 = myBestDistance1;
myBestTrainIdx2 = myBestTrainIdx1;
myBestImgIdx2 = myBestImgIdx1;
myBestDistance1 = val;
myBestTrainIdx1 = s_trainIdx[i];
myBestImgIdx1 = s_imgIdx[i];
}
else if (val < myBestDistance2)
{
myBestDistance2 = val;
myBestTrainIdx2 = s_trainIdx[i];
myBestImgIdx2 = s_imgIdx[i];
}
}
}
__syncthreads();
s_distance[threadIdx.x] = bestDistance2;
s_trainIdx[threadIdx.x] = bestTrainIdx2;
s_imgIdx[threadIdx.x] = bestImgIdx2;
__syncthreads();
if (threadIdx.x == 0)
{
#pragma unroll
for (int i = 0; i < BLOCK_SIZE; ++i)
{
float val = s_distance[i];
if (val < myBestDistance2)
{
myBestDistance2 = val;
myBestTrainIdx2 = s_trainIdx[i];
myBestImgIdx2 = s_imgIdx[i];
}
}
}
bestDistance1 = myBestDistance1;
bestDistance2 = myBestDistance2;
bestTrainIdx1 = myBestTrainIdx1;
bestTrainIdx2 = myBestTrainIdx2;
bestImgIdx1 = myBestImgIdx1;
bestImgIdx2 = myBestImgIdx2;
}
///////////////////////////////////////////////////////////////////////////////
// Match Unrolled Cached
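// The query descriptor is staged into shared memory once per block (loadQueryToSmem) and reused for
// every tile of the train matrix; because MAX_DESC_LEN is a template constant, the loops over the
// descriptor are fully unrolled. The single-image launcher below requests one buffer of
// BLOCK_SIZE x MAX_DESC_LEN query values plus a BLOCK_SIZE x BLOCK_SIZE train tile, e.g. for
// BLOCK_SIZE = 16 and MAX_DESC_LEN = 64 that is (16*64 + 16*16) * sizeof(int) = 5120 bytes.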
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename T, typename U>
__device__ void loadQueryToSmem(int queryIdx, const PtrStepSz<T>& query, U* s_query)
{
#pragma unroll
for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
s_query[threadIdx.y * MAX_DESC_LEN + loadX] = loadX < query.cols ? query.ptr(::min(queryIdx, query.rows - 1))[loadX] : 0;
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__device__ void loopUnrolledCached(int queryIdx, const PtrStepSz<T>& query, int imgIdx, const PtrStepSz<T>& train, const Mask& mask,
typename Dist::value_type* s_query, typename Dist::value_type* s_train,
float& bestDistance1, float& bestDistance2,
int& bestTrainIdx1, int& bestTrainIdx2,
int& bestImgIdx1, int& bestImgIdx2)
{
for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)
{
Dist dist;
#pragma unroll
for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;
if (loadX < train.cols)
{
T val;
ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val);
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; ++j)
dist.reduceIter(s_query[threadIdx.y * MAX_DESC_LEN + i * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);
__syncthreads();
}
typename Dist::result_type distVal = dist;
const int trainIdx = t * BLOCK_SIZE + threadIdx.x;
if (queryIdx < query.rows && trainIdx < train.rows && mask(queryIdx, trainIdx))
{
if (distVal < bestDistance1)
{
bestImgIdx2 = bestImgIdx1;
bestDistance2 = bestDistance1;
bestTrainIdx2 = bestTrainIdx1;
bestImgIdx1 = imgIdx;
bestDistance1 = distVal;
bestTrainIdx1 = trainIdx;
}
else if (distVal < bestDistance2)
{
bestImgIdx2 = imgIdx;
bestDistance2 = distVal;
bestTrainIdx2 = trainIdx;
}
}
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void matchUnrolledCached(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int2* bestTrainIdx, float2* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN);
loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query);
float myBestDistance1 = numeric_limits<float>::max();
float myBestDistance2 = numeric_limits<float>::max();
int myBestTrainIdx1 = -1;
int myBestTrainIdx2 = -1;
loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestTrainIdx1, myBestTrainIdx2);
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, s_distance, s_trainIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2);
bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2);
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolledCached(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask,
const PtrStepSz<int2>& trainIdx, const PtrStepSz<float2>& distance,
cudaStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= BLOCK_SIZE ? MAX_DESC_LEN : BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void matchUnrolledCached(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask, int2* bestTrainIdx, int2* bestImgIdx, float2* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * MAX_DESC_LEN);
loadQueryToSmem<BLOCK_SIZE, MAX_DESC_LEN>(queryIdx, query, s_query);
float myBestDistance1 = numeric_limits<float>::max();
float myBestDistance2 = numeric_limits<float>::max();
int myBestTrainIdx1 = -1;
int myBestTrainIdx2 = -1;
int myBestImgIdx1 = -1;
int myBestImgIdx2 = -1;
Mask m = mask;
for (int imgIdx = 0; imgIdx < n; ++imgIdx)
{
const PtrStepSz<T> train = trains[imgIdx];
m.next();
loopUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2);
}
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2, s_distance, s_trainIdx, s_imgIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2);
bestImgIdx[queryIdx] = make_int2(myBestImgIdx1, myBestImgIdx2);
bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2);
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolledCached(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask,
const PtrStepSz<int2>& trainIdx, const PtrStepSz<int2>& imgIdx, const PtrStepSz<float2>& distance,
cudaStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (BLOCK_SIZE * (MAX_DESC_LEN >= 2 * BLOCK_SIZE ? MAX_DESC_LEN : 2 * BLOCK_SIZE) + BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
matchUnrolledCached<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// Match Unrolled
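// Same tiled matching as above but without the cached query: both the query tile and the train tile
// are reloaded from global memory on every iteration. The descriptor loop is still unrolled via
// MAX_DESC_LEN; in this file these kernels are only referenced from the commented-out 256/512/1024
// branches of the dispatchers.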
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__device__ void loopUnrolled(int queryIdx, const PtrStepSz<T>& query, int imgIdx, const PtrStepSz<T>& train, const Mask& mask,
typename Dist::value_type* s_query, typename Dist::value_type* s_train,
float& bestDistance1, float& bestDistance2,
int& bestTrainIdx1, int& bestTrainIdx2,
int& bestImgIdx1, int& bestImgIdx2)
{
for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)
{
Dist dist;
#pragma unroll
for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;
if (loadX < query.cols)
{
T val;
ForceGlob<T>::Load(query.ptr(::min(queryIdx, query.rows - 1)), loadX, val);
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = val;
ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val);
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; ++j)
dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);
__syncthreads();
}
typename Dist::result_type distVal = dist;
const int trainIdx = t * BLOCK_SIZE + threadIdx.x;
if (queryIdx < query.rows && trainIdx < train.rows && mask(queryIdx, trainIdx))
{
if (distVal < bestDistance1)
{
bestImgIdx2 = bestImgIdx1;
bestDistance2 = bestDistance1;
bestTrainIdx2 = bestTrainIdx1;
bestImgIdx1 = imgIdx;
bestDistance1 = distVal;
bestTrainIdx1 = trainIdx;
}
else if (distVal < bestDistance2)
{
bestImgIdx2 = imgIdx;
bestDistance2 = distVal;
bestTrainIdx2 = trainIdx;
}
}
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void matchUnrolled(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int2* bestTrainIdx, float2* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
float myBestDistance1 = numeric_limits<float>::max();
float myBestDistance2 = numeric_limits<float>::max();
int myBestTrainIdx1 = -1;
int myBestTrainIdx2 = -1;
loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestTrainIdx1, myBestTrainIdx2);
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, s_distance, s_trainIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2);
bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2);
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolled(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask,
const PtrStepSz<int2>& trainIdx, const PtrStepSz<float2>& distance,
cudaStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void matchUnrolled(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask, int2* bestTrainIdx, int2* bestImgIdx, float2* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
float myBestDistance1 = numeric_limits<float>::max();
float myBestDistance2 = numeric_limits<float>::max();
int myBestTrainIdx1 = -1;
int myBestTrainIdx2 = -1;
int myBestImgIdx1 = -1;
int myBestImgIdx2 = -1;
Mask m = mask;
for (int imgIdx = 0; imgIdx < n; ++imgIdx)
{
const PtrStepSz<T> train = trains[imgIdx];
m.next();
loopUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2);
}
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2, s_distance, s_trainIdx, s_imgIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2);
bestImgIdx[queryIdx] = make_int2(myBestImgIdx1, myBestImgIdx2);
bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2);
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void matchUnrolled(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask,
const PtrStepSz<int2>& trainIdx, const PtrStepSz<int2>& imgIdx, const PtrStepSz<float2>& distance,
cudaStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
matchUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// Match
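// Generic fallback used when the descriptor is wider than the largest unrolled configuration
// (here, more than 128 columns): the loop over the descriptor length is a plain runtime loop,
// otherwise the tiling and two-best bookkeeping match the unrolled variants.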
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
__device__ void loop(int queryIdx, const PtrStepSz<T>& query, int imgIdx, const PtrStepSz<T>& train, const Mask& mask,
typename Dist::value_type* s_query, typename Dist::value_type* s_train,
float& bestDistance1, float& bestDistance2,
int& bestTrainIdx1, int& bestTrainIdx2,
int& bestImgIdx1, int& bestImgIdx2)
{
for (int t = 0, endt = (train.rows + BLOCK_SIZE - 1) / BLOCK_SIZE; t < endt; ++t)
{
Dist dist;
for (int i = 0, endi = (query.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; i < endi; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;
if (loadX < query.cols)
{
T val;
ForceGlob<T>::Load(query.ptr(::min(queryIdx, query.rows - 1)), loadX, val);
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = val;
ForceGlob<T>::Load(train.ptr(::min(t * BLOCK_SIZE + threadIdx.y, train.rows - 1)), loadX, val);
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = val;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; ++j)
dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);
__syncthreads();
}
typename Dist::result_type distVal = dist;
const int trainIdx = t * BLOCK_SIZE + threadIdx.x;
if (queryIdx < query.rows && trainIdx < train.rows && mask(queryIdx, trainIdx))
{
if (distVal < bestDistance1)
{
bestImgIdx2 = bestImgIdx1;
bestDistance2 = bestDistance1;
bestTrainIdx2 = bestTrainIdx1;
bestImgIdx1 = imgIdx;
bestDistance1 = distVal;
bestTrainIdx1 = trainIdx;
}
else if (distVal < bestDistance2)
{
bestImgIdx2 = imgIdx;
bestDistance2 = distVal;
bestTrainIdx2 = trainIdx;
}
}
}
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
__global__ void match(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, int2* bestTrainIdx, float2* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
float myBestDistance1 = numeric_limits<float>::max();
float myBestDistance2 = numeric_limits<float>::max();
int myBestTrainIdx1 = -1;
int myBestTrainIdx2 = -1;
loop<BLOCK_SIZE, Dist>(queryIdx, query, 0, train, mask, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestTrainIdx1, myBestTrainIdx2);
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, s_distance, s_trainIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2);
bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2);
}
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
void match(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask,
const PtrStepSz<int2>& trainIdx, const PtrStepSz<float2>& distance,
cudaStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
match<BLOCK_SIZE, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, trainIdx.data, distance.data);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
__global__ void match(const PtrStepSz<T> query, const PtrStepSz<T>* trains, int n, const Mask mask, int2* bestTrainIdx, int2* bestImgIdx, float2* bestDistance)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.x * BLOCK_SIZE + threadIdx.y;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
float myBestDistance1 = numeric_limits<float>::max();
float myBestDistance2 = numeric_limits<float>::max();
int myBestTrainIdx1 = -1;
int myBestTrainIdx2 = -1;
int myBestImgIdx1 = -1;
int myBestImgIdx2 = -1;
Mask m = mask;
for (int imgIdx = 0; imgIdx < n; ++imgIdx)
{
const PtrStepSz<T> train = trains[imgIdx];
m.next();
loop<BLOCK_SIZE, Dist>(queryIdx, query, imgIdx, train, m, s_query, s_train, myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2);
}
__syncthreads();
float* s_distance = (float*)(smem);
int* s_trainIdx = (int*)(smem + BLOCK_SIZE * BLOCK_SIZE);
int* s_imgIdx = (int*)(smem + 2 * BLOCK_SIZE * BLOCK_SIZE);
findBestMatch<BLOCK_SIZE>(myBestDistance1, myBestDistance2, myBestTrainIdx1, myBestTrainIdx2, myBestImgIdx1, myBestImgIdx2, s_distance, s_trainIdx, s_imgIdx);
if (queryIdx < query.rows && threadIdx.x == 0)
{
bestTrainIdx[queryIdx] = make_int2(myBestTrainIdx1, myBestTrainIdx2);
bestImgIdx[queryIdx] = make_int2(myBestImgIdx1, myBestImgIdx2);
bestDistance[queryIdx] = make_float2(myBestDistance1, myBestDistance2);
}
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
void match(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask,
const PtrStepSz<int2>& trainIdx, const PtrStepSz<int2>& imgIdx, const PtrStepSz<float2>& distance,
cudaStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (3 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
match<BLOCK_SIZE, Dist><<<grid, block, smemSize, stream>>>(query, trains, n, mask, trainIdx.data, imgIdx.data, distance.data);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// knnMatch 2 dispatcher
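// Chooses a kernel specialisation from the descriptor width: up to 64 or 128 columns use the cached
// unrolled kernel with BLOCK_SIZE = 16, anything wider falls back to the generic match kernel
// (the 256/512/1024 specialisations are disabled below). The compute-capability argument is
// currently unused, hence the (void)cc.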
template <typename Dist, typename T, typename Mask>
void match2Dispatcher(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask,
const PtrStepSzb& trainIdx, const PtrStepSzb& distance,
int cc, cudaStream_t stream)
{
(void)cc;
if (query.cols <= 64)
{
matchUnrolledCached<16, 64, Dist>(query, train, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<float2> > (distance), stream);
}
else if (query.cols <= 128)
{
matchUnrolledCached<16, 128, Dist>(query, train, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<float2> > (distance), stream);
}
/*else if (query.cols <= 256)
{
matchUnrolled<16, 256, Dist>(query, train, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<float2> > (distance), stream);
}
else if (query.cols <= 512)
{
matchUnrolled<16, 512, Dist>(query, train, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<float2> > (distance), stream);
}
else if (query.cols <= 1024)
{
matchUnrolled<16, 1024, Dist>(query, train, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<float2> > (distance), stream);
}*/
else
{
match<16, Dist>(query, train, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<float2> > (distance), stream);
}
}
template <typename Dist, typename T, typename Mask>
void match2Dispatcher(const PtrStepSz<T>& query, const PtrStepSz<T>* trains, int n, const Mask& mask,
const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance,
int cc, cudaStream_t stream)
{
(void)cc;
if (query.cols <= 64)
{
matchUnrolledCached<16, 64, Dist>(query, trains, n, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<int2> >(imgIdx), static_cast< PtrStepSz<float2> > (distance), stream);
}
else if (query.cols <= 128)
{
matchUnrolledCached<16, 128, Dist>(query, trains, n, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<int2> >(imgIdx), static_cast< PtrStepSz<float2> > (distance), stream);
}
/*else if (query.cols <= 256)
{
matchUnrolled<16, 256, Dist>(query, trains, n, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<int2> >(imgIdx), static_cast< PtrStepSz<float2> > (distance), stream);
}
else if (query.cols <= 512)
{
matchUnrolled<16, 512, Dist>(query, trains, n, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<int2> >(imgIdx), static_cast< PtrStepSz<float2> > (distance), stream);
}
else if (query.cols <= 1024)
{
matchUnrolled<16, 1024, Dist>(query, trains, n, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<int2> >(imgIdx), static_cast< PtrStepSz<float2> > (distance), stream);
}*/
else
{
match<16, Dist>(query, trains, n, mask, static_cast< PtrStepSz<int2> >(trainIdx), static_cast< PtrStepSz<int2> >(imgIdx), static_cast< PtrStepSz<float2> > (distance), stream);
}
}
///////////////////////////////////////////////////////////////////////////////
// Calc distance kernel
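// k > 2 path, step 1: fill the full query x train distance matrix (allDist). Masked-out pairs are
// written as FLT_MAX so the subsequent selection passes never pick them.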
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__global__ void calcDistanceUnrolled(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, PtrStepf allDist)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.y * BLOCK_SIZE + threadIdx.y;
const int trainIdx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
Dist dist;
#pragma unroll
for (int i = 0; i < MAX_DESC_LEN / BLOCK_SIZE; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
if (loadX < query.cols)
{
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = query.ptr(::min(queryIdx, query.rows - 1))[loadX];
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = train.ptr(::min(blockIdx.x * BLOCK_SIZE + threadIdx.y, train.rows - 1))[loadX];
}
else
{
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; ++j)
dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);
__syncthreads();
}
if (queryIdx < query.rows && trainIdx < train.rows)
{
float distVal = numeric_limits<float>::max();
if (mask(queryIdx, trainIdx))
distVal = (typename Dist::result_type)dist;
allDist.ptr(queryIdx)[trainIdx] = distVal;
}
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
void calcDistanceUnrolled(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask, const PtrStepSzf& allDist, cudaStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(train.rows, BLOCK_SIZE), divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
calcDistanceUnrolled<BLOCK_SIZE, MAX_DESC_LEN, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, allDist);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
__global__ void calcDistance(const PtrStepSz<T> query, const PtrStepSz<T> train, const Mask mask, PtrStepf allDist)
{
extern __shared__ int smem[];
const int queryIdx = blockIdx.y * BLOCK_SIZE + threadIdx.y;
const int trainIdx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
typename Dist::value_type* s_query = (typename Dist::value_type*)(smem);
typename Dist::value_type* s_train = (typename Dist::value_type*)(smem + BLOCK_SIZE * BLOCK_SIZE);
Dist dist;
for (int i = 0, endi = (query.cols + BLOCK_SIZE - 1) / BLOCK_SIZE; i < endi; ++i)
{
const int loadX = threadIdx.x + i * BLOCK_SIZE;
if (loadX < query.cols)
{
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = query.ptr(::min(queryIdx, query.rows - 1))[loadX];
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = train.ptr(::min(blockIdx.x * BLOCK_SIZE + threadIdx.y, train.rows - 1))[loadX];
}
else
{
s_query[threadIdx.y * BLOCK_SIZE + threadIdx.x] = 0;
s_train[threadIdx.x * BLOCK_SIZE + threadIdx.y] = 0;
}
__syncthreads();
#pragma unroll
for (int j = 0; j < BLOCK_SIZE; ++j)
dist.reduceIter(s_query[threadIdx.y * BLOCK_SIZE + j], s_train[j * BLOCK_SIZE + threadIdx.x]);
__syncthreads();
}
if (queryIdx < query.rows && trainIdx < train.rows)
{
float distVal = numeric_limits<float>::max();
if (mask(queryIdx, trainIdx))
distVal = (typename Dist::result_type)dist;
allDist.ptr(queryIdx)[trainIdx] = distVal;
}
}
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
void calcDistance(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask, const PtrStepSzf& allDist, cudaStream_t stream)
{
const dim3 block(BLOCK_SIZE, BLOCK_SIZE);
const dim3 grid(divUp(train.rows, BLOCK_SIZE), divUp(query.rows, BLOCK_SIZE));
const size_t smemSize = (2 * BLOCK_SIZE * BLOCK_SIZE) * sizeof(int);
calcDistance<BLOCK_SIZE, Dist><<<grid, block, smemSize, stream>>>(query, train, mask, allDist);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
///////////////////////////////////////////////////////////////////////////////
// Calc Distance dispatcher
template <typename Dist, typename T, typename Mask>
void calcDistanceDispatcher(const PtrStepSz<T>& query, const PtrStepSz<T>& train, const Mask& mask,
const PtrStepSzf& allDist,
int cc, cudaStream_t stream)
{
(void)cc;
if (query.cols <= 64)
{
calcDistanceUnrolled<16, 64, Dist>(query, train, mask, allDist, stream);
}
else if (query.cols <= 128)
{
calcDistanceUnrolled<16, 128, Dist>(query, train, mask, allDist, stream);
}
/*else if (query.cols <= 256)
{
calcDistanceUnrolled<16, 256, Dist>(query, train, mask, allDist, stream);
}
else if (query.cols <= 512)
{
calcDistanceUnrolled<16, 512, Dist>(query, train, mask, allDist, stream);
}
else if (query.cols <= 1024)
{
calcDistanceUnrolled<16, 1024, Dist>(query, train, mask, allDist, stream);
}*/
else
{
calcDistance<16, Dist>(query, train, mask, allDist, stream);
}
}
///////////////////////////////////////////////////////////////////////////////
// find knn match kernel
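// k > 2 path, step 2: one block per query row. Each launch finds the current minimum of that row of
// allDist (via reducePredVal), stores it in column i of trainIdx/distance, and overwrites the chosen
// entry with FLT_MAX so the next launch yields the next-nearest neighbour; findKnnMatch below runs
// this kernel k times. Note that the loop variable i inside the kernel shadows the parameter i; the
// final write uses the parameter (the output column), since the loop variable has gone out of scope.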
template <int BLOCK_SIZE>
__global__ void findBestMatch(PtrStepSzf allDist, int i, PtrStepi trainIdx, PtrStepf distance)
{
const int SMEM_SIZE = BLOCK_SIZE > 64 ? BLOCK_SIZE : 64;
__shared__ float s_dist[SMEM_SIZE];
__shared__ int s_trainIdx[SMEM_SIZE];
const int queryIdx = blockIdx.x;
float* allDistRow = allDist.ptr(queryIdx);
float dist = numeric_limits<float>::max();
int bestIdx = -1;
for (int i = threadIdx.x; i < allDist.cols; i += BLOCK_SIZE)
{
float reg = allDistRow[i];
if (reg < dist)
{
dist = reg;
bestIdx = i;
}
}
s_dist[threadIdx.x] = dist;
s_trainIdx[threadIdx.x] = bestIdx;
__syncthreads();
reducePredVal<BLOCK_SIZE>(s_dist, dist, s_trainIdx, bestIdx, threadIdx.x, less<volatile float>());
if (threadIdx.x == 0)
{
if (dist < numeric_limits<float>::max())
{
allDistRow[bestIdx] = numeric_limits<float>::max();
trainIdx.ptr(queryIdx)[i] = bestIdx;
distance.ptr(queryIdx)[i] = dist;
}
}
}
template <int BLOCK_SIZE>
void findKnnMatch(int k, const PtrStepSzi& trainIdx, const PtrStepSzf& distance, const PtrStepSzf& allDist, cudaStream_t stream)
{
const dim3 block(BLOCK_SIZE, 1, 1);
const dim3 grid(trainIdx.rows, 1, 1);
for (int i = 0; i < k; ++i)
{
findBestMatch<BLOCK_SIZE><<<grid, block, 0, stream>>>(allDist, i, trainIdx, distance);
cudaSafeCall( cudaGetLastError() );
}
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
void findKnnMatchDispatcher(int k, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, cudaStream_t stream)
{
findKnnMatch<256>(k, static_cast<PtrStepSzi>(trainIdx), static_cast<PtrStepSzf>(distance), allDist, stream);
}
///////////////////////////////////////////////////////////////////////////////
// knn match Dispatcher
template <typename Dist, typename T, typename Mask>
void matchDispatcher(const PtrStepSz<T>& query, const PtrStepSz<T>& train, int k, const Mask& mask,
const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist,
int cc, cudaStream_t stream)
{
if (k == 2)
{
match2Dispatcher<Dist>(query, train, mask, trainIdx, distance, cc, stream);
}
else
{
calcDistanceDispatcher<Dist>(query, train, mask, allDist, cc, stream);
findKnnMatchDispatcher(k, trainIdx, distance, allDist, cc, stream);
}
}
///////////////////////////////////////////////////////////////////////////////
// knn match caller
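// Public entry points: matchL1/L2/Hamming_gpu only bind the distance functor and the mask type and
// forward to matchDispatcher. The explicit instantiations below pin down the descriptor element
// types that are exported; the commented-out instantiations are left disabled.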
template <typename T> void matchL1_gpu(const PtrStepSzb& query, const PtrStepSzb& train, int k, const PtrStepSzb& mask,
const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist,
int cc, cudaStream_t stream)
{
if (mask.data)
matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), k, SingleMask(mask), trainIdx, distance, allDist, cc, stream);
else
matchDispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), k, WithOutMask(), trainIdx, distance, allDist, cc, stream);
}
template void matchL1_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, cudaStream_t stream);
//template void matchL1_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, cudaStream_t stream);
template void matchL1_gpu<ushort>(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, cudaStream_t stream);
template void matchL1_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, cudaStream_t stream);
template void matchL1_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, cudaStream_t stream);
template void matchL1_gpu<float >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, cudaStream_t stream);
template <typename T> void matchL2_gpu(const PtrStepSzb& query, const PtrStepSzb& train, int k, const PtrStepSzb& mask,
const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist,
int cc, cudaStream_t stream)
{
if (mask.data)
matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), k, SingleMask(mask), trainIdx, distance, allDist, cc, stream);
else
matchDispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), k, WithOutMask(), trainIdx, distance, allDist, cc, stream);
}
//template void matchL2_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, cudaStream_t stream);
//template void matchL2_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, cudaStream_t stream);
//template void matchL2_gpu<ushort>(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, cudaStream_t stream);
//template void matchL2_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, cudaStream_t stream);
//template void matchL2_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, cudaStream_t stream);
template void matchL2_gpu<float >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, cudaStream_t stream);
template <typename T> void matchHamming_gpu(const PtrStepSzb& query, const PtrStepSzb& train, int k, const PtrStepSzb& mask,
const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist,
int cc, cudaStream_t stream)
{
if (mask.data)
matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), k, SingleMask(mask), trainIdx, distance, allDist, cc, stream);
else
matchDispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), static_cast< PtrStepSz<T> >(train), k, WithOutMask(), trainIdx, distance, allDist, cc, stream);
}
template void matchHamming_gpu<uchar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, cudaStream_t stream);
//template void matchHamming_gpu<schar >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, cudaStream_t stream);
template void matchHamming_gpu<ushort>(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, cudaStream_t stream);
//template void matchHamming_gpu<short >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, cudaStream_t stream);
template void matchHamming_gpu<int >(const PtrStepSzb& queryDescs, const PtrStepSzb& trainDescs, int k, const PtrStepSzb& mask, const PtrStepSzb& trainIdx, const PtrStepSzb& distance, const PtrStepSzf& allDist, int cc, cudaStream_t stream);
template <typename T> void match2L1_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks,
const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance,
int cc, cudaStream_t stream)
{
if (masks.data)
match2Dispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), trainIdx, imgIdx, distance, cc, stream);
else
match2Dispatcher< L1Dist<T> >(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(), trainIdx, imgIdx, distance, cc, stream);
}
template void match2L1_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, cudaStream_t stream);
//template void match2L1_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, cudaStream_t stream);
template void match2L1_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, cudaStream_t stream);
template void match2L1_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, cudaStream_t stream);
template void match2L1_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, cudaStream_t stream);
template void match2L1_gpu<float >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, cudaStream_t stream);
template <typename T> void match2L2_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks,
const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance,
int cc, cudaStream_t stream)
{
if (masks.data)
match2Dispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), trainIdx, imgIdx, distance, cc, stream);
else
match2Dispatcher<L2Dist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(), trainIdx, imgIdx, distance, cc, stream);
}
//template void match2L2_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, cudaStream_t stream);
//template void match2L2_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, cudaStream_t stream);
//template void match2L2_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, cudaStream_t stream);
//template void match2L2_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, cudaStream_t stream);
//template void match2L2_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, cudaStream_t stream);
template void match2L2_gpu<float >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, cudaStream_t stream);
template <typename T> void match2Hamming_gpu(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks,
const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance,
int cc, cudaStream_t stream)
{
if (masks.data)
match2Dispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, MaskCollection(masks.data), trainIdx, imgIdx, distance, cc, stream);
else
match2Dispatcher<HammingDist>(static_cast< PtrStepSz<T> >(query), (const PtrStepSz<T>*)trains.ptr(), trains.cols, WithOutMask(), trainIdx, imgIdx, distance, cc, stream);
}
template void match2Hamming_gpu<uchar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, cudaStream_t stream);
//template void match2Hamming_gpu<schar >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, cudaStream_t stream);
template void match2Hamming_gpu<ushort>(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, cudaStream_t stream);
//template void match2Hamming_gpu<short >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, cudaStream_t stream);
template void match2Hamming_gpu<int >(const PtrStepSzb& query, const PtrStepSzb& trains, const PtrStepSz<PtrStepb>& masks, const PtrStepSzb& trainIdx, const PtrStepSzb& imgIdx, const PtrStepSzb& distance, int cc, cudaStream_t stream);
} // namespace bf_knnmatch
}}} // namespace cv { namespace gpu { namespace device {
#endif /* CUDA_DISABLER */ |
7a4ee21633ca08a75114055b74ffa84ee75bfdcf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "proton_utils.h"
#include "proton_kernels.h"
#include "vector_td_utilities.h"
#include "setup_grid.h"
#include "cudaDeviceManager.h"
namespace Gadgetron {
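// Rough batch-size heuristic: each proton is assumed to need floats_per_proton floats of device
// memory (default 14), and the batch is rounded down to a whole number of MiB worth of protons.
// Illustrative example (hypothetical numbers): at the default 14 floats (56 bytes) per proton with
// 1 GiB free, free/(1024*1024*56) = 18, giving a batch of 18*1024*1024, i.e. roughly 18.9 million
// protons.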
static size_t calculate_batch_size(size_t floats_per_proton = 14){
size_t mem_per_proton = floats_per_proton*sizeof(float); // Need 12 floats for the splines and 1 for the projection
size_t free;
size_t total;
int res = hipMemGetInfo(&free,&total);
return 1024*1024*(free/(1024*1024*mem_per_proton)); //Divisions by 1024*1024 to ensure MB batch size
}
void rotate_splines(cuNDArray<floatd3> * splines,float angle){
unsigned int elements = splines->get_number_of_elements()/4;
unsigned int threadsPerBlock =::min(elements,(unsigned int) cudaDeviceManager::Instance()->max_blockdim());
dim3 dimBlock( threadsPerBlock);
int totalBlocksPerGrid = (elements+threadsPerBlock-1)/threadsPerBlock;
dim3 dimGrid(::min(totalBlocksPerGrid,cudaDeviceManager::Instance()->max_griddim()));
// Invoke kernel
int batchSize = dimGrid.x*dimBlock.x;
//std::cout << "Starting forward kernel with grid " << dimGrid.x << " " << dimGrid.y << " " << dimGrid.z << std::endl;
for (unsigned int offset = 0; offset < (elements+batchSize); offset += batchSize){
hipLaunchKernelGGL(( rotate_splines_kernel<float>), dim3(dimGrid), dim3(dimBlock) , 0, 0, splines->get_data_ptr(),angle,elements,offset);
}
hipDeviceSynchronize();
CHECK_FOR_CUDA_ERROR();
}
// Expand and fill with nearest value
template<class T, unsigned int D>
__global__ void pad_nearest_kernel( vector_td<unsigned int,D> matrix_size_in, vector_td<unsigned int,D> matrix_size_out,
const T * __restrict__ in, T * __restrict__ out, unsigned int number_of_batches, unsigned int num_elements)
{
const unsigned int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x;
const unsigned int frame_offset = idx/num_elements;
if( idx < num_elements*number_of_batches ){
const typename uintd<D>::Type co_out = idx_to_co<D>( idx-frame_offset*num_elements, matrix_size_out );
const typename uintd<D>::Type offset = (matrix_size_out-matrix_size_in)>>1;
T _out;
bool inside = (co_out>=offset) && (co_out<(matrix_size_in+offset));
if( inside )
_out = in[co_to_idx<D>(co_out-offset, matrix_size_in)+frame_offset*prod(matrix_size_in)];
else{
const vector_td<unsigned int,D> co_in = amax(amin(co_out,matrix_size_in-1u),0u);
_out = in[co_to_idx<D>(co_in,matrix_size_in)+frame_offset*prod(matrix_size_in)];
}
out[idx] = _out;
}
}
template<class T, unsigned int D>
void pad_nearest( cuNDArray<T> *in, cuNDArray<T> *out )
{
if( in == 0x0 || out == 0x0 ){
throw std::runtime_error("pad: 0x0 ndarray provided");;
}
if( in->get_number_of_dimensions() != out->get_number_of_dimensions() ){
throw std::runtime_error("pad: image dimensions mismatch");;
}
if( in->get_number_of_dimensions() < D ){
std::stringstream ss;
ss << "pad: number of image dimensions should be at least " << D;
throw std::runtime_error(ss.str());;
}
typename uint64d<D>::Type matrix_size_in = from_std_vector<size_t,D>( *in->get_dimensions() );
typename uint64d<D>::Type matrix_size_out = from_std_vector<size_t,D>( *out->get_dimensions() );
unsigned int number_of_batches = 1;
for( unsigned int d=D; d<in->get_number_of_dimensions(); d++ ){
number_of_batches *= in->get_size(d);
}
if( weak_greater(matrix_size_in,matrix_size_out) ){
throw std::runtime_error("pad: size mismatch, cannot expand");
}
// Setup block/grid dimensions
dim3 blockDim; dim3 gridDim;
setup_grid( prod(matrix_size_out), &blockDim, &gridDim, number_of_batches );
// Invoke kernel
hipLaunchKernelGGL(( pad_nearest_kernel<T,D>), dim3(gridDim), dim3(blockDim) , 0, 0,
vector_td<unsigned int,D>(matrix_size_in), vector_td<unsigned int,D>(matrix_size_out),
in->get_data_ptr(), out->get_data_ptr(), number_of_batches, prod(matrix_size_out) );
CHECK_FOR_CUDA_ERROR();
}
template<> void protonProjection<cuNDArray>(cuNDArray<float>* image, cuNDArray<float>* projections, cuNDArray<floatd3>* splines, floatd3 phys_dims, cuNDArray<float>* exterior_path_lengths){
int dims = projections->get_number_of_elements();
int threadsPerBlock =::min(dims,cudaDeviceManager::Instance()->max_blockdim());
dim3 dimBlock( threadsPerBlock);
int totalBlocksPerGrid = (dims+threadsPerBlock-1)/threadsPerBlock;
dim3 dimGrid(::min(totalBlocksPerGrid,cudaDeviceManager::Instance()->max_griddim()));
uint64d3 _dims = from_std_vector<size_t,3>( *(image->get_dimensions()) );
// Invoke kernel
int batchSize = dimGrid.x*dimBlock.x;
//std::cout << "Starting forward kernel with grid " << dimGrid.x << " " << dimGrid.y << " " << dimGrid.z << std::endl;
hipFuncSetCacheConfig(path_kernel2<float,forward_functor<float> >, hipFuncCachePreferL1);
hipFuncSetCacheConfig(path_kernel<float,forward_functor<float> >, hipFuncCachePreferL1);
for (int offset = 0; offset < (dims+batchSize); offset += batchSize){
forward_functor<float> functor(image->get_data_ptr(),projections->get_data_ptr());
if (exterior_path_lengths != NULL){
hipLaunchKernelGGL(( path_kernel2<float,forward_functor<float> >), dim3(dimGrid), dim3(dimBlock) , 0, 0, functor,splines->get_data_ptr(),exterior_path_lengths->get_data_ptr(),phys_dims, (vector_td<int,3>)_dims, dims,offset);
//forward_kernel2<float><<< dimGrid, dimBlock >>> (image->get_data_ptr(), projections->get_data_ptr(),splines->get_data_ptr(),exterior_path_lengths->get_data_ptr(),phys_dims, (vector_td<int,3>)_dims, dims,offset);
}else
hipLaunchKernelGGL(( path_kernel<float,forward_functor<float> >), dim3(dimGrid), dim3(dimBlock) , 0, 0, functor,splines->get_data_ptr(),phys_dims, (vector_td<int,3>)_dims, dims,offset);
}
//hipDeviceSynchronize();
//CHECK_FOR_CUDA_ERROR();
}
template<> void protonPathNorm<cuNDArray>(std::vector<size_t> img_dims, cuNDArray<float>* projections, cuNDArray<floatd3>* splines, floatd3 phys_dims, cuNDArray<float>* exterior_path_lengths){
int dims = projections->get_number_of_elements();
int threadsPerBlock =::min(dims,cudaDeviceManager::Instance()->max_blockdim());
dim3 dimBlock( threadsPerBlock);
int totalBlocksPerGrid = (dims+threadsPerBlock-1)/threadsPerBlock;
dim3 dimGrid(::min(totalBlocksPerGrid,cudaDeviceManager::Instance()->max_griddim()));
uint64d3 _dims = from_std_vector<size_t,3>( img_dims );
// Invoke kernel
int batchSize = dimGrid.x*dimBlock.x;
//std::cout << "Starting forward kernel with grid " << dimGrid.x << " " << dimGrid.y << " " << dimGrid.z << std::endl;
hipFuncSetCacheConfig(path_kernel2<float,forward_functor<float> >, hipFuncCachePreferL1);
hipFuncSetCacheConfig(path_kernel<float,forward_functor<float> >, hipFuncCachePreferL1);
for (int offset = 0; offset < (dims+batchSize); offset += batchSize){
forward_norm_functor<float> functor(projections->get_data_ptr());
if (exterior_path_lengths != NULL){
hipLaunchKernelGGL(( path_kernel2<float,forward_norm_functor<float> >), dim3(dimGrid), dim3(dimBlock) , 0, 0, functor,splines->get_data_ptr(),exterior_path_lengths->get_data_ptr(),phys_dims, (vector_td<int,3>)_dims, dims,offset);
//forward_kernel2<float><<< dimGrid, dimBlock >>> (image->get_data_ptr(), projections->get_data_ptr(),splines->get_data_ptr(),exterior_path_lengths->get_data_ptr(),phys_dims, (vector_td<int,3>)_dims, dims,offset);
}else
hipLaunchKernelGGL(( path_kernel<float,forward_norm_functor<float> >), dim3(dimGrid), dim3(dimBlock) , 0, 0, functor,splines->get_data_ptr(),phys_dims, (vector_td<int,3>)_dims, dims,offset);
}
//hipDeviceSynchronize();
//CHECK_FOR_CUDA_ERROR();
}
template<> void protonProjection<hoCuNDArray>(hoCuNDArray<float> * image,hoCuNDArray<float> * projections, hoCuNDArray<floatd3> * splines, floatd3 phys_dims, hoCuNDArray<float>* exterior_path_lengths){
size_t floats_per_proton = exterior_path_lengths == NULL ? 13 : 15;
size_t max_batch_size = calculate_batch_size(floats_per_proton);
size_t elements = projections->get_number_of_elements();
size_t offset = 0;
cuNDArray<float> cu_image(image);
for (size_t n = 0; n < (elements+max_batch_size-1)/max_batch_size; n++){
size_t batch_size = ::min(max_batch_size,elements-offset);
std::vector<size_t> batch_dim;
batch_dim.push_back(batch_size);
hoCuNDArray<float> projections_view(&batch_dim,projections->get_data_ptr()+offset); //This creates a "view" of out
cuNDArray<float> cu_projections(&projections_view);
batch_dim.push_back(4);
hoCuNDArray<vector_td<float,3> > splines_view(&batch_dim,splines->get_data_ptr()+offset*4); // This creates a "view" of splines
cuNDArray<vector_td<float,3> > cu_splines(&splines_view);
if (exterior_path_lengths != NULL){
batch_dim.back() = 2;
hoCuNDArray<float> EPL_view(&batch_dim,exterior_path_lengths->get_data_ptr()+offset*2); //This creates a "view" of out
cuNDArray<float> cu_EPL(&EPL_view);
protonProjection(&cu_image,&cu_projections, &cu_splines,phys_dims,&cu_EPL);
}else
protonProjection(&cu_image,&cu_projections, &cu_splines,phys_dims);
hipMemcpy(projections_view.get_data_ptr(),cu_projections.get_data_ptr(),batch_size*sizeof(float),hipMemcpyDeviceToHost); //Copies back the data to the host
offset += batch_size;
}
}
template<> void protonBackprojection<cuNDArray>(cuNDArray<float> * image, cuNDArray<float> * projections, cuNDArray<floatd3>* splines, floatd3 phys_dims, cuNDArray<float>* exterior_path_lengths){
int dims = projections->get_number_of_elements();
int threadsPerBlock =::min(dims,cudaDeviceManager::Instance()->max_blockdim());
dim3 dimBlock( threadsPerBlock);
int totalBlocksPerGrid = (dims+threadsPerBlock-1)/threadsPerBlock;
dim3 dimGrid(::min(totalBlocksPerGrid,cudaDeviceManager::Instance()->max_griddim()));
uint64d3 _dims = from_std_vector<size_t,3>( *(image->get_dimensions().get()) );
// Invoke kernel
int batchSize = dimBlock.x*dimGrid.x;
hipFuncSetCacheConfig(path_kernel2<float,backward_functor<float> >, hipFuncCachePreferL1);
hipFuncSetCacheConfig(path_kernel<float,backward_functor<float> >, hipFuncCachePreferL1);
for (int offset = 0; offset < (dims+batchSize); offset += batchSize){
backward_functor<float> functor(image->get_data_ptr(),projections->get_data_ptr());
if (exterior_path_lengths != NULL){
hipLaunchKernelGGL(( path_kernel2<float,backward_functor<float> >), dim3(dimGrid), dim3(dimBlock) , 0, 0, functor,splines->get_data_ptr(),exterior_path_lengths->get_data_ptr(),phys_dims, (vector_td<int,3>)_dims, dims,offset);
//backwards_kernel2<float><<< dimGrid, dimBlock >>> (projections->get_data_ptr(), image->get_data_ptr(),splines->get_data_ptr(),exterior_path_lengths->get_data_ptr(),phys_dims, (vector_td<int,3>)_dims, dims,offset);
}else
hipLaunchKernelGGL(( path_kernel<float, backward_functor<float> >), dim3(dimGrid), dim3(dimBlock) , 0, 0, functor,splines->get_data_ptr(),phys_dims, (vector_td<int,3>)_dims, dims,offset);
}
}
template<> void countProtonsPerVoxel<cuNDArray>(cuNDArray<float> * image, cuNDArray<floatd3>* splines, floatd3 phys_dims, cuNDArray<float>* exterior_path_lengths){
int dims = splines->get_number_of_elements()/4;
int threadsPerBlock =::min(dims,cudaDeviceManager::Instance()->max_blockdim());
dim3 dimBlock( threadsPerBlock);
int totalBlocksPerGrid = (dims+threadsPerBlock-1)/threadsPerBlock;
dim3 dimGrid(::min(totalBlocksPerGrid,cudaDeviceManager::Instance()->max_griddim()));
uint64d3 _dims = from_std_vector<size_t,3>( *(image->get_dimensions().get()) );
// Invoke kernel
int batchSize = dimBlock.x*dimGrid.x;
hipFuncSetCacheConfig(path_kernel2<float,backward_functor<float> >, hipFuncCachePreferL1);
hipFuncSetCacheConfig(path_kernel<float,backward_functor<float> >, hipFuncCachePreferL1);
for (int offset = 0; offset < (dims+batchSize); offset += batchSize){
backward_counting_functor<float> functor(image->get_data_ptr());
if (exterior_path_lengths != NULL){
hipLaunchKernelGGL(( path_kernel2<float,backward_counting_functor<float> >), dim3(dimGrid), dim3(dimBlock) , 0, 0, functor,splines->get_data_ptr(),exterior_path_lengths->get_data_ptr(),phys_dims, (vector_td<int,3>)_dims, dims,offset);
//backwards_kernel2<float><<< dimGrid, dimBlock >>> (projections->get_data_ptr(), image->get_data_ptr(),splines->get_data_ptr(),exterior_path_lengths->get_data_ptr(),phys_dims, (vector_td<int,3>)_dims, dims,offset);
}else
hipLaunchKernelGGL(( path_kernel<float, backward_counting_functor<float> >), dim3(dimGrid), dim3(dimBlock) , 0, 0, functor,splines->get_data_ptr(),phys_dims, (vector_td<int,3>)_dims, dims,offset);
}
}
template<> void countProtonsPerVoxel<hoCuNDArray>(hoCuNDArray<float>* image, hoCuNDArray<floatd3>* splines, floatd3 phys_dims, hoCuNDArray<float>* exterior_path_lengths) {
cuNDArray<float> cu_image(image);
CHECK_FOR_CUDA_ERROR();
size_t floats_per_proton = exterior_path_lengths == NULL ? 13 : 15;
size_t max_batch_size = calculate_batch_size(floats_per_proton);
size_t elements = splines->get_number_of_elements()/4;
size_t offset = 0;
for (size_t n = 0; n < (elements+max_batch_size-1)/max_batch_size; n++){
size_t batch_size = ::min(max_batch_size,elements-offset);
std::vector<size_t> batch_dim;
batch_dim.push_back(batch_size);
batch_dim.push_back(4);
hoCuNDArray<floatd3 > splines_view(&batch_dim,splines->get_data_ptr()+offset*4); // This creates a "view" of splines
cuNDArray<floatd3 > cu_splines(&splines_view);
if (exterior_path_lengths != NULL){
batch_dim.back() = 2;
hoCuNDArray<float> EPL_view(&batch_dim,exterior_path_lengths->get_data_ptr()+offset*2); //This creates a "view" of projections
cuNDArray<float> cu_EPL(&EPL_view);
countProtonsPerVoxel<cuNDArray>(&cu_image,&cu_splines,phys_dims,&cu_EPL);
} else
countProtonsPerVoxel<cuNDArray>(&cu_image,&cu_splines,phys_dims);
CHECK_FOR_CUDA_ERROR();
offset += batch_size;
}
hipMemcpy(image->get_data_ptr(),cu_image.get_data_ptr(),cu_image.get_number_of_elements()*sizeof(float),hipMemcpyDeviceToHost);
}
template<> void protonPathNorm<hoCuNDArray>(std::vector<size_t> img_dims,hoCuNDArray<float> * projections, hoCuNDArray<floatd3> * splines, floatd3 phys_dims, hoCuNDArray<float>* exterior_path_lengths){
size_t floats_per_proton = exterior_path_lengths == NULL ? 13 : 15;
size_t max_batch_size = calculate_batch_size(floats_per_proton);
size_t elements = projections->get_number_of_elements();
size_t offset = 0;
for (size_t n = 0; n < (elements+max_batch_size-1)/max_batch_size; n++){
size_t batch_size = ::min(max_batch_size,elements-offset);
std::vector<size_t> batch_dim;
batch_dim.push_back(batch_size);
hoCuNDArray<float> projections_view(&batch_dim,projections->get_data_ptr()+offset); //This creates a "view" of out
cuNDArray<float> cu_projections(&projections_view);
batch_dim.push_back(4);
hoCuNDArray<vector_td<float,3> > splines_view(&batch_dim,splines->get_data_ptr()+offset*4); // This creates a "view" of splines
cuNDArray<vector_td<float,3> > cu_splines(&splines_view);
if (exterior_path_lengths != NULL){
batch_dim.back() = 2;
hoCuNDArray<float> EPL_view(&batch_dim,exterior_path_lengths->get_data_ptr()+offset*2); //This creates a "view" of out
cuNDArray<float> cu_EPL(&EPL_view);
protonPathNorm(img_dims,&cu_projections, &cu_splines,phys_dims,&cu_EPL);
}else
protonPathNorm(img_dims,&cu_projections, &cu_splines,phys_dims);
hipMemcpy(projections_view.get_data_ptr(),cu_projections.get_data_ptr(),batch_size*sizeof(float),hipMemcpyDeviceToHost); //Copies back the data to the host
offset += batch_size;
}
}
template<> void protonBackprojection<hoCuNDArray>(hoCuNDArray<float>* image, hoCuNDArray<float>* projections, hoCuNDArray<floatd3>* splines, floatd3 phys_dims, hoCuNDArray<float>* exterior_path_lengths) {
cuNDArray<float> cu_image(image);
CHECK_FOR_CUDA_ERROR();
size_t floats_per_proton = exterior_path_lengths == NULL ? 13 : 15;
size_t max_batch_size = calculate_batch_size(floats_per_proton);
size_t elements = projections->get_number_of_elements();
size_t offset = 0;
for (size_t n = 0; n < (elements+max_batch_size-1)/max_batch_size; n++){
size_t batch_size = ::min(max_batch_size,elements-offset);
std::vector<size_t> batch_dim;
batch_dim.push_back(batch_size);
hoCuNDArray<float> projection_view(&batch_dim,projections->get_data_ptr()+offset); //This creates a "view" of projections
cuNDArray<float> cu_projections(&projection_view);
batch_dim.push_back(4);
hoCuNDArray<floatd3 > splines_view(&batch_dim,splines->get_data_ptr()+offset*4); // This creates a "view" of splines
cuNDArray<floatd3 > cu_splines(&splines_view);
if (exterior_path_lengths != NULL){
batch_dim.back() = 2;
hoCuNDArray<float> EPL_view(&batch_dim,exterior_path_lengths->get_data_ptr()+offset*2); //This creates a "view" of projections
cuNDArray<float> cu_EPL(&EPL_view);
protonBackprojection<cuNDArray>(&cu_image,&cu_projections,&cu_splines,phys_dims,&cu_EPL);
} else
protonBackprojection<cuNDArray>(&cu_image,&cu_projections,&cu_splines,phys_dims);
CHECK_FOR_CUDA_ERROR();
offset += batch_size;
}
hipMemcpy(image->get_data_ptr(),cu_image.get_data_ptr(),cu_image.get_number_of_elements()*sizeof(float),hipMemcpyDeviceToHost);
}
template void pad_nearest<float,1>( cuNDArray<float> *in, cuNDArray<float> *out );
template void pad_nearest<float,2>( cuNDArray<float> *in, cuNDArray<float> *out );
template void pad_nearest<float,3>( cuNDArray<float> *in, cuNDArray<float> *out );
template void pad_nearest<float,4>( cuNDArray<float> *in, cuNDArray<float> *out );
template void pad_nearest<double,1>( cuNDArray<double> *in, cuNDArray<double> *out );
template void pad_nearest<double,2>( cuNDArray<double> *in, cuNDArray<double> *out );
template void pad_nearest<double,3>( cuNDArray<double> *in, cuNDArray<double> *out );
template void pad_nearest<double,4>( cuNDArray<double> *in, cuNDArray<double> *out );
}
| 7a4ee21633ca08a75114055b74ffa84ee75bfdcf.cu |
#include "proton_utils.h"
#include "proton_kernels.h"
#include "vector_td_utilities.h"
#include "setup_grid.h"
#include "cudaDeviceManager.h"
namespace Gadgetron {
static size_t calculate_batch_size(size_t floats_per_proton = 14){
size_t mem_per_proton = floats_per_proton*sizeof(float); // Need 12 floats for the splines and 1 for the projection
size_t free;
size_t total;
int res = cudaMemGetInfo(&free,&total);
return 1024*1024*(free/(1024*1024*mem_per_proton)); //Divisions by 1024*1024 to ensure MB batch size
}
void rotate_splines(cuNDArray<floatd3> * splines,float angle){
unsigned int elements = splines->get_number_of_elements()/4;
unsigned int threadsPerBlock =std::min(elements,(unsigned int) cudaDeviceManager::Instance()->max_blockdim());
dim3 dimBlock( threadsPerBlock);
int totalBlocksPerGrid = (elements+threadsPerBlock-1)/threadsPerBlock;
dim3 dimGrid(std::min(totalBlocksPerGrid,cudaDeviceManager::Instance()->max_griddim()));
// Invoke kernel
int batchSize = dimGrid.x*dimBlock.x;
//std::cout << "Starting forward kernel with grid " << dimGrid.x << " " << dimGrid.y << " " << dimGrid.z << std::endl;
for (unsigned int offset = 0; offset < (elements+batchSize); offset += batchSize){
rotate_splines_kernel<float><<< dimGrid, dimBlock >>> (splines->get_data_ptr(),angle,elements,offset);
}
cudaDeviceSynchronize();
CHECK_FOR_CUDA_ERROR();
}
// Expand and fill with nearest value
template<class T, unsigned int D>
__global__ void pad_nearest_kernel( vector_td<unsigned int,D> matrix_size_in, vector_td<unsigned int,D> matrix_size_out,
const T * __restrict__ in, T * __restrict__ out, unsigned int number_of_batches, unsigned int num_elements)
{
const unsigned int idx = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x+threadIdx.x;
const unsigned int frame_offset = idx/num_elements;
if( idx < num_elements*number_of_batches ){
const typename uintd<D>::Type co_out = idx_to_co<D>( idx-frame_offset*num_elements, matrix_size_out );
const typename uintd<D>::Type offset = (matrix_size_out-matrix_size_in)>>1;
T _out;
bool inside = (co_out>=offset) && (co_out<(matrix_size_in+offset));
if( inside )
_out = in[co_to_idx<D>(co_out-offset, matrix_size_in)+frame_offset*prod(matrix_size_in)];
else{
const vector_td<unsigned int,D> co_in = amax(amin(co_out,matrix_size_in-1u),0u);
_out = in[co_to_idx<D>(co_in,matrix_size_in)+frame_offset*prod(matrix_size_in)];
}
out[idx] = _out;
}
}
template<class T, unsigned int D>
void pad_nearest( cuNDArray<T> *in, cuNDArray<T> *out )
{
if( in == 0x0 || out == 0x0 ){
throw std::runtime_error("pad: 0x0 ndarray provided");;
}
if( in->get_number_of_dimensions() != out->get_number_of_dimensions() ){
throw std::runtime_error("pad: image dimensions mismatch");;
}
if( in->get_number_of_dimensions() < D ){
std::stringstream ss;
ss << "pad: number of image dimensions should be at least " << D;
throw std::runtime_error(ss.str());;
}
typename uint64d<D>::Type matrix_size_in = from_std_vector<size_t,D>( *in->get_dimensions() );
typename uint64d<D>::Type matrix_size_out = from_std_vector<size_t,D>( *out->get_dimensions() );
unsigned int number_of_batches = 1;
for( unsigned int d=D; d<in->get_number_of_dimensions(); d++ ){
number_of_batches *= in->get_size(d);
}
if( weak_greater(matrix_size_in,matrix_size_out) ){
throw std::runtime_error("pad: size mismatch, cannot expand");
}
// Setup block/grid dimensions
dim3 blockDim; dim3 gridDim;
setup_grid( prod(matrix_size_out), &blockDim, &gridDim, number_of_batches );
// Invoke kernel
pad_nearest_kernel<T,D><<< gridDim, blockDim >>>
( vector_td<unsigned int,D>(matrix_size_in), vector_td<unsigned int,D>(matrix_size_out),
in->get_data_ptr(), out->get_data_ptr(), number_of_batches, prod(matrix_size_out) );
CHECK_FOR_CUDA_ERROR();
}
template<> void protonProjection<cuNDArray>(cuNDArray<float>* image, cuNDArray<float>* projections, cuNDArray<floatd3>* splines, floatd3 phys_dims, cuNDArray<float>* exterior_path_lengths){
int dims = projections->get_number_of_elements();
int threadsPerBlock =std::min(dims,cudaDeviceManager::Instance()->max_blockdim());
dim3 dimBlock( threadsPerBlock);
int totalBlocksPerGrid = (dims+threadsPerBlock-1)/threadsPerBlock;
dim3 dimGrid(std::min(totalBlocksPerGrid,cudaDeviceManager::Instance()->max_griddim()));
uint64d3 _dims = from_std_vector<size_t,3>( *(image->get_dimensions()) );
// Invoke kernel
int batchSize = dimGrid.x*dimBlock.x;
//std::cout << "Starting forward kernel with grid " << dimGrid.x << " " << dimGrid.y << " " << dimGrid.z << std::endl;
cudaFuncSetCacheConfig(path_kernel2<float,forward_functor<float> >, cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(path_kernel<float,forward_functor<float> >, cudaFuncCachePreferL1);
for (int offset = 0; offset < (dims+batchSize); offset += batchSize){
forward_functor<float> functor(image->get_data_ptr(),projections->get_data_ptr());
if (exterior_path_lengths != NULL){
path_kernel2<float,forward_functor<float> ><<< dimGrid, dimBlock >>> (functor,splines->get_data_ptr(),exterior_path_lengths->get_data_ptr(),phys_dims, (vector_td<int,3>)_dims, dims,offset);
//forward_kernel2<float><<< dimGrid, dimBlock >>> (image->get_data_ptr(), projections->get_data_ptr(),splines->get_data_ptr(),exterior_path_lengths->get_data_ptr(),phys_dims, (vector_td<int,3>)_dims, dims,offset);
}else
path_kernel<float,forward_functor<float> ><<< dimGrid, dimBlock >>> (functor,splines->get_data_ptr(),phys_dims, (vector_td<int,3>)_dims, dims,offset);
}
//cudaDeviceSynchronize();
//CHECK_FOR_CUDA_ERROR();
}
template<> void protonPathNorm<cuNDArray>(std::vector<size_t> img_dims, cuNDArray<float>* projections, cuNDArray<floatd3>* splines, floatd3 phys_dims, cuNDArray<float>* exterior_path_lengths){
int dims = projections->get_number_of_elements();
int threadsPerBlock =std::min(dims,cudaDeviceManager::Instance()->max_blockdim());
dim3 dimBlock( threadsPerBlock);
int totalBlocksPerGrid = (dims+threadsPerBlock-1)/threadsPerBlock;
dim3 dimGrid(std::min(totalBlocksPerGrid,cudaDeviceManager::Instance()->max_griddim()));
uint64d3 _dims = from_std_vector<size_t,3>( img_dims );
// Invoke kernel
int batchSize = dimGrid.x*dimBlock.x;
//std::cout << "Starting forward kernel with grid " << dimGrid.x << " " << dimGrid.y << " " << dimGrid.z << std::endl;
cudaFuncSetCacheConfig(path_kernel2<float,forward_functor<float> >, cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(path_kernel<float,forward_functor<float> >, cudaFuncCachePreferL1);
for (int offset = 0; offset < (dims+batchSize); offset += batchSize){
forward_norm_functor<float> functor(projections->get_data_ptr());
if (exterior_path_lengths != NULL){
path_kernel2<float,forward_norm_functor<float> ><<< dimGrid, dimBlock >>> (functor,splines->get_data_ptr(),exterior_path_lengths->get_data_ptr(),phys_dims, (vector_td<int,3>)_dims, dims,offset);
//forward_kernel2<float><<< dimGrid, dimBlock >>> (image->get_data_ptr(), projections->get_data_ptr(),splines->get_data_ptr(),exterior_path_lengths->get_data_ptr(),phys_dims, (vector_td<int,3>)_dims, dims,offset);
}else
path_kernel<float,forward_norm_functor<float> ><<< dimGrid, dimBlock >>> (functor,splines->get_data_ptr(),phys_dims, (vector_td<int,3>)_dims, dims,offset);
}
//cudaDeviceSynchronize();
//CHECK_FOR_CUDA_ERROR();
}
template<> void protonProjection<hoCuNDArray>(hoCuNDArray<float> * image,hoCuNDArray<float> * projections, hoCuNDArray<floatd3> * splines, floatd3 phys_dims, hoCuNDArray<float>* exterior_path_lengths){
size_t floats_per_proton = exterior_path_lengths == NULL ? 13 : 15;
size_t max_batch_size = calculate_batch_size(floats_per_proton);
size_t elements = projections->get_number_of_elements();
size_t offset = 0;
cuNDArray<float> cu_image(image);
for (size_t n = 0; n < (elements+max_batch_size-1)/max_batch_size; n++){
size_t batch_size = std::min(max_batch_size,elements-offset);
std::vector<size_t> batch_dim;
batch_dim.push_back(batch_size);
hoCuNDArray<float> projections_view(&batch_dim,projections->get_data_ptr()+offset); //This creates a "view" of out
cuNDArray<float> cu_projections(&projections_view);
batch_dim.push_back(4);
hoCuNDArray<vector_td<float,3> > splines_view(&batch_dim,splines->get_data_ptr()+offset*4); // This creates a "view" of splines
cuNDArray<vector_td<float,3> > cu_splines(&splines_view);
if (exterior_path_lengths != NULL){
batch_dim.back() = 2;
hoCuNDArray<float> EPL_view(&batch_dim,exterior_path_lengths->get_data_ptr()+offset*2); //This creates a "view" of out
cuNDArray<float> cu_EPL(&EPL_view);
protonProjection(&cu_image,&cu_projections, &cu_splines,phys_dims,&cu_EPL);
}else
protonProjection(&cu_image,&cu_projections, &cu_splines,phys_dims);
cudaMemcpy(projections_view.get_data_ptr(),cu_projections.get_data_ptr(),batch_size*sizeof(float),cudaMemcpyDeviceToHost); //Copies back the data to the host
offset += batch_size;
}
}
template<> void protonBackprojection<cuNDArray>(cuNDArray<float> * image, cuNDArray<float> * projections, cuNDArray<floatd3>* splines, floatd3 phys_dims, cuNDArray<float>* exterior_path_lengths){
int dims = projections->get_number_of_elements();
int threadsPerBlock =std::min(dims,cudaDeviceManager::Instance()->max_blockdim());
dim3 dimBlock( threadsPerBlock);
int totalBlocksPerGrid = (dims+threadsPerBlock-1)/threadsPerBlock;
dim3 dimGrid(std::min(totalBlocksPerGrid,cudaDeviceManager::Instance()->max_griddim()));
uint64d3 _dims = from_std_vector<size_t,3>( *(image->get_dimensions().get()) );
// Invoke kernel
int batchSize = dimBlock.x*dimGrid.x;
cudaFuncSetCacheConfig(path_kernel2<float,backward_functor<float> >, cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(path_kernel<float,backward_functor<float> >, cudaFuncCachePreferL1);
for (int offset = 0; offset < (dims+batchSize); offset += batchSize){
backward_functor<float> functor(image->get_data_ptr(),projections->get_data_ptr());
if (exterior_path_lengths != NULL){
path_kernel2<float,backward_functor<float> ><<< dimGrid, dimBlock >>> (functor,splines->get_data_ptr(),exterior_path_lengths->get_data_ptr(),phys_dims, (vector_td<int,3>)_dims, dims,offset);
//backwards_kernel2<float><<< dimGrid, dimBlock >>> (projections->get_data_ptr(), image->get_data_ptr(),splines->get_data_ptr(),exterior_path_lengths->get_data_ptr(),phys_dims, (vector_td<int,3>)_dims, dims,offset);
}else
path_kernel<float, backward_functor<float> ><<< dimGrid, dimBlock >>> (functor,splines->get_data_ptr(),phys_dims, (vector_td<int,3>)_dims, dims,offset);
}
}
template<> void countProtonsPerVoxel<cuNDArray>(cuNDArray<float> * image, cuNDArray<floatd3>* splines, floatd3 phys_dims, cuNDArray<float>* exterior_path_lengths){
int dims = splines->get_number_of_elements()/4;
int threadsPerBlock =std::min(dims,cudaDeviceManager::Instance()->max_blockdim());
dim3 dimBlock( threadsPerBlock);
int totalBlocksPerGrid = (dims+threadsPerBlock-1)/threadsPerBlock;
dim3 dimGrid(std::min(totalBlocksPerGrid,cudaDeviceManager::Instance()->max_griddim()));
uint64d3 _dims = from_std_vector<size_t,3>( *(image->get_dimensions().get()) );
// Invoke kernel
int batchSize = dimBlock.x*dimGrid.x;
cudaFuncSetCacheConfig(path_kernel2<float,backward_functor<float> >, cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(path_kernel<float,backward_functor<float> >, cudaFuncCachePreferL1);
for (int offset = 0; offset < (dims+batchSize); offset += batchSize){
backward_counting_functor<float> functor(image->get_data_ptr());
if (exterior_path_lengths != NULL){
path_kernel2<float,backward_counting_functor<float> ><<< dimGrid, dimBlock >>> (functor,splines->get_data_ptr(),exterior_path_lengths->get_data_ptr(),phys_dims, (vector_td<int,3>)_dims, dims,offset);
//backwards_kernel2<float><<< dimGrid, dimBlock >>> (projections->get_data_ptr(), image->get_data_ptr(),splines->get_data_ptr(),exterior_path_lengths->get_data_ptr(),phys_dims, (vector_td<int,3>)_dims, dims,offset);
}else
path_kernel<float, backward_counting_functor<float> ><<< dimGrid, dimBlock >>> (functor,splines->get_data_ptr(),phys_dims, (vector_td<int,3>)_dims, dims,offset);
}
}
template<> void countProtonsPerVoxel<hoCuNDArray>(hoCuNDArray<float>* image, hoCuNDArray<floatd3>* splines, floatd3 phys_dims, hoCuNDArray<float>* exterior_path_lengths) {
cuNDArray<float> cu_image(image);
CHECK_FOR_CUDA_ERROR();
size_t floats_per_proton = exterior_path_lengths == NULL ? 13 : 15;
size_t max_batch_size = calculate_batch_size(floats_per_proton);
size_t elements = splines->get_number_of_elements()/4;
size_t offset = 0;
for (size_t n = 0; n < (elements+max_batch_size-1)/max_batch_size; n++){
size_t batch_size = std::min(max_batch_size,elements-offset);
std::vector<size_t> batch_dim;
batch_dim.push_back(batch_size);
batch_dim.push_back(4);
hoCuNDArray<floatd3 > splines_view(&batch_dim,splines->get_data_ptr()+offset*4); // This creates a "view" of splines
cuNDArray<floatd3 > cu_splines(&splines_view);
if (exterior_path_lengths != NULL){
batch_dim.back() = 2;
hoCuNDArray<float> EPL_view(&batch_dim,exterior_path_lengths->get_data_ptr()+offset*2); //This creates a "view" of projections
cuNDArray<float> cu_EPL(&EPL_view);
countProtonsPerVoxel<cuNDArray>(&cu_image,&cu_splines,phys_dims,&cu_EPL);
} else
countProtonsPerVoxel<cuNDArray>(&cu_image,&cu_splines,phys_dims);
CHECK_FOR_CUDA_ERROR();
offset += batch_size;
}
cudaMemcpy(image->get_data_ptr(),cu_image.get_data_ptr(),cu_image.get_number_of_elements()*sizeof(float),cudaMemcpyDeviceToHost);
}
template<> void protonPathNorm<hoCuNDArray>(std::vector<size_t> img_dims,hoCuNDArray<float> * projections, hoCuNDArray<floatd3> * splines, floatd3 phys_dims, hoCuNDArray<float>* exterior_path_lengths){
size_t floats_per_proton = exterior_path_lengths == NULL ? 13 : 15;
size_t max_batch_size = calculate_batch_size(floats_per_proton);
size_t elements = projections->get_number_of_elements();
size_t offset = 0;
for (size_t n = 0; n < (elements+max_batch_size-1)/max_batch_size; n++){
size_t batch_size = std::min(max_batch_size,elements-offset);
std::vector<size_t> batch_dim;
batch_dim.push_back(batch_size);
hoCuNDArray<float> projections_view(&batch_dim,projections->get_data_ptr()+offset); //This creates a "view" of out
cuNDArray<float> cu_projections(&projections_view);
batch_dim.push_back(4);
hoCuNDArray<vector_td<float,3> > splines_view(&batch_dim,splines->get_data_ptr()+offset*4); // This creates a "view" of splines
cuNDArray<vector_td<float,3> > cu_splines(&splines_view);
if (exterior_path_lengths != NULL){
batch_dim.back() = 2;
hoCuNDArray<float> EPL_view(&batch_dim,exterior_path_lengths->get_data_ptr()+offset*2); //This creates a "view" of out
cuNDArray<float> cu_EPL(&EPL_view);
protonPathNorm(img_dims,&cu_projections, &cu_splines,phys_dims,&cu_EPL);
}else
protonPathNorm(img_dims,&cu_projections, &cu_splines,phys_dims);
cudaMemcpy(projections_view.get_data_ptr(),cu_projections.get_data_ptr(),batch_size*sizeof(float),cudaMemcpyDeviceToHost); //Copies back the data to the host
offset += batch_size;
}
}
template<> void protonBackprojection<hoCuNDArray>(hoCuNDArray<float>* image, hoCuNDArray<float>* projections, hoCuNDArray<floatd3>* splines, floatd3 phys_dims, hoCuNDArray<float>* exterior_path_lengths) {
cuNDArray<float> cu_image(image);
CHECK_FOR_CUDA_ERROR();
size_t floats_per_proton = exterior_path_lengths == NULL ? 13 : 15;
size_t max_batch_size = calculate_batch_size(floats_per_proton);
size_t elements = projections->get_number_of_elements();
size_t offset = 0;
for (size_t n = 0; n < (elements+max_batch_size-1)/max_batch_size; n++){
size_t batch_size = std::min(max_batch_size,elements-offset);
std::vector<size_t> batch_dim;
batch_dim.push_back(batch_size);
hoCuNDArray<float> projection_view(&batch_dim,projections->get_data_ptr()+offset); //This creates a "view" of projections
cuNDArray<float> cu_projections(&projection_view);
batch_dim.push_back(4);
hoCuNDArray<floatd3 > splines_view(&batch_dim,splines->get_data_ptr()+offset*4); // This creates a "view" of splines
cuNDArray<floatd3 > cu_splines(&splines_view);
if (exterior_path_lengths != NULL){
batch_dim.back() = 2;
hoCuNDArray<float> EPL_view(&batch_dim,exterior_path_lengths->get_data_ptr()+offset*2); //This creates a "view" of projections
cuNDArray<float> cu_EPL(&EPL_view);
protonBackprojection<cuNDArray>(&cu_image,&cu_projections,&cu_splines,phys_dims,&cu_EPL);
} else
protonBackprojection<cuNDArray>(&cu_image,&cu_projections,&cu_splines,phys_dims);
CHECK_FOR_CUDA_ERROR();
offset += batch_size;
}
cudaMemcpy(image->get_data_ptr(),cu_image.get_data_ptr(),cu_image.get_number_of_elements()*sizeof(float),cudaMemcpyDeviceToHost);
}
template void pad_nearest<float,1>( cuNDArray<float> *in, cuNDArray<float> *out );
template void pad_nearest<float,2>( cuNDArray<float> *in, cuNDArray<float> *out );
template void pad_nearest<float,3>( cuNDArray<float> *in, cuNDArray<float> *out );
template void pad_nearest<float,4>( cuNDArray<float> *in, cuNDArray<float> *out );
template void pad_nearest<double,1>( cuNDArray<double> *in, cuNDArray<double> *out );
template void pad_nearest<double,2>( cuNDArray<double> *in, cuNDArray<double> *out );
template void pad_nearest<double,3>( cuNDArray<double> *in, cuNDArray<double> *out );
template void pad_nearest<double,4>( cuNDArray<double> *in, cuNDArray<double> *out );
}
|
86a9e9a8c0881bd383a60edbaab533b4c9597699.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "header.h"
__global__ void kernel_update_acc( particule_t *p, vector_t *acc, int size ) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i=0;
float dx,dy,dz,d,fact;
if ( j < size ) {
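// each thread j accumulates the acceleration of particle j from every particle i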
acc[j].x = 0.0f;
acc[j].y = 0.0f;
acc[j].z = 0.0f;
for (i = 0; i < size; ++i)
{
dx = p[i].x-p[j].x;
dy = p[i].y-p[j].y;
dz = p[i].z-p[j].z;
d = dx*dx+dy*dy+dz*dz;
if ( d < 1.0 ) d = 1.0;
fact=p[i].m/(d*sqrtf(d));
acc[j].x += dx*fact;
acc[j].y += dy*fact;
acc[j].z += dz*fact;
}
}
}
void update_acc( int nblocks, int nthreads, particule_t *p, vector_t *acc, int size) {
hipLaunchKernelGGL(( kernel_update_acc), dim3(nblocks), dim3(nthreads), 0, 0, p, acc, size);
}
| 86a9e9a8c0881bd383a60edbaab533b4c9597699.cu | #include "cuda.h"
#include "header.h"
__global__ void kernel_update_acc( particule_t *p, vector_t *acc, int size ) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i=0;
float dx,dy,dz,d,fact;
if ( j < size ) {
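// each thread j accumulates the acceleration of particle j from every particle i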
acc[j].x = 0.0f;
acc[j].y = 0.0f;
acc[j].z = 0.0f;
for (i = 0; i < size; ++i)
{
dx = p[i].x-p[j].x;
dy = p[i].y-p[j].y;
dz = p[i].z-p[j].z;
d = dx*dx+dy*dy+dz*dz;
if ( d < 1.0 ) d = 1.0;
fact=p[i].m/(d*sqrtf(d));
acc[j].x += dx*fact;
acc[j].y += dy*fact;
acc[j].z += dz*fact;
}
}
}
void update_acc( int nblocks, int nthreads, particule_t *p, vector_t *acc, int size) {
kernel_update_acc<<<nblocks, nthreads>>>( p, acc, size);
}
|
09c6bbf2346938a5c03098f6dfbb3715b1b3d855.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h> // need TORCH_HIP_VERSION
#include <cudnn.h>
#include <stdio.h>
int main_()
{
int version = (int)cudnnGetVersion();
printf("cudnnGetVersion() : %d , CUDNN_VERSION from cudnn.h : %d \n", version, CUDNN_VERSION);
return 0;
}
| 09c6bbf2346938a5c03098f6dfbb3715b1b3d855.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h> // need CUDA_VERSION
#include <cudnn.h>
#include <stdio.h>
int main_()
{
int version = (int)cudnnGetVersion();
printf("cudnnGetVersion() : %d , CUDNN_VERSION from cudnn.h : %d \n", version, CUDNN_VERSION);
return 0;
}
|
6e7500e76c685cfa67edeeba10adedc8d01ea94b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "prefixSumBackward.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *arr = NULL;
hipMalloc(&arr, XSIZE*YSIZE*sizeof(float));
int step = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( prefixSumBackward), dim3(gridBlock),dim3(threadBlock), 0, 0, arr,step);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( prefixSumBackward), dim3(gridBlock),dim3(threadBlock), 0, 0, arr,step);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( prefixSumBackward), dim3(gridBlock),dim3(threadBlock), 0, 0, arr,step);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 6e7500e76c685cfa67edeeba10adedc8d01ea94b.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "prefixSumBackward.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *arr = NULL;
cudaMalloc(&arr, XSIZE*YSIZE*sizeof(float));
int step = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
prefixSumBackward<<<gridBlock,threadBlock>>>(arr,step);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
prefixSumBackward<<<gridBlock,threadBlock>>>(arr,step);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
prefixSumBackward<<<gridBlock,threadBlock>>>(arr,step);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
ccaa67fd37014b89fed6e9f7822d028697436469.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* =====================================================================================
*
* Filename: hydrodynamic-flow.cc
*
* Description:
*
* Version: 1.0
* Created: 03/18/2015 11:23:52 AM
* Revision: none
* Compiler: gcc
*
* Author: Boss14420 (), firefox at gmail dot com
* Organization:
*
* =====================================================================================
*/
#include <cmath>
#include <cstdio>
#include <iostream>
#include <iomanip>
#include <cstring>
#include "common.hpp"
#define SWAP(x, y) (x ^= y ^= x ^= y);
#define STRIDE 128
#define WIDTH 120
#define HEIGHT 120
#define OBSTACLE_MIN_X 25
#define OBSTACLE_MAX_X 60
#define OBSTACLE_MIN_Y 10
#define OBSTACLE_MAX_Y 40
//#define dx .01f
//#define dy .01f
//#define dt .01f
#define INDEXU(x, y) ((x) * (STRIDE) + (y))
#define INDEXV(x, y) ((x) * (STRIDE) + (y))
#define INDEXP(x, y) ((x)*STRIDE+ (y))
#define SQR(x) ((x) * (x))
#define nu 0.1f
#define Dtolerance 0.01f
int *nodivergence;
//__device__ int nodivergence = 1;
#define cellPerThreadX 2
#define cellPerThreadY 1
#define boundaryCellPerThreadX 16
#define boundaryCellPerThreadY 16
__const__ dim3 dimBlock(16, 16);
__const__ dim3 dimGrid( (((WIDTH + dimBlock.x - 1)/dimBlock.x) + cellPerThreadX - 1)/cellPerThreadX,
(((HEIGHT+ dimBlock.y - 1)/dimBlock.y) + cellPerThreadY - 1)/cellPerThreadY
);
__const__ dim3 dimGrid2( (((WIDTH + dimBlock.x - 1)/dimBlock.x) + boundaryCellPerThreadX - 1)/boundaryCellPerThreadX,
(((HEIGHT+ dimBlock.y - 1)/dimBlock.y) + boundaryCellPerThreadY - 1)/boundaryCellPerThreadY
);
/*
P[INDEXP(i, j)] <-- P[i, j]
uc[INDEXU(i, j)] <-- u[i-1/2, j]
vc[INDEXV(i, j)] <-- v[i, j-1/2]
*/
template <class T>
struct SimpleBoundary
{
private:
public:
int x0, x1, y0, y1;
T InFlowU, InFlowV;
public:
// __global__ SimpleBoundary(SimpleBoundary const &sb);
__host__ __device__ SimpleBoundary(int x0, int x1, int y0, int y1, T InFlowU, T InFlowV)
: x0(x0), x1(x1), y0(y0), y1(y1), InFlowU(InFlowU), InFlowV(InFlowV)
{}
// __host__ __device__ SimpleBoundary(SimpleBoundary const &sb)
// : x0(sb.x0), x1(sb.x1), y0(sb.y0), y1(sb.y1), InFlowU(sb.InFlowU), InFlowV(sb.InFlowV)
// {}
// __host__ __device__ ~SimpleBoundary() {}
__device__ bool isInFlowBoundary(int i, int j) const { return i == 0; }
__device__ bool isOutFlowBoundary(int i, int j) const { return i == WIDTH; }
__device__ bool isFloorBoundary(int i, int j) const { return j == 0; }
__device__ bool isCeilingBoundary(int i, int j) const { return j == HEIGHT; }
__device__ bool isObstacle(int i, int j) const {
return x0 <= i && i <= x1 && y0 <= j && j <= y1;
}
__device__ T inflowU() const { return InFlowU; }
__device__ T inflowV() const { return InFlowV; }
};
template <typename T, typename BoundaryCond>
__global__
void update_boundary(T *uc, T *vc, T *P, BoundaryCond bound)
{
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int startY = x*boundaryCellPerThreadX + 1;
int endY = min(startY + boundaryCellPerThreadX, WIDTH);
int startX = y*boundaryCellPerThreadY + 1;
int endX = min(startX + boundaryCellPerThreadY, HEIGHT);
// printf("Thread (%d, %d), startX = %d, startY = %d\n", x, y, startX, startY);
int i, j;
for (j = startY; j < endY; ++j) {
uc[INDEXU(0, j)] = bound.inflowU();
vc[INDEXV(0, j)] = bound.inflowV();
uc[INDEXU(WIDTH, j)] = uc[INDEXU(WIDTH-1, j)];
vc[INDEXV(WIDTH, j)] = vc[INDEXV(WIDTH-1, j)];
P[INDEXP(WIDTH, j)] = P[INDEXP(WIDTH-1,j)];
}
for (i = startX; i < endX; ++i) {
uc[INDEXU(i, 0)] = uc[INDEXU(i, HEIGHT-1)];
vc[INDEXV(i, 0)] = vc[INDEXU(i, HEIGHT-1)];
P[INDEXP(i, 0)] = P[INDEXP(i, HEIGHT-1)];
// uc[INDEXU(i, 0)] = 0;//uc[INDEXU(i, 1)];
// vc[INDEXV(i, 0)] = 0;//vc[INDEXU(i, 1)];
uc[INDEXU(i, HEIGHT)] = uc[INDEXU(i, 1)];
vc[INDEXV(i, HEIGHT)] = vc[INDEXV(i, 1)];
P[INDEXV(i, HEIGHT)] = P[INDEXV(i, 1)];
// uc[INDEXU(i, HEIGHT)] = 0;// uc[INDEXU(i, HEIGHT-1)];
// vc[INDEXV(i, HEIGHT)] = 0;// vc[INDEXV(i, HEIGHT-1)];
}
}
template <typename T, typename BoundaryCond>
__global__
void update_uv(T const *uc, T const *vc, T const *P, T *un, T *vn, T dt, T dx, T dy, BoundaryCond bound)
{
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int startY = x*cellPerThreadX + 1;
int endY = min(startY + cellPerThreadX, WIDTH);
int startX = y*cellPerThreadY + 1;
int endX = min(startX + cellPerThreadY, HEIGHT);
int i, j;
for (i = startX; i < endX; ++i) {
for (j = startY; j < endY; ++j) {
if (bound.isObstacle(i, j)) {
// zero velocity
un[INDEXU(i, j)] = 0;
vn[INDEXU(i, j)] = 0;
} else {
// TODO: use shared memory
T Du, Dv;
T uim12j = uc[INDEXU(i-1,j)]; // u(i-1/2, j)
T uip12j = uc[INDEXU(i, j)]; // u(i+1/2, j)
T uip32j = uc[INDEXU(i+1, j)]; // u(i+3/2, j)
T uip12jp1 = uc[INDEXU(i, j+1)]; // u(i+1/2, j+1)
T uip12jm1 = uc[INDEXU(i, j-1)]; // u(i+1/2, j-1)
T uim12jp1 = uc[INDEXU(i-1, j+1)]; // u(i-1/2, j+1)
T vijp12 = vc[INDEXV(i, j)]; // v(i, j+1/2)
T vijp32 = vc[INDEXP(i, j+1)]; // v(i, j+3/2);
T vijm12 = vc[INDEXV(i, j-1)]; // v(i, j-1/2)
T vip1jp12 = vc[INDEXV(i+1, j)]; // v(i+1, j+1/2)
T vip1jm12 = vc[INDEXV(i+1, j-1)]; // v(i+1, j-1/2)
T vim1jp12 = vc[INDEXV(i-1, j)]; // v(i-1, j+1/2);
// Du
T uij = .5 * (uip12j + uim12j);
T uip1j = .5 * (uip32j + uip12j);
Du = -1/dx * (uip1j*uip1j - uij*uij);
// uv(i+1/2, j+1/2)
T uvip12jp12 = .5*(uip12j + uip12jp1) * .5*(vip1jp12 + vijp12);
// uv(i+1/2, j-1/2)
T uvip12jm12 = .5*(uip12jm1 + uip12j) * .5*(vip1jm12 + vijm12);
Du += -1/dy * (uvip12jp12 - uvip12jm12);
Du += -1/dx*(P[INDEXP(i,j)] - P[INDEXP(i-1,j)]);
Du += nu*( (uip32j - 2*uip12j + uim12j) / (dx*dx)
+(uip12jp1 - 2*uip12j + uip12jm1) / (dy*dy) );
// DFu(uc, vc, P, dx, dy, i, j, Du);
un[INDEXU(i, j)] = uc[INDEXU(i, j)] + dt * Du;
// Dv
T vij = .5 * (vijp12 + vijm12); // v(i, j)
T vijp1 = .5 * (vijp32 + vijp12); // v(i, j+1)
Dv = -1/dy * (vijp1*vijp1 - vij*vij);
// uv(i-1/2, j+1/2)
T uvim12jp12 = .5*(uim12j + uim12jp1) * .5*(vim1jp12 + vijp12);
Dv += -1/dx * (uvip12jm12 - uvim12jp12);
Dv += -1/dy * (P[INDEXP(i, j)] - P[INDEXP(i, j-1)]);
Dv += nu*( (vijp32 - 2*vijp12 + vijm12) / (dy*dy)
+(vip1jp12 - 2*vijp12 + vim1jp12) / (dx*dx) );
// DFv(uc, vc, P, dx, dy, i, j, Dv);
vn[INDEXV(i, j)] = vc[INDEXV(i, j)] + dt * Dv;
}
}
}
}
template <typename T> struct double_vec;
template <> struct double_vec<float> { typedef float2 type; };
template <> struct double_vec<double> { typedef double2 type; };
template <typename T, typename BoundaryCond>
__global__
void adjust_puv(T const *uc, T const *vc, T *P, T *un, T *vn,
T dt, T dx, T dy, T beta,
int *nodivergence,
BoundaryCond bound, bool cellType)
{
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int shift = (y % 2) ^ cellType;
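// checkerboard (red-black) sweep: cellType selects which colour of cells this pass adjusts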
int startY = x*cellPerThreadX + 1;
int endY = min(startY + cellPerThreadX, WIDTH);
int startX = y*cellPerThreadY + 1;
int endX = min(startX + cellPerThreadY, HEIGHT);
T D, delta_P;
int thread_nodivergence = 1;
//typename double_vec<T>::type u12; // coalesced access u
//T *vij, *vijp1;
//T *uij, *uip1j;
for (int i = startX; i < endX; ++i, shift = 1-shift) {
for (int j = startY+shift; j < endY; j+=2) {
if (bound.isObstacle(i, j)) {
//P[INDEXP(i, j)] = 0;
} else {
//u12 = *(float2*) (un + INDEXU(i, j)); // u12 = { u(i, j), u(i+1, j) }
//uij = un + INDEXU(i, j);
//uip1j = un + INDEXU(i+1, j);
//vij = vn + INDEXV(i, j);
//vijp1 = vn + INDEXV(i, j+1);
D = 1/dx * (un[INDEXU(i+1,j)] - un[INDEXU(i,j)])
+1/dy * (vn[INDEXV(i,j+1)] - vn[INDEXV(i,j)]);
//D = 1/dx * (u12.y - u12.x) + 1/dy * (*vijp1 - *vij);
//D = 1/dx * (*uip1j - *uij) + 1/dy * (*vijp1 - *vij);
//if (fabs(D) > Dtolerance) {
delta_P = -beta * D;
P[INDEXP(i,j)] += delta_P;
un[INDEXU(i,j)] -= (dt/dx)*delta_P;
//*uij -= (dt/dx)*delta_P;
un[INDEXU(i+1,j)] += (dt/dx)*delta_P;
//*uip1j += (dt/dx)*delta_P;
//*vij -= (dt/dy)*delta_P;
//*vijp1 += (dt/dy)*delta_P;
vn[INDEXV(i,j)] -= (dt/dy)*delta_P;
vn[INDEXV(i,j+1)] += (dt/dy)*delta_P;
// thread_nodivergence = 0;
//}
thread_nodivergence &= (fabs(D) <= Dtolerance);
}
}
}
//int warp_nodivergence = __all(thread_nodivergence);
// first thread in a warp
//if ( (threadIdx.y * blockDim.x + threadIdx.x) % warpSize == 0) {
int bn = __syncthreads_and(thread_nodivergence);
// first thread in a block
if ( (threadIdx.y == 0) && (threadIdx.x == 0) ) {
//atomicAnd(&nodivergence, bn);
atomicAnd(nodivergence, bn);
}
}
template <typename T, typename BoundaryCond>
//__global__
void time_step(T *uc, T *vc, T *P, T *un, T *vn,
T dt, T print_step, T dx, T dy, T beta, BoundaryCond bound)
{
/*
dim3 dimBlock(16, 16);
dim3 dimGrid( (((WIDTH + dimBlock.x - 1)/dimBlock.x) + cellPerThreadX - 1)/cellPerThreadX,
(((HEIGHT+ dimBlock.y - 1)/dimBlock.y) + cellPerThreadY - 1)/cellPerThreadY
);
dim3 dimGrid2( (((WIDTH + dimBlock.x - 1)/dimBlock.x) + boundaryCellPerThreadX - 1)/boundaryCellPerThreadX,
(((HEIGHT+ dimBlock.y - 1)/dimBlock.y) + boundaryCellPerThreadY - 1)/boundaryCellPerThreadY
);
*/
int steps = print_step / dt;
while(steps--) {
hipLaunchKernelGGL(( update_uv), dim3(dimGrid), dim3(dimBlock), 0, 0, uc, vc, P, un, vn, dt, dx, dy, bound);
hipDeviceSynchronize();
hipLaunchKernelGGL(( update_boundary), dim3(dimGrid2), dim3(dimBlock), 0, 0, un, vn, P, bound);
hipDeviceSynchronize();
//int iteration = 0;
int hnodivergence = 1;
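// iterate the pressure correction until the velocity field is divergence-free within Dtolerance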
do {
// printf("Iteration %d\r", ++iteration);
hnodivergence = 1;
//nodivergence = 1;
hipMemcpy(nodivergence, &hnodivergence, sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( adjust_puv), dim3(dimGrid), dim3(dimBlock), 0, 0, uc, vc, P, un, vn, dt, dx, dy, beta, nodivergence, bound, true);
hipDeviceSynchronize();
hipLaunchKernelGGL(( adjust_puv), dim3(dimGrid), dim3(dimBlock), 0, 0, uc, vc, P, un, vn, dt, dx, dy, beta, nodivergence, bound, false);
hipDeviceSynchronize();
hipMemcpy(&hnodivergence, nodivergence, sizeof(int), hipMemcpyDeviceToHost);
} while (!hnodivergence);
// printf("\n");
hipLaunchKernelGGL(( update_boundary), dim3(dimGrid2), dim3(dimBlock), 0, 0, un, vn, P, bound);
hipDeviceSynchronize();
// swap (uc, un), (vc, vn)
T *tmpc = uc; uc = un; un = tmpc;
tmpc = vc; vc = vn; vn = tmpc;
}
}
template <typename T, typename BoundaryCond>
void initialize(T* &ucurrent, T* &vcurrent, T* &unew, T* &vnew, T* &P, T* &huc, T* &hvc,
int* &nodivergence,
BoundaryCond bound)
{
hipMalloc(&ucurrent, STRIDE*STRIDE * sizeof(T));
hipMalloc(&vcurrent, STRIDE*STRIDE * sizeof(T));
hipMalloc(&unew, STRIDE*STRIDE * sizeof(T));
hipMalloc(&vnew, STRIDE*STRIDE * sizeof(T));
hipMalloc(&P, STRIDE*STRIDE * sizeof(T));
// hipMemset(ucurrent, 0, STRIDE*STRIDE * sizeof(T));
// hipMemset(vcurrent, 0, STRIDE*STRIDE * sizeof(T));
hipMemset(unew, 0, STRIDE*STRIDE * sizeof(T));
hipMemset(vnew, 0, STRIDE*STRIDE * sizeof(T));
hipMemset(P, 0, STRIDE*STRIDE * sizeof(T));
hipMalloc(&nodivergence, sizeof(*nodivergence));
// inflow boundary
// for (int j = 0; j <= HEIGHT; ++j) {
// ucurrent[INDEXU(0, j)] = bound.inflowU();
// vcurrent[INDEXU(0, j)] = bound.inflowV();
// }
//hipLaunchKernelGGL(( update_boundary), dim3(dimGrid2), dim3(dimBlock), 0, 0, ucurrent, vcurrent, P, bound);
// hipDeviceSynchronize();
huc = (T*) std::malloc(STRIDE * STRIDE * sizeof(T));
hvc = (T*) std::malloc(STRIDE * STRIDE * sizeof(T));
std::memset(huc, 0, STRIDE * STRIDE * sizeof(T));
std::memset(hvc, 0, STRIDE * STRIDE * sizeof(T));
for (int j = 0; j <= HEIGHT; ++j) {
huc[j] = 1;
hvc[j] = 0;
}
// hipMemcpy(huc, ucurrent, STRIDE*STRIDE*sizeof(T), hipMemcpyDeviceToHost);
// hipMemcpy(hvc, vcurrent, STRIDE*STRIDE*sizeof(T), hipMemcpyDeviceToHost);
hipMemcpy(ucurrent, huc, STRIDE*STRIDE*sizeof(T), hipMemcpyHostToDevice);
hipMemcpy(vcurrent, hvc, STRIDE*STRIDE*sizeof(T), hipMemcpyHostToDevice);
// hipDeviceSynchronize();
}
template <typename T>
void freememory(T* ucurrent, T* vcurrent, T* unew, T* vnew, T* P, T* huc, T* hvc, int *nodivergence)
{
hipFree(ucurrent);
hipFree(vcurrent);
hipFree(unew);
hipFree(vnew);
hipFree(P);
hipFree(nodivergence);
std::free(huc);
std::free(hvc);
}
template <typename T>
void print_velocity(T const *uc, T const *vc, int index)
{
char name[20];
std::printf("Step %d\n", index);
T const *u = uc, *v = vc;
// T *u = new T[STRIDE*STRIDE];
// T *v = new T[STRIDE*STRIDE];
// for (int j = 0; j < HEIGHT; ++j)
// for (int i = 0; i < WIDTH; ++i) {
// u[INDEXU(i, j)] = .5 * (uc[INDEXU(i, j)] + uc[INDEXU(i+1, j)]);
// v[INDEXU(i, j)] = .5 * (vc[INDEXU(i, j)] + vc[INDEXU(i, j+1)]);
// }
// std::cout << "u\n";
std::snprintf(name, 19, "uc%04d.pgm", index);
exportPixmap(u, WIDTH, HEIGHT, STRIDE, name);
std::snprintf(name, 19, "uc%04d.mat", index);
exportMatlab(u, WIDTH, HEIGHT, STRIDE, name);
//exportValue(uc);
// std::cout << "\nv\n";
std::snprintf(name, 19, "vc%04d.pgm", index);
exportPixmap(v, WIDTH, HEIGHT, STRIDE, name);
std::snprintf(name, 19, "vc%04d.mat", index);
exportMatlab(v, WIDTH, HEIGHT, STRIDE, name);
//exportValue(vc);
//std::cout << "\n\n";
// delete[] u;
// delete[] v;
}
template <typename T, typename BoundaryCond>
void flow(T total_time, T print_step, T dt, T dx, T dy, T beta0, BoundaryCond &bound)
{
T *uc, *vc, *un, *vn, *P;
//int *blk_nodivergence;
T *huc, *hvc;
initialize(uc, vc, un, vn, P, huc, hvc, nodivergence, bound);
T beta = beta0 / (2*dt*(1/(dx*dx) + 1/(dy*dy)));
T accumulate_time = 0;
T last_printed = 0;
int index = 0;
print_velocity(huc, hvc, index++);
while (accumulate_time < total_time) {
accumulate_time += print_step;
//time_step<<<1,1>>>(uc, vc, P, un, vn, dt, print_step, dx, dy, beta, bound);
time_step(uc, vc, P, un, vn, dt, print_step, dx, dy, beta, bound);
hipDeviceSynchronize();
std::swap(uc, un);
std::swap(vc, vn);
// if (accumulate_time >= last_printed + print_step) {
hipMemcpy(huc, uc, STRIDE*STRIDE*sizeof(T), hipMemcpyDeviceToHost);
hipMemcpy(hvc, vc, STRIDE*STRIDE*sizeof(T), hipMemcpyDeviceToHost);
// hipDeviceSynchronize();
last_printed += print_step;
print_velocity(huc, hvc, index++);
// }
}
freememory(uc, vc, un, vn, P, huc, hvc, nodivergence);
}
int main()
{
typedef float T;
/////// calculate occupancy
int numBlocks;
int potentialBlockSize;
int minGridSize;//, gridSize;
hipOccupancyMaxPotentialBlockSize(&minGridSize, &potentialBlockSize, adjust_puv<T, SimpleBoundary<T> >, 0, WIDTH*HEIGHT);
std::cout << "minGridSize: " << minGridSize
<< ", potentialblockSize: " << potentialBlockSize << '\n';
int device;
hipDeviceProp_t prop;
int activeWarps;
int maxWarps;
hipGetDevice(&device);
hipGetDeviceProperties(&prop, device);
int blockSize = dimBlock.x * dimBlock.y;
hipOccupancyMaxActiveBlocksPerMultiprocessor(
&numBlocks,
adjust_puv<T, SimpleBoundary<T> >,
blockSize,
0);
activeWarps = numBlocks * blockSize/ prop.warpSize;
maxWarps = prop.maxThreadsPerMultiProcessor / prop.warpSize;
std::cout << "activeWarps: " << activeWarps
<< ", maxWarps: " << maxWarps << '\n';
std::cout << "Occupancy: " << (double)activeWarps/maxWarps<< '\n';
std::cout << "No of blocks: " << dimGrid.x * dimGrid.y << '\n';
/////////////////////////////////////////////////////////////////////////////////////
SimpleBoundary<T> sb ( OBSTACLE_MIN_X, OBSTACLE_MAX_X,
OBSTACLE_MIN_Y, OBSTACLE_MAX_Y,
1.f, 0.0f );
T dx = .01, dy = .01, dt = .0001;
T total_time = 900*dt, print_step = 100*dt;
T beta0 = 1.7f;
flow<T, SimpleBoundary<T> >(total_time, print_step, dt, dx, dy, beta0, sb);
}
| ccaa67fd37014b89fed6e9f7822d028697436469.cu | /*
* =====================================================================================
*
* Filename: hydrodynamic-flow.cc
*
* Description:
*
* Version: 1.0
* Created: 03/18/2015 11:23:52 AM
* Revision: none
* Compiler: gcc
*
* Author: Boss14420 (), firefox at gmail dot com
* Organization:
*
* =====================================================================================
*/
#include <cmath>
#include <cstdio>
#include <iostream>
#include <iomanip>
#include <cstring>
#include "common.hpp"
#define SWAP(x, y) (x ^= y ^= x ^= y);
#define STRIDE 128
#define WIDTH 120
#define HEIGHT 120
#define OBSTACLE_MIN_X 25
#define OBSTACLE_MAX_X 60
#define OBSTACLE_MIN_Y 10
#define OBSTACLE_MAX_Y 40
//#define dx .01f
//#define dy .01f
//#define dt .01f
#define INDEXU(x, y) ((x) * (STRIDE) + (y))
#define INDEXV(x, y) ((x) * (STRIDE) + (y))
#define INDEXP(x, y) ((x)*STRIDE+ (y))
#define SQR(x) ((x) * (x))
#define nu 0.1f
#define Dtolerance 0.01f
int *nodivergence;
//__device__ int nodivergence = 1;
#define cellPerThreadX 2
#define cellPerThreadY 1
#define boundaryCellPerThreadX 16
#define boundaryCellPerThreadY 16
__const__ dim3 dimBlock(16, 16);
__const__ dim3 dimGrid( (((WIDTH + dimBlock.x - 1)/dimBlock.x) + cellPerThreadX - 1)/cellPerThreadX,
(((HEIGHT+ dimBlock.y - 1)/dimBlock.y) + cellPerThreadY - 1)/cellPerThreadY
);
__const__ dim3 dimGrid2( (((WIDTH + dimBlock.x - 1)/dimBlock.x) + boundaryCellPerThreadX - 1)/boundaryCellPerThreadX,
(((HEIGHT+ dimBlock.y - 1)/dimBlock.y) + boundaryCellPerThreadY - 1)/boundaryCellPerThreadY
);
/*
P[INDEXP(i, j)] <-- P[i, j]
uc[INDEXU(i, j)] <-- u[i-1/2, j]
vc[INDEXV(i, j)] <-- v[i, j-1/2]
*/
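/* Added note (not in the original source): an illustrative reading of this staggered (MAC)
   layout for a single cell (i, j):
       P [INDEXP(i, j)]    pressure at the cell centre
       uc[INDEXU(i, j)]    x-velocity on the left face   u(i-1/2, j)
       uc[INDEXU(i+1, j)]  x-velocity on the right face  u(i+1/2, j)
       vc[INDEXV(i, j)]    y-velocity on the bottom face v(i, j-1/2)
       vc[INDEXV(i, j+1)]  y-velocity on the top face    v(i, j+1/2)
   which is the convention the finite-difference stencils below rely on. */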
template <class T>
struct SimpleBoundary
{
private:
public:
int x0, x1, y0, y1;
T InFlowU, InFlowV;
public:
// __global__ SimpleBoundary(SimpleBoundary const &sb);
__host__ __device__ SimpleBoundary(int x0, int x1, int y0, int y1, T InFlowU, T InFlowV)
: x0(x0), x1(x1), y0(y0), y1(y1), InFlowU(InFlowU), InFlowV(InFlowV)
{}
// __host__ __device__ SimpleBoundary(SimpleBoundary const &sb)
// : x0(sb.x0), x1(sb.x1), y0(sb.y0), y1(sb.y1), InFlowU(sb.InFlowU), InFlowV(sb.InFlowV)
// {}
// __host__ __device__ ~SimpleBoundary() {}
__device__ bool isInFlowBoundary(int i, int j) const { return i == 0; }
__device__ bool isOutFlowBoundary(int i, int j) const { return i == WIDTH; }
__device__ bool isFloorBoundary(int i, int j) const { return j == 0; }
__device__ bool isCeilingBoundary(int i, int j) const { return j == HEIGHT; }
__device__ bool isObstacle(int i, int j) const {
return x0 <= i && i <= x1 && y0 <= j && j <= y1;
}
__device__ T inflowU() const { return InFlowU; }
__device__ T inflowV() const { return InFlowV; }
};
template <typename T, typename BoundaryCond>
__global__
void update_boundary(T *uc, T *vc, T *P, BoundaryCond bound)
{
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int startY = x*boundaryCellPerThreadX + 1;
int endY = min(startY + boundaryCellPerThreadX, WIDTH);
int startX = y*boundaryCellPerThreadY + 1;
int endX = min(startX + boundaryCellPerThreadY, HEIGHT);
// printf("Thread (%d, %d), startX = %d, startY = %d\n", x, y, startX, startY);
int i, j;
for (j = startY; j < endY; ++j) {
uc[INDEXU(0, j)] = bound.inflowU();
vc[INDEXV(0, j)] = bound.inflowV();
uc[INDEXU(WIDTH, j)] = uc[INDEXU(WIDTH-1, j)];
vc[INDEXV(WIDTH, j)] = vc[INDEXV(WIDTH-1, j)];
P[INDEXP(WIDTH, j)] = P[INDEXP(WIDTH-1,j)];
}
for (i = startX; i < endX; ++i) {
uc[INDEXU(i, 0)] = uc[INDEXU(i, HEIGHT-1)];
vc[INDEXV(i, 0)] = vc[INDEXV(i, HEIGHT-1)];
P[INDEXP(i, 0)] = P[INDEXP(i, HEIGHT-1)];
// uc[INDEXU(i, 0)] = 0;//uc[INDEXU(i, 1)];
// vc[INDEXV(i, 0)] = 0;//vc[INDEXU(i, 1)];
uc[INDEXU(i, HEIGHT)] = uc[INDEXU(i, 1)];
vc[INDEXV(i, HEIGHT)] = vc[INDEXV(i, 1)];
P[INDEXP(i, HEIGHT)] = P[INDEXP(i, 1)];
// uc[INDEXU(i, HEIGHT)] = 0;// uc[INDEXU(i, HEIGHT-1)];
// vc[INDEXV(i, HEIGHT)] = 0;// vc[INDEXV(i, HEIGHT-1)];
}
}
template <typename T, typename BoundaryCond>
__global__
void update_uv(T const *uc, T const *vc, T const *P, T *un, T *vn, T dt, T dx, T dy, BoundaryCond bound)
{
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int startY = x*cellPerThreadX + 1;
int endY = min(startY + cellPerThreadX, WIDTH);
int startX = y*cellPerThreadY + 1;
int endX = min(startX + cellPerThreadY, HEIGHT);
int i, j;
for (i = startX; i < endX; ++i) {
for (j = startY; j < endY; ++j) {
if (bound.isObstacle(i, j)) {
// zero velocity
un[INDEXU(i, j)] = 0;
vn[INDEXU(i, j)] = 0;
} else {
// TODO: use shared memory
T Du, Dv;
T uim12j = uc[INDEXU(i-1,j)]; // u(i-1/2, j)
T uip12j = uc[INDEXU(i, j)]; // u(i+1/2, j)
T uip32j = uc[INDEXU(i+1, j)]; // u(i+3/2, j)
T uip12jp1 = uc[INDEXU(i, j+1)]; // u(i+1/2, j+1)
T uip12jm1 = uc[INDEXU(i, j-1)]; // u(i+1/2, j-1)
T uim12jp1 = uc[INDEXU(i-1, j+1)]; // u(i-1/2, j+1)
T vijp12 = vc[INDEXV(i, j)]; // v(i, j+1/2)
T vijp32 = vc[INDEXV(i, j+1)]; // v(i, j+3/2);
T vijm12 = vc[INDEXV(i, j-1)]; // v(i, j-1/2)
T vip1jp12 = vc[INDEXV(i+1, j)]; // v(i+1, j+1/2)
T vip1jm12 = vc[INDEXV(i+1, j-1)]; // v(i+1, j-1/2)
T vim1jp12 = vc[INDEXV(i-1, j)]; // v(i-1, j+1/2);
// Du
T uij = .5 * (uip12j + uim12j);
T uip1j = .5 * (uip32j + uip12j);
Du = -1/dx * (uip1j*uip1j - uij*uij);
// uv(i+1/2, j+1/2)
T uvip12jp12 = .5*(uip12j + uip12jp1) * .5*(vip1jp12 + vijp12);
// u(i+1/2, j+1/2)
T uvip12jm12 = .5*(uip12jm1 + uip12j) * .5*(vip1jm12 + vijm12);
Du += -1/dy * (uvip12jp12 - uvip12jm12);
Du += -1/dx*(P[INDEXP(i,j)] - P[INDEXP(i-1,j)]);
Du += nu*( (uip32j - 2*uip12j + uim12j) / (dx*dx)
+(uip12jp1 - 2*uip12j + uip12jm1) / (dy*dy) );
// DFu(uc, vc, P, dx, dy, i, j, Du);
un[INDEXU(i, j)] = uc[INDEXU(i, j)] + dt * Du;
// Dv
T vij = .5 * (vijp12 + vijm12); // v(i, j)
T vijp1 = .5 * (vijp32 + vijp12); // v(i, j+1)
Dv = -1/dy * (vijp1*vijp1 - vij*vij);
// uv(i-1/2, j+1/2)
T uvim12jp12 = .5*(uim12j + uim12jp1) * .5*(vim1jp12 + vijp12);
Dv += -1/dx * (uvip12jm12 - uvim12jp12);
Dv += -1/dy * (P[INDEXP(i, j)] - P[INDEXP(i, j-1)]);
Dv += nu*( (vijp32 - 2*vijp12 + vijm12) / (dy*dy)
+(vip1jp12 - 2*vijp12 + vim1jp12) / (dx*dx) );
// DFv(uc, vc, P, dx, dy, i, j, Dv);
vn[INDEXV(i, j)] = vc[INDEXV(i, j)] + dt * Dv;
}
}
}
}
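/* Added note (not in the original source): update_uv is one explicit Euler step of the
   discretised momentum equations on the staggered grid, roughly
       u_new(i+1/2,j) = u + dt * ( -d(u^2)/dx - d(uv)/dy - dP/dx + nu * laplacian(u) )
       v_new(i,j+1/2) = v + dt * ( -d(uv)/dx - d(v^2)/dy - dP/dy + nu * laplacian(v) )
   with every derivative built from the face/centre values gathered above. */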
template <typename T> struct double_vec;
template <> struct double_vec<float> { typedef float2 type; };
template <> struct double_vec<double> { typedef double2 type; };
template <typename T, typename BoundaryCond>
__global__
void adjust_puv(T const *uc, T const *vc, T *P, T *un, T *vn,
T dt, T dx, T dy, T beta,
int *nodivergence,
BoundaryCond bound, bool cellType)
{
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int shift = (y % 2) ^ cellType;
int startY = x*cellPerThreadX + 1;
int endY = min(startY + cellPerThreadX, WIDTH);
int startX = y*cellPerThreadY + 1;
int endX = min(startX + cellPerThreadY, HEIGHT);
T D, delta_P;
int thread_nodivergence = 1;
//typename double_vec<T>::type u12; // coalesced access u
//T *vij, *vijp1;
//T *uij, *uip1j;
for (int i = startX; i < endX; ++i, shift = 1-shift) {
for (int j = startY+shift; j < endY; j+=2) {
if (bound.isObstacle(i, j)) {
//P[INDEXP(i, j)] = 0;
} else {
//u12 = *(float2*) (un + INDEXU(i, j)); // u12 = { u(i, j), u(i+1, j) }
//uij = un + INDEXU(i, j);
//uip1j = un + INDEXU(i+1, j);
//vij = vn + INDEXV(i, j);
//vijp1 = vn + INDEXV(i, j+1);
D = 1/dx * (un[INDEXU(i+1,j)] - un[INDEXU(i,j)])
+1/dy * (vn[INDEXV(i,j+1)] - vn[INDEXV(i,j)]);
//D = 1/dx * (u12.y - u12.x) + 1/dy * (*vijp1 - *vij);
//D = 1/dx * (*uip1j - *uij) + 1/dy * (*vijp1 - *vij);
//if (fabs(D) > Dtolerance) {
delta_P = -beta * D;
P[INDEXP(i,j)] += delta_P;
un[INDEXU(i,j)] -= (dt/dx)*delta_P;
//*uij -= (dt/dx)*delta_P;
un[INDEXU(i+1,j)] += (dt/dx)*delta_P;
//*uip1j += (dt/dx)*delta_P;
//*vij -= (dt/dy)*delta_P;
//*vijp1 += (dt/dy)*delta_P;
vn[INDEXV(i,j)] -= (dt/dy)*delta_P;
vn[INDEXV(i,j+1)] += (dt/dy)*delta_P;
// thread_nodivergence = 0;
//}
thread_nodivergence &= (fabs(D) <= Dtolerance);
}
}
}
//int warp_nodivergence = __all(thread_nodivergence);
// first thread in a warp
//if ( (threadIdx.y * blockDim.x + threadIdx.x) % warpSize == 0) {
int bn = __syncthreads_and(thread_nodivergence);
// first thread in a block
if ( (threadIdx.y == 0) && (threadIdx.x == 0) ) {
//atomicAnd(&nodivergence, bn);
atomicAnd(nodivergence, bn);
}
}
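/* Added note (not in the original source): adjust_puv performs one checkerboard sweep of the
   pressure/velocity correction:
       D       = du/dx + dv/dy            (cell divergence)
       delta_P = -beta * D,      P(i,j)   += delta_P
       u(i,j) -= dt/dx*delta_P,  u(i+1,j) += dt/dx*delta_P
       v(i,j) -= dt/dy*delta_P,  v(i,j+1) += dt/dy*delta_P
   and the block-wide __syncthreads_and / atomicAnd reduction reports whether every visited
   cell already satisfies |D| <= Dtolerance. */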
template <typename T, typename BoundaryCond>
//__global__
void time_step(T *uc, T *vc, T *P, T *un, T *vn,
T dt, T print_step, T dx, T dy, T beta, BoundaryCond bound)
{
/*
dim3 dimBlock(16, 16);
dim3 dimGrid( (((WIDTH + dimBlock.x - 1)/dimBlock.x) + cellPerThreadX - 1)/cellPerThreadX,
(((HEIGHT+ dimBlock.y - 1)/dimBlock.y) + cellPerThreadY - 1)/cellPerThreadY
);
dim3 dimGrid2( (((WIDTH + dimBlock.x - 1)/dimBlock.x) + boundaryCellPerThreadX - 1)/boundaryCellPerThreadX,
(((HEIGHT+ dimBlock.y - 1)/dimBlock.y) + boundaryCellPerThreadY - 1)/boundaryCellPerThreadY
);
*/
int steps = print_step / dt;
while(steps--) {
update_uv<<<dimGrid, dimBlock>>>(uc, vc, P, un, vn, dt, dx, dy, bound);
cudaDeviceSynchronize();
update_boundary<<<dimGrid2, dimBlock>>>(un, vn, P, bound);
cudaDeviceSynchronize();
//int iteration = 0;
int hnodivergence = 1;
do {
// printf("Iteration %d\r", ++iteration);
hnodivergence = 1;
//nodivergence = 1;
cudaMemcpy(nodivergence, &hnodivergence, sizeof(int), cudaMemcpyHostToDevice);
adjust_puv<<<dimGrid, dimBlock>>>(uc, vc, P, un, vn, dt, dx, dy, beta, nodivergence, bound, true);
cudaDeviceSynchronize();
adjust_puv<<<dimGrid, dimBlock>>>(uc, vc, P, un, vn, dt, dx, dy, beta, nodivergence, bound, false);
cudaDeviceSynchronize();
cudaMemcpy(&hnodivergence, nodivergence, sizeof(int), cudaMemcpyDeviceToHost);
} while (!hnodivergence);
// printf("\n");
update_boundary<<<dimGrid2, dimBlock>>>(un, vn, P, bound);
cudaDeviceSynchronize();
// swap (uc, un), (vc, vn)
T *tmpc = uc; uc = un; un = tmpc;
tmpc = vc; vc = vn; vn = tmpc;
}
}
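/* Added note (not in the original source): the two adjust_puv launches per iteration update
   the "red" and then the "black" checkerboard cells (cellType flips the parity used for
   shift), so cells corrected within one launch never share a face velocity and the
   corrections cannot race. */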
template <typename T, typename BoundaryCond>
void initialize(T* &ucurrent, T* &vcurrent, T* &unew, T* &vnew, T* &P, T* &huc, T* &hvc,
int* &nodivergence,
BoundaryCond bound)
{
cudaMalloc(&ucurrent, STRIDE*STRIDE * sizeof(T));
cudaMalloc(&vcurrent, STRIDE*STRIDE * sizeof(T));
cudaMalloc(&unew, STRIDE*STRIDE * sizeof(T));
cudaMalloc(&vnew, STRIDE*STRIDE * sizeof(T));
cudaMalloc(&P, STRIDE*STRIDE * sizeof(T));
// cudaMemset(ucurrent, 0, STRIDE*STRIDE * sizeof(T));
// cudaMemset(vcurrent, 0, STRIDE*STRIDE * sizeof(T));
cudaMemset(unew, 0, STRIDE*STRIDE * sizeof(T));
cudaMemset(vnew, 0, STRIDE*STRIDE * sizeof(T));
cudaMemset(P, 0, STRIDE*STRIDE * sizeof(T));
cudaMalloc(&nodivergence, sizeof(*nodivergence));
// inflow boundary
// for (int j = 0; j <= HEIGHT; ++j) {
// ucurrent[INDEXU(0, j)] = bound.inflowU();
// vcurrent[INDEXU(0, j)] = bound.inflowV();
// }
// update_boundary<<<dimGrid2, dimBlock>>>(ucurrent, vcurrent, P, bound);
// cudaDeviceSynchronize();
huc = (T*) std::malloc(STRIDE * STRIDE * sizeof(T));
hvc = (T*) std::malloc(STRIDE * STRIDE * sizeof(T));
std::memset(huc, 0, STRIDE * STRIDE * sizeof(T));
std::memset(hvc, 0, STRIDE * STRIDE * sizeof(T));
for (int j = 0; j <= HEIGHT; ++j) {
huc[j] = 1;
hvc[j] = 0;
}
// cudaMemcpy(huc, ucurrent, STRIDE*STRIDE*sizeof(T), cudaMemcpyDeviceToHost);
// cudaMemcpy(hvc, vcurrent, STRIDE*STRIDE*sizeof(T), cudaMemcpyDeviceToHost);
cudaMemcpy(ucurrent, huc, STRIDE*STRIDE*sizeof(T), cudaMemcpyHostToDevice);
cudaMemcpy(vcurrent, hvc, STRIDE*STRIDE*sizeof(T), cudaMemcpyHostToDevice);
// cudaDeviceSynchronize();
}
template <typename T>
void freememory(T* ucurrent, T* vcurrent, T* unew, T* vnew, T* P, T* huc, T* hvc, int *nodivergence)
{
cudaFree(ucurrent);
cudaFree(vcurrent);
cudaFree(unew);
cudaFree(vnew);
cudaFree(P);
cudaFree(nodivergence);
std::free(huc);
std::free(hvc);
}
template <typename T>
void print_velocity(T const *uc, T const *vc, int index)
{
char name[20];
std::printf("Step %d\n", index);
T const *u = uc, *v = vc;
// T *u = new T[STRIDE*STRIDE];
// T *v = new T[STRIDE*STRIDE];
// for (int j = 0; j < HEIGHT; ++j)
// for (int i = 0; i < WIDTH; ++i) {
// u[INDEXU(i, j)] = .5 * (uc[INDEXU(i, j)] + uc[INDEXU(i+1, j)]);
// v[INDEXU(i, j)] = .5 * (vc[INDEXU(i, j)] + vc[INDEXU(i, j+1)]);
// }
// std::cout << "u\n";
std::snprintf(name, 19, "uc%04d.pgm", index);
exportPixmap(u, WIDTH, HEIGHT, STRIDE, name);
std::snprintf(name, 19, "uc%04d.mat", index);
exportMatlab(u, WIDTH, HEIGHT, STRIDE, name);
//exportValue(uc);
// std::cout << "\nv\n";
std::snprintf(name, 19, "vc%04d.pgm", index);
exportPixmap(v, WIDTH, HEIGHT, STRIDE, name);
std::snprintf(name, 19, "vc%04d.mat", index);
exportMatlab(v, WIDTH, HEIGHT, STRIDE, name);
//exportValue(vc);
//std::cout << "\n\n";
// delete[] u;
// delete[] v;
}
template <typename T, typename BoundaryCond>
void flow(T total_time, T print_step, T dt, T dx, T dy, T beta0, BoundaryCond &bound)
{
T *uc, *vc, *un, *vn, *P;
//int *blk_nodivergence;
T *huc, *hvc;
initialize(uc, vc, un, vn, P, huc, hvc, nodivergence, bound);
T beta = beta0 / (2*dt*(1/(dx*dx) + 1/(dy*dy)));
T accumulate_time = 0;
T last_printed = 0;
int index = 0;
print_velocity(huc, hvc, index++);
while (accumulate_time < total_time) {
accumulate_time += print_step;
//time_step<<<1,1>>>(uc, vc, P, un, vn, dt, print_step, dx, dy, beta, bound);
time_step(uc, vc, P, un, vn, dt, print_step, dx, dy, beta, bound);
cudaDeviceSynchronize();
std::swap(uc, un);
std::swap(vc, vn);
// if (accumulate_time >= last_printed + print_step) {
cudaMemcpy(huc, uc, STRIDE*STRIDE*sizeof(T), cudaMemcpyDeviceToHost);
cudaMemcpy(hvc, vc, STRIDE*STRIDE*sizeof(T), cudaMemcpyDeviceToHost);
// cudaDeviceSynchronize();
last_printed += print_step;
print_velocity(huc, hvc, index++);
// }
}
freememory(uc, vc, un, vn, P, huc, hvc, nodivergence);
}
int main()
{
typedef float T;
/////// calculate occupancy
int numBlocks;
int potentialBlockSize;
int minGridSize;//, gridSize;
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &potentialBlockSize, adjust_puv<T, SimpleBoundary<T> >, 0, WIDTH*HEIGHT);
std::cout << "minGridSize: " << minGridSize
<< ", potentialblockSize: " << potentialBlockSize << '\n';
int device;
cudaDeviceProp prop;
int activeWarps;
int maxWarps;
cudaGetDevice(&device);
cudaGetDeviceProperties(&prop, device);
int blockSize = dimBlock.x * dimBlock.y;
cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&numBlocks,
adjust_puv<T, SimpleBoundary<T> >,
blockSize,
0);
activeWarps = numBlocks * blockSize/ prop.warpSize;
maxWarps = prop.maxThreadsPerMultiProcessor / prop.warpSize;
std::cout << "activeWarps: " << activeWarps
<< ", maxWarps: " << maxWarps << '\n';
std::cout << "Occupancy: " << (double)activeWarps/maxWarps<< '\n';
std::cout << "No of blocks: " << dimGrid.x * dimGrid.y << '\n';
/////////////////////////////////////////////////////////////////////////////////////
SimpleBoundary<T> sb ( OBSTACLE_MIN_X, OBSTACLE_MAX_X,
OBSTACLE_MIN_Y, OBSTACLE_MAX_Y,
1.f, 0.0f );
T dx = .01, dy = .01, dt = .0001;
T total_time = 900*dt, print_step = 100*dt;
T beta0 = 1.7f;
flow<T, SimpleBoundary<T> >(total_time, print_step, dt, dx, dy, beta0, sb);
}
|
vec_scale.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <chrono>
#include <string>
#include "vec_bench.h"
#include "cuda_helper.h"
__global__ void scale(index_t n, value_t scale, value_t *x)
{
index_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n / 2)
{
auto tmp = reinterpret_cast<double2 *>(x);
tmp[i].x *= scale;
tmp[i].y *= scale;
}
// in only one thread, process final elements (if there are any)
index_t remainder = n % 2;
if (i == n / 2 && remainder != 0)
{
while (remainder)
{
index_t tail = n - remainder--; // separate name so the kernel's thread index 'i' is not shadowed
x[tail] *= scale;
}
}
}
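/* Added note (not in the original source): each thread scales one double2, i.e. two
   consecutive elements (assuming value_t is double, as the reinterpret_cast implies), which
   is why benchmark() below sizes the grid over vec_size/2, e.g.
       grid = (n/2 + block - 1) / block;
   the extra branch above lets a single thread pick up the trailing element when n is odd. */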
struct vec_scale_cuda : public vec_bench
{
void benchmark()
{
memory_transfer_per_loop = 2.0 * sizeof(value_t) * double(vec_size) * 2.0 /
(1024.0 * 1024.0 * 1024.0);
dim3 block = dim3(block_x, 1, 1);
dim3 grid = dim3((vec_size/2 + block.x - 1) / block.x, 1, 1);
std::cout << " Block: " << block.x << "(x) X " << block.y << "(y)\n"
<< " Grid: " << grid.x << "(x) X " << grid.y << "(y)\n\n";
value_t *x = new value_t[vec_size];
value_t doubleit = 2.0;
value_t halfit = 0.5;
for (index_t j = 0; j < vec_size; j++)
{
x[j] = 1.0;
}
value_t *d_x;
checkCudaErrors(hipMalloc(&d_x, vec_size * sizeof(value_t)));
hipEvent_t start, stop;
checkCudaErrors(hipEventCreate(&start));
checkCudaErrors(hipEventCreate(&stop));
checkCudaErrors(hipMemcpy(d_x, x, vec_size * sizeof(value_t), hipMemcpyHostToDevice));
index_t loops = 0;
auto startcpu = std::chrono::high_resolution_clock::now();
checkCudaErrors(hipEventRecord(start));
while ((std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::high_resolution_clock::now() - startcpu)
.count()) < 1000.0 * benchtime)
{
hipLaunchKernelGGL(( scale), dim3(grid), dim3(block), 0, 0, vec_size, doubleit, d_x);
hipLaunchKernelGGL(( scale), dim3(grid), dim3(block), 0, 0, vec_size, halfit, d_x);
checkCudaErrorsAfterKernels
loops++;
}
checkCudaErrors(hipEventRecord(stop));
checkCudaErrors(hipEventSynchronize(stop));
float duration = 0;
checkCudaErrors(hipEventElapsedTime(&duration, start, stop));
checkCudaErrors(hipMemcpy(x, d_x, vec_size * sizeof(value_t), hipMemcpyDeviceToHost));
test_result(x, vec_size, value_t(vec_size));
std::cout << " y[0] " << x[0] << '\n';
print_performance(double(loops), 1.0e-3 * duration);
delete[] x;
checkCudaErrors(hipFree(d_x));
}
vec_scale_cuda(int narg, char **arg) : vec_bench(narg, arg) {}
};
int main(int narg, char **arg)
{
check_cuda_device();
vec_scale_cuda test(narg, arg);
test.benchmark();
} | vec_scale.cu | #include <chrono>
#include <string>
#include "vec_bench.h"
#include "cuda_helper.h"
__global__ void scale(index_t n, value_t scale, value_t *x)
{
index_t i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n / 2)
{
auto tmp = reinterpret_cast<double2 *>(x);
tmp[i].x *= scale;
tmp[i].y *= scale;
}
// in only one thread, process final elements (if there are any)
index_t remainder = n % 2;
if (i == n / 2 && remainder != 0)
{
while (remainder)
{
index_t tail = n - remainder--; // separate name so the kernel's thread index 'i' is not shadowed
x[tail] *= scale;
}
}
}
struct vec_scale_cuda : public vec_bench
{
void benchmark()
{
memory_transfer_per_loop = 2.0 * sizeof(value_t) * double(vec_size) * 2.0 /
(1024.0 * 1024.0 * 1024.0);
dim3 block = dim3(block_x, 1, 1);
dim3 grid = dim3((vec_size/2 + block.x - 1) / block.x, 1, 1);
std::cout << " Block: " << block.x << "(x) X " << block.y << "(y)\n"
<< " Grid: " << grid.x << "(x) X " << grid.y << "(y)\n\n";
value_t *x = new value_t[vec_size];
value_t doubleit = 2.0;
value_t halfit = 0.5;
for (index_t j = 0; j < vec_size; j++)
{
x[j] = 1.0;
}
value_t *d_x;
checkCudaErrors(cudaMalloc(&d_x, vec_size * sizeof(value_t)));
cudaEvent_t start, stop;
checkCudaErrors(cudaEventCreate(&start));
checkCudaErrors(cudaEventCreate(&stop));
checkCudaErrors(cudaMemcpy(d_x, x, vec_size * sizeof(value_t), cudaMemcpyHostToDevice));
index_t loops = 0;
auto startcpu = std::chrono::high_resolution_clock::now();
checkCudaErrors(cudaEventRecord(start));
while ((std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::high_resolution_clock::now() - startcpu)
.count()) < 1000.0 * benchtime)
{
scale<<<grid, block>>>(vec_size, doubleit, d_x);
scale<<<grid, block>>>(vec_size, halfit, d_x);
checkCudaErrorsAfterKernels
loops++;
}
checkCudaErrors(cudaEventRecord(stop));
checkCudaErrors(cudaEventSynchronize(stop));
float duration = 0;
checkCudaErrors(cudaEventElapsedTime(&duration, start, stop));
checkCudaErrors(cudaMemcpy(x, d_x, vec_size * sizeof(value_t), cudaMemcpyDeviceToHost));
test_result(x, vec_size, value_t(vec_size));
std::cout << " y[0] " << x[0] << '\n';
print_performance(double(loops), 1.0e-3 * duration);
delete[] x;
checkCudaErrors(cudaFree(d_x));
}
vec_scale_cuda(int narg, char **arg) : vec_bench(narg, arg) {}
};
int main(int narg, char **arg)
{
check_cuda_device();
vec_scale_cuda test(narg, arg);
test.benchmark();
} |
3e9780a293ad28e9494b7025f643c80163ae2b31.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2009-2021 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: joaander
#include "ActiveForceComputeGPU.cuh"
#include "EvaluatorConstraintEllipsoid.h"
#include "hoomd/RandomNumbers.h"
#include "hoomd/RNGIdentifiers.h"
#include "hoomd/TextureTools.h"
using namespace hoomd;
#include <assert.h>
/*! \file ActiveForceComputeGPU.cu
\brief Declares GPU kernel code for calculating active forces on the GPU. Used by ActiveForceComputeGPU.
*/
//! Kernel for setting active force vectors on the GPU
/*! \param group_size number of particles
\param d_index_array stores list to convert group index to global tag
\param d_force particle force on device
\param d_torque particle torque on device
\param d_orientation particle orientation on device
\param d_f_act particle active force unit vector
\param d_t_act particle active torque unit vector
\param P position of the ellipsoid constraint
\param rx radius of the ellipsoid in x direction
\param ry radius of the ellipsoid in y direction
\param rz radius of the ellipsoid in z direction
\param orientationLink check if particle orientation is linked to active force vector
*/
__global__ void gpu_compute_active_force_set_forces_kernel(const unsigned int group_size,
unsigned int *d_index_array,
Scalar4 *d_force,
Scalar4 *d_torque,
const Scalar4 *d_pos,
const Scalar4 *d_orientation,
const Scalar4 *d_f_act,
const Scalar4 *d_t_act,
const Scalar3 P,
const Scalar rx,
const Scalar ry,
const Scalar rz,
const unsigned int N)
{
unsigned int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= group_size)
return;
unsigned int idx = d_index_array[group_idx];
Scalar4 posidx = __ldg(d_pos + idx);
unsigned int type = __scalar_as_int(posidx.w);
Scalar4 fact = __ldg(d_f_act + type);
vec3<Scalar> f(fact.w*fact.x, fact.w*fact.y, fact.w*fact.z);
quat<Scalar> quati( __ldg(d_orientation + idx));
vec3<Scalar> fi = rotate(quati, f);
d_force[idx] = vec_to_scalar4(fi, 0);
Scalar4 tact = __ldg(d_t_act + type);
vec3<Scalar> t(tact.w*tact.x, tact.w*tact.y, tact.w*tact.z);
vec3<Scalar> ti = rotate(quati, t);
d_torque[idx] = vec_to_scalar4(ti, 0);
}
//! Kernel for adjusting active force vectors to align parallel to an ellipsoid surface constraint on the GPU
/*! \param group_size number of particles
\param d_index_array stores list to convert group index to global tag
\param d_pos particle positions on device
\param d_f_act particle active force unit vector
\param P position of the ellipsoid constraint
\param rx radius of the ellipsoid in x direction
\param ry radius of the ellipsoid in y direction
\param rz radius of the ellipsoid in z direction
*/
__global__ void gpu_compute_active_force_set_constraints_kernel(const unsigned int group_size,
unsigned int *d_index_array,
const Scalar4 *d_pos,
Scalar4 *d_orientation,
const Scalar4 *d_f_act,
const Scalar3 P,
const Scalar rx,
const Scalar ry,
const Scalar rz)
{
unsigned int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= group_size)
return;
unsigned int idx = d_index_array[group_idx];
Scalar4 posidx = __ldg(d_pos + idx);
unsigned int type = __scalar_as_int(posidx.w);
EvaluatorConstraintEllipsoid Ellipsoid(P, rx, ry, rz);
Scalar3 current_pos = make_scalar3(posidx.x, posidx.y, posidx.z);
Scalar3 norm_scalar3 = Ellipsoid.evalNormal(current_pos); // the normal vector to which the particles are confined.
vec3<Scalar> norm = vec3<Scalar>(norm_scalar3);
Scalar4 fact = __ldg(d_f_act + type);
vec3<Scalar> f(fact.x, fact.y, fact.z);
quat<Scalar> quati( __ldg(d_orientation + idx));
vec3<Scalar> fi = rotate(quati, f);
Scalar dot_prod = fi.x * norm.x + fi.y * norm.y + fi.z * norm.z;
Scalar dot_perp_prod = slow::sqrt(1-dot_prod*dot_prod);
Scalar phi_half = slow::atan(dot_prod/dot_perp_prod)/2.0;
fi.x -= norm.x * dot_prod;
fi.y -= norm.y * dot_prod;
fi.z -= norm.z * dot_prod;
Scalar new_norm = 1.0/slow::sqrt(fi.x*fi.x + fi.y*fi.y + fi.z*fi.z);
fi.x *= new_norm;
fi.y *= new_norm;
fi.z *= new_norm;
vec3<Scalar> rot_vec = cross(norm,fi);
rot_vec.x *= slow::sin(phi_half);
rot_vec.y *= slow::sin(phi_half);
rot_vec.z *= slow::sin(phi_half);
quat<Scalar> rot_quat(cos(phi_half),rot_vec);
quati = rot_quat*quati;
d_orientation[idx] = quat_to_scalar4(quati);
}
//! Kernel for applying rotational diffusion to active force vectors on the GPU
/*! \param group_size number of particles
\param d_index_array stores list to convert group index to global tag
\param d_pos particle positions on device
\param d_f_act particle active force unit vector
\param P position of the ellipsoid constraint
\param rx radius of the ellipsoid in x direction
\param ry radius of the ellipsoid in y direction
\param rz radius of the ellipsoid in z direction
\param is2D check if simulation is 2D or 3D
\param rotationConst particle rotational diffusion constant
\param seed seed for random number generator
*/
__global__ void gpu_compute_active_force_rotational_diffusion_kernel(const unsigned int group_size,
unsigned int *d_tag,
unsigned int *d_index_array,
const Scalar4 *d_pos,
Scalar4 *d_orientation,
const Scalar4 *d_f_act,
const Scalar3 P,
const Scalar rx,
const Scalar ry,
const Scalar rz,
bool is2D,
const Scalar rotationConst,
const uint64_t timestep,
const uint16_t seed)
{
unsigned int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= group_size)
return;
unsigned int idx = d_index_array[group_idx];
Scalar4 posidx = __ldg(d_pos + idx);
unsigned int type = __scalar_as_int(posidx.w);
unsigned int ptag = d_tag[group_idx];
quat<Scalar> quati( __ldg(d_orientation + idx));
hoomd::RandomGenerator rng(hoomd::Seed(hoomd::RNGIdentifier::ActiveForceCompute,
timestep,
seed),
hoomd::Counter(ptag));
if (is2D) // 2D
{
Scalar delta_theta; // rotational diffusion angle
delta_theta = hoomd::NormalDistribution<Scalar>(rotationConst)(rng);
Scalar theta = delta_theta/2.0; // angle on plane defining orientation of active force vector
vec3<Scalar> b(0,0,slow::sin(theta));
quat<Scalar> rot_quat(slow::cos(theta),b);
quati = rot_quat*quati;
d_orientation[idx] = quat_to_scalar4(quati);
// in 2D there is only one meaningful direction for torque
}
else // 3D: Following Stenhammar, Soft Matter, 2014
{
if (rx == 0) // if no constraint
{
hoomd::SpherePointGenerator<Scalar> unit_vec;
vec3<Scalar> rand_vec;
unit_vec(rng, rand_vec);
Scalar4 fact = __ldg(d_f_act + type);
vec3<Scalar> f(fact.x, fact.y, fact.z);
vec3<Scalar> fi = rotate(quati, f);
vec3<Scalar> aux_vec;
aux_vec.x = fi.y * rand_vec.z - fi.z * rand_vec.y;
aux_vec.y = fi.z * rand_vec.x - fi.x * rand_vec.z;
aux_vec.z = fi.x * rand_vec.y - fi.y * rand_vec.x;
Scalar aux_vec_mag = 1.0/slow::sqrt(aux_vec.x*aux_vec.x + aux_vec.y*aux_vec.y + aux_vec.z*aux_vec.z);
aux_vec.x *= aux_vec_mag;
aux_vec.y *= aux_vec_mag;
aux_vec.z *= aux_vec_mag;
Scalar delta_theta = hoomd::NormalDistribution<Scalar>(rotationConst)(rng);
Scalar theta = delta_theta/2.0; // angle on plane defining orientation of active force vector
quat<Scalar> rot_quat(slow::cos(theta),slow::sin(theta)*aux_vec);
quati = rot_quat*quati;
d_orientation[idx].x = quati.s;
d_orientation[idx].y = quati.v.x;
d_orientation[idx].z = quati.v.y;
d_orientation[idx].w = quati.v.z;
}
else // if constraint
{
EvaluatorConstraintEllipsoid Ellipsoid(P, rx, ry, rz);
Scalar3 current_pos = make_scalar3(posidx.x, posidx.y, posidx.z);
Scalar3 norm_scalar3 = Ellipsoid.evalNormal(current_pos); // the normal vector to which the particles are confined.
vec3<Scalar> norm;
norm = vec3<Scalar> (norm_scalar3);
Scalar delta_theta = hoomd::NormalDistribution<Scalar>(rotationConst)(rng);
Scalar theta = delta_theta/2.0; // angle on plane defining orientation of active force vector
quat<Scalar> rot_quat(slow::cos(theta),slow::sin(theta)*norm);
quati = rot_quat*quati;
d_orientation[idx] = quat_to_scalar4(quati);
}
}
}
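/* Added note (not in the original source): the half angle theta = delta_theta/2 used above
   follows the quaternion convention that a rotation by angle a about unit axis n is
   q = (cos(a/2), sin(a/2) * n), so rot_quat * quati rotates the particle orientation by the
   sampled diffusion angle delta_theta. */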
hipError_t gpu_compute_active_force_set_forces(const unsigned int group_size,
unsigned int *d_index_array,
Scalar4 *d_force,
Scalar4 *d_torque,
const Scalar4 *d_pos,
const Scalar4 *d_orientation,
const Scalar4 *d_f_act,
const Scalar4 *d_t_act,
const Scalar3& P,
const Scalar rx,
const Scalar ry,
const Scalar rz,
const unsigned int N,
unsigned int block_size)
{
// setup the grid to run the kernel
dim3 grid( group_size / block_size + 1, 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
hipMemset(d_force, 0, sizeof(Scalar4)*N);
hipLaunchKernelGGL((gpu_compute_active_force_set_forces_kernel), dim3(grid), dim3(threads), 0, 0, group_size,
d_index_array,
d_force,
d_torque,
d_pos,
d_orientation,
d_f_act,
d_t_act,
P,
rx,
ry,
rz,
N);
return hipSuccess;
}
hipError_t gpu_compute_active_force_set_constraints(const unsigned int group_size,
unsigned int *d_index_array,
const Scalar4 *d_pos,
Scalar4 *d_orientation,
const Scalar4 *d_f_act,
const Scalar3& P,
const Scalar rx,
const Scalar ry,
const Scalar rz,
unsigned int block_size)
{
// setup the grid to run the kernel
dim3 grid( group_size / block_size + 1, 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
hipLaunchKernelGGL((gpu_compute_active_force_set_constraints_kernel), dim3(grid), dim3(threads), 0, 0, group_size,
d_index_array,
d_pos,
d_orientation,
d_f_act,
P,
rx,
ry,
rz);
return hipSuccess;
}
hipError_t gpu_compute_active_force_rotational_diffusion(const unsigned int group_size,
unsigned int *d_tag,
unsigned int *d_index_array,
const Scalar4 *d_pos,
Scalar4 *d_orientation,
const Scalar4 *d_f_act,
const Scalar3& P,
const Scalar rx,
const Scalar ry,
const Scalar rz,
bool is2D,
const Scalar rotationConst,
const uint64_t timestep,
const uint16_t seed,
unsigned int block_size)
{
// setup the grid to run the kernel
dim3 grid( group_size / block_size + 1, 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
hipLaunchKernelGGL((gpu_compute_active_force_rotational_diffusion_kernel), dim3(grid), dim3(threads), 0, 0, group_size,
d_tag,
d_index_array,
d_pos,
d_orientation,
d_f_act,
P,
rx,
ry,
rz,
is2D,
rotationConst,
timestep,
seed);
return hipSuccess;
}
| 3e9780a293ad28e9494b7025f643c80163ae2b31.cu | // Copyright (c) 2009-2021 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
// Maintainer: joaander
#include "ActiveForceComputeGPU.cuh"
#include "EvaluatorConstraintEllipsoid.h"
#include "hoomd/RandomNumbers.h"
#include "hoomd/RNGIdentifiers.h"
#include "hoomd/TextureTools.h"
using namespace hoomd;
#include <assert.h>
/*! \file ActiveForceComputeGPU.cu
\brief Declares GPU kernel code for calculating active forces on the GPU. Used by ActiveForceComputeGPU.
*/
//! Kernel for setting active force vectors on the GPU
/*! \param group_size number of particles
\param d_index_array stores list to convert group index to global tag
\param d_force particle force on device
\param d_torque particle torque on device
\param d_orientation particle orientation on device
\param d_f_act particle active force unit vector
\param d_t_act particle active torque unit vector
\param P position of the ellipsoid constraint
\param rx radius of the ellipsoid in x direction
\param ry radius of the ellipsoid in y direction
\param rz radius of the ellipsoid in z direction
\param orientationLink check if particle orientation is linked to active force vector
*/
__global__ void gpu_compute_active_force_set_forces_kernel(const unsigned int group_size,
unsigned int *d_index_array,
Scalar4 *d_force,
Scalar4 *d_torque,
const Scalar4 *d_pos,
const Scalar4 *d_orientation,
const Scalar4 *d_f_act,
const Scalar4 *d_t_act,
const Scalar3 P,
const Scalar rx,
const Scalar ry,
const Scalar rz,
const unsigned int N)
{
unsigned int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= group_size)
return;
unsigned int idx = d_index_array[group_idx];
Scalar4 posidx = __ldg(d_pos + idx);
unsigned int type = __scalar_as_int(posidx.w);
Scalar4 fact = __ldg(d_f_act + type);
vec3<Scalar> f(fact.w*fact.x, fact.w*fact.y, fact.w*fact.z);
quat<Scalar> quati( __ldg(d_orientation + idx));
vec3<Scalar> fi = rotate(quati, f);
d_force[idx] = vec_to_scalar4(fi, 0);
Scalar4 tact = __ldg(d_t_act + type);
vec3<Scalar> t(tact.w*tact.x, tact.w*tact.y, tact.w*tact.z);
vec3<Scalar> ti = rotate(quati, t);
d_torque[idx] = vec_to_scalar4(ti, 0);
}
//! Kernel for adjusting active force vectors to align parallel to an ellipsoid surface constraint on the GPU
/*! \param group_size number of particles
\param d_index_array stores list to convert group index to global tag
\param d_pos particle positions on device
\param d_f_act particle active force unit vector
\param P position of the ellipsoid constraint
\param rx radius of the ellipsoid in x direction
\param ry radius of the ellipsoid in y direction
\param rz radius of the ellipsoid in z direction
*/
__global__ void gpu_compute_active_force_set_constraints_kernel(const unsigned int group_size,
unsigned int *d_index_array,
const Scalar4 *d_pos,
Scalar4 *d_orientation,
const Scalar4 *d_f_act,
const Scalar3 P,
const Scalar rx,
const Scalar ry,
const Scalar rz)
{
unsigned int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= group_size)
return;
unsigned int idx = d_index_array[group_idx];
Scalar4 posidx = __ldg(d_pos + idx);
unsigned int type = __scalar_as_int(posidx.w);
EvaluatorConstraintEllipsoid Ellipsoid(P, rx, ry, rz);
Scalar3 current_pos = make_scalar3(posidx.x, posidx.y, posidx.z);
Scalar3 norm_scalar3 = Ellipsoid.evalNormal(current_pos); // the normal vector to which the particles are confined.
vec3<Scalar> norm = vec3<Scalar>(norm_scalar3);
Scalar4 fact = __ldg(d_f_act + type);
vec3<Scalar> f(fact.x, fact.y, fact.z);
quat<Scalar> quati( __ldg(d_orientation + idx));
vec3<Scalar> fi = rotate(quati, f);
Scalar dot_prod = fi.x * norm.x + fi.y * norm.y + fi.z * norm.z;
Scalar dot_perp_prod = slow::sqrt(1-dot_prod*dot_prod);
Scalar phi_half = slow::atan(dot_prod/dot_perp_prod)/2.0;
fi.x -= norm.x * dot_prod;
fi.y -= norm.y * dot_prod;
fi.z -= norm.z * dot_prod;
Scalar new_norm = 1.0/slow::sqrt(fi.x*fi.x + fi.y*fi.y + fi.z*fi.z);
fi.x *= new_norm;
fi.y *= new_norm;
fi.z *= new_norm;
vec3<Scalar> rot_vec = cross(norm,fi);
rot_vec.x *= slow::sin(phi_half);
rot_vec.y *= slow::sin(phi_half);
rot_vec.z *= slow::sin(phi_half);
quat<Scalar> rot_quat(cos(phi_half),rot_vec);
quati = rot_quat*quati;
d_orientation[idx] = quat_to_scalar4(quati);
}
//! Kernel for applying rotational diffusion to active force vectors on the GPU
/*! \param group_size number of particles
\param d_index_array stores list to convert group index to global tag
\param d_pos particle positions on device
\param d_f_act particle active force unit vector
\param P position of the ellipsoid constraint
\param rx radius of the ellipsoid in x direction
\param ry radius of the ellipsoid in y direction
\param rz radius of the ellipsoid in z direction
\param is2D check if simulation is 2D or 3D
\param rotationConst particle rotational diffusion constant
\param seed seed for random number generator
*/
__global__ void gpu_compute_active_force_rotational_diffusion_kernel(const unsigned int group_size,
unsigned int *d_tag,
unsigned int *d_index_array,
const Scalar4 *d_pos,
Scalar4 *d_orientation,
const Scalar4 *d_f_act,
const Scalar3 P,
const Scalar rx,
const Scalar ry,
const Scalar rz,
bool is2D,
const Scalar rotationConst,
const uint64_t timestep,
const uint16_t seed)
{
unsigned int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= group_size)
return;
unsigned int idx = d_index_array[group_idx];
Scalar4 posidx = __ldg(d_pos + idx);
unsigned int type = __scalar_as_int(posidx.w);
unsigned int ptag = d_tag[group_idx];
quat<Scalar> quati( __ldg(d_orientation + idx));
hoomd::RandomGenerator rng(hoomd::Seed(hoomd::RNGIdentifier::ActiveForceCompute,
timestep,
seed),
hoomd::Counter(ptag));
if (is2D) // 2D
{
Scalar delta_theta; // rotational diffusion angle
delta_theta = hoomd::NormalDistribution<Scalar>(rotationConst)(rng);
Scalar theta = delta_theta/2.0; // angle on plane defining orientation of active force vector
vec3<Scalar> b(0,0,slow::sin(theta));
quat<Scalar> rot_quat(slow::cos(theta),b);
quati = rot_quat*quati;
d_orientation[idx] = quat_to_scalar4(quati);
// in 2D there is only one meaningful direction for torque
}
else // 3D: Following Stenhammar, Soft Matter, 2014
{
if (rx == 0) // if no constraint
{
hoomd::SpherePointGenerator<Scalar> unit_vec;
vec3<Scalar> rand_vec;
unit_vec(rng, rand_vec);
Scalar4 fact = __ldg(d_f_act + type);
vec3<Scalar> f(fact.x, fact.y, fact.z);
vec3<Scalar> fi = rotate(quati, f);
vec3<Scalar> aux_vec;
aux_vec.x = fi.y * rand_vec.z - fi.z * rand_vec.y;
aux_vec.y = fi.z * rand_vec.x - fi.x * rand_vec.z;
aux_vec.z = fi.x * rand_vec.y - fi.y * rand_vec.x;
Scalar aux_vec_mag = 1.0/slow::sqrt(aux_vec.x*aux_vec.x + aux_vec.y*aux_vec.y + aux_vec.z*aux_vec.z);
aux_vec.x *= aux_vec_mag;
aux_vec.y *= aux_vec_mag;
aux_vec.z *= aux_vec_mag;
Scalar delta_theta = hoomd::NormalDistribution<Scalar>(rotationConst)(rng);
Scalar theta = delta_theta/2.0; // angle on plane defining orientation of active force vector
quat<Scalar> rot_quat(slow::cos(theta),slow::sin(theta)*aux_vec);
quati = rot_quat*quati;
d_orientation[idx].x = quati.s;
d_orientation[idx].y = quati.v.x;
d_orientation[idx].z = quati.v.y;
d_orientation[idx].w = quati.v.z;
}
else // if constraint
{
EvaluatorConstraintEllipsoid Ellipsoid(P, rx, ry, rz);
Scalar3 current_pos = make_scalar3(posidx.x, posidx.y, posidx.z);
Scalar3 norm_scalar3 = Ellipsoid.evalNormal(current_pos); // the normal vector to which the particles are confined.
vec3<Scalar> norm;
norm = vec3<Scalar> (norm_scalar3);
Scalar delta_theta = hoomd::NormalDistribution<Scalar>(rotationConst)(rng);
Scalar theta = delta_theta/2.0; // angle on plane defining orientation of active force vector
quat<Scalar> rot_quat(slow::cos(theta),slow::sin(theta)*norm);
quati = rot_quat*quati;
d_orientation[idx] = quat_to_scalar4(quati);
}
}
}
hipError_t gpu_compute_active_force_set_forces(const unsigned int group_size,
unsigned int *d_index_array,
Scalar4 *d_force,
Scalar4 *d_torque,
const Scalar4 *d_pos,
const Scalar4 *d_orientation,
const Scalar4 *d_f_act,
const Scalar4 *d_t_act,
const Scalar3& P,
const Scalar rx,
const Scalar ry,
const Scalar rz,
const unsigned int N,
unsigned int block_size)
{
// setup the grid to run the kernel
dim3 grid( group_size / block_size + 1, 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
hipMemset(d_force, 0, sizeof(Scalar4)*N);
hipLaunchKernelGGL((gpu_compute_active_force_set_forces_kernel), dim3(grid), dim3(threads), 0, 0, group_size,
d_index_array,
d_force,
d_torque,
d_pos,
d_orientation,
d_f_act,
d_t_act,
P,
rx,
ry,
rz,
N);
return hipSuccess;
}
hipError_t gpu_compute_active_force_set_constraints(const unsigned int group_size,
unsigned int *d_index_array,
const Scalar4 *d_pos,
Scalar4 *d_orientation,
const Scalar4 *d_f_act,
const Scalar3& P,
const Scalar rx,
const Scalar ry,
const Scalar rz,
unsigned int block_size)
{
// setup the grid to run the kernel
dim3 grid( group_size / block_size + 1, 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
hipLaunchKernelGGL((gpu_compute_active_force_set_constraints_kernel), dim3(grid), dim3(threads), 0, 0, group_size,
d_index_array,
d_pos,
d_orientation,
d_f_act,
P,
rx,
ry,
rz);
return hipSuccess;
}
hipError_t gpu_compute_active_force_rotational_diffusion(const unsigned int group_size,
unsigned int *d_tag,
unsigned int *d_index_array,
const Scalar4 *d_pos,
Scalar4 *d_orientation,
const Scalar4 *d_f_act,
const Scalar3& P,
const Scalar rx,
const Scalar ry,
const Scalar rz,
bool is2D,
const Scalar rotationConst,
const uint64_t timestep,
const uint16_t seed,
unsigned int block_size)
{
// setup the grid to run the kernel
dim3 grid( group_size / block_size + 1, 1, 1);
dim3 threads(block_size, 1, 1);
// run the kernel
hipLaunchKernelGGL((gpu_compute_active_force_rotational_diffusion_kernel), dim3(grid), dim3(threads), 0, 0, group_size,
d_tag,
d_index_array,
d_pos,
d_orientation,
d_f_act,
P,
rx,
ry,
rz,
is2D,
rotationConst,
timestep,
seed);
return hipSuccess;
}
|
7ffa0fe7d32766906ae572808ca15b679525cb37.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void im2col_pad_kernel(float *im, int channels, int height, int width, int ksize, int stride, float *data_col)
{
int c,h,w;
int height_col = 1 + (height-1) / stride;
int width_col = 1 + (width-1) / stride;
int channels_col = channels * ksize * ksize;
int pad = ksize/2;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
int col_size = height_col*width_col*channels_col;
if (id >= col_size) return;
int col_index = id;
w = id % width_col;
id /= width_col;
h = id % height_col;
id /= height_col;
c = id % channels_col;
id /= channels_col;
int w_offset = c % ksize;
int h_offset = (c / ksize) % ksize;
int im_channel = c / ksize / ksize;
int im_row = h_offset + h * stride - pad;
int im_col = w_offset + w * stride - pad;
int im_index = im_col + width*(im_row + height*im_channel);
float val = (im_row < 0 || im_col < 0 || im_row >= height || im_col >= width) ? 0 : im[im_index];
data_col[col_index] = val;
} | 7ffa0fe7d32766906ae572808ca15b679525cb37.cu | #include "includes.h"
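/* Added note (not in the original source): im2col_pad_kernel maps one thread to one output
   element by decoding the flat id as
       col_index = ((c * height_col) + h) * width_col + w
   i.e. one entry of the [channels*ksize*ksize] x [height_col*width_col] im2col matrix, with
   'same' padding of ksize/2 and zeros written for samples falling outside the image. */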
__global__ void im2col_pad_kernel(float *im, int channels, int height, int width, int ksize, int stride, float *data_col)
{
int c,h,w;
int height_col = 1 + (height-1) / stride;
int width_col = 1 + (width-1) / stride;
int channels_col = channels * ksize * ksize;
int pad = ksize/2;
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
int col_size = height_col*width_col*channels_col;
if (id >= col_size) return;
int col_index = id;
w = id % width_col;
id /= width_col;
h = id % height_col;
id /= height_col;
c = id % channels_col;
id /= channels_col;
int w_offset = c % ksize;
int h_offset = (c / ksize) % ksize;
int im_channel = c / ksize / ksize;
int im_row = h_offset + h * stride - pad;
int im_col = w_offset + w * stride - pad;
int im_index = im_col + width*(im_row + height*im_channel);
float val = (im_row < 0 || im_col < 0 || im_row >= height || im_col >= width) ? 0 : im[im_index];
data_col[col_index] = val;
} |
69cbb4164fb9115d2ae1ba8a5f703ac8254a9b02.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include "string.h"
#include <stddef.h>
#define _USE_MATH_DEFINES
#include "math.h"
#include "helper_cuda.h"
//texture <float, 2, hipReadModeElementType> radius;
//texture <float, 2, hipReadModeElementType> angle;
//texture <float, 2, hipReadModeElementType> sensor_model;
//__constant__ float x;
//__constant__ float y;
//__constant__ float theta;
__constant__ int mapW;
__constant__ int mapH;
__constant__ float resolution;
__constant__ float range_max;
char* mystrsep(char** stringp, const char* delim)
{
char* start = *stringp;
char* p;
p = (start != NULL) ? strpbrk(start, delim) : NULL;
if (p == NULL)
{
*stringp = NULL;
}
else
{
*p = '\0';
*stringp = p + 1;
}
return start;
}
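/* Added note (not in the original source): mystrsep mirrors POSIX strsep(); an illustrative
   use (hypothetical variable names) is
       char *tok;
       while ((tok = mystrsep(&line, " ")) != NULL) { ... }
   where *stringp is NUL-terminated in place and advanced past each delimiter, becoming NULL
   once the input is exhausted. */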
__global__ void __launch_bounds__(1024) initMap(float* map, int w, int h, size_t pitch, int numX, int numY){
unsigned int idx=blockIdx.x*blockDim.x+threadIdx.x;
unsigned int idy=blockIdx.y*blockDim.y+threadIdx.y;
unsigned int w_th=w/(blockDim.x*gridDim.x);
unsigned int h_th=h/(blockDim.y*gridDim.y);
for(unsigned int i=0; i<h_th; ++i)
{
for(unsigned int j=0; j<w_th; ++j)
{
unsigned int x=idx*w_th+j;
unsigned int y=idy*h_th+i;
if(x<w && y<h)
{
map[x+y*pitch]=-1.0f;
}
}
}
__syncthreads();
}
__device__ void getCoordsBresenham(float *coords, float * range, float * x_o, float * y_o, float * theta_o)
{
__shared__ int x1, y1, x2, y2;
__shared__ float delta_x, delta_y, m;
__shared__ int sign_delta_x, sign_delta_y;
__shared__ float theta_b;
if(threadIdx.x==0)
{
theta_b=*theta_o+blockIdx.x*M_PI/359-M_PI_2;
float s;
float c;
__sincosf(theta_b, &s, &c);
//mapW/H is offset, 0.1f is resolution
x1=(int)floorf(mapW/2+*x_o/resolution);
y1=(int)floorf(mapH/2+*y_o/resolution);
//0.1f for wall thickness, if needed, add to range before mul
x2=(int)floorf(mapW/2+(*x_o +(*range+0.1f)*c)/resolution);
y2=(int)floorf(mapH/2+(*y_o +(*range+0.1f)*s)/resolution);
delta_x=(float)(x2-x1);
delta_y=(float)(y2-y1);
/*
sign_delta_x=1;
if(delta_x<0)sign_delta_x=-1;
sign_delta_y=1;
if(delta_y<0)sign_delta_y=-1;
*/
sign_delta_x=copysignf(1, delta_x);
sign_delta_y=copysignf(1, delta_y);
}
__syncthreads();
if(*range<range_max)
{
int current_x, current_y;
if(fabs(delta_y)>fabs(delta_x))
{
m=delta_x/delta_y;
current_y=y1+sign_delta_y*threadIdx.x;
current_x=x1+rintf(m*(current_y-y1));
//current_x=x1+floorf(0.4999999f+m*(current_y-y1));
}
else
{
m=delta_y/delta_x;
current_x=x1+sign_delta_x*threadIdx.x;
current_y=y1+rintf(m*(current_x-x1));
//current_y=y1+floorf(0.4999999f+m*(current_x-x1));
}
coords[0]=current_x;
coords[1]=current_y;
}
else
{
coords[0]=-1;
coords[1]=-1;
}
if(coords[0]>=0 && coords[0]<mapW && coords[1]>=0 && coords[1]<mapH)
{
coords[2]=hypotf(coords[0]-x1, coords[1]-y1)*resolution;
}
else
{
coords[2]=-1;
}
}
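/* Added note (not in the original source): each block traces one laser beam and each thread
   owns one step along it. Thread 0 computes the start/end cells (the endpoint is pushed
   0.1 m past the measured range for wall thickness); every thread then advances threadIdx.x
   cells along the dominant axis, interpolating the other axis with slope m. coords[] returns
   the cell and its metric distance from the robot; the distance is set to -1 for beams at
   range_max or for cells that fall outside the map. */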
__global__ void updateMapBresenham(float *map, size_t pitch, float *scan_gpu, float x, float y, float theta){
__shared__ float range;
__shared__ int x1, y1, x2, y2;
__shared__ float delta_x, delta_y, m;
__shared__ int sign_delta_x, sign_delta_y;
__shared__ float theta_b;
float coords[3];
if(threadIdx.x==0)
{
range=scan_gpu[blockIdx.x];
}
getCoordsBresenham(coords, &range, &x, &y, &theta);
//printf("coords:%d %d\n", coords[0], coords[1]);
if(coords[2]>=0)
{
//0.1f because going from grid (10cm cell) to meters
float d=coords[2];
int current_x=(int)coords[0];
int current_y=(int)coords[1];
//divide by 100 because rmax is #of cells, ie 500->turn to meters
//float k=1-(d/rmax)*(d/rmax)/100;
//float k=1;
//float k=0.6;
//float s=0.00001425*range*range;
//float s=0.4;
//float s=0.6;
//float expon=((d-range)/s)*((d-range)/s);
float prob;
if(d<range)
{
//sensor model
//prob=0.3+(k/s*__frsqrt_rn(s)+0.2)*__expf(-0.5*expon);
if(d<1.0f)
prob=0.45f;
else
prob=0.45f+(d-1.0f)/6.4f*(0.5f-0.45f);
}
else
{
//sensor model
//prob=0.5+k/s*__frsqrt_rn(s)*__expf(-0.5*expon);
if(d<1.0f)
prob=0.75f;
else
prob=0.75f+(d-1.0f)/6.4f*(0.5f-0.75f);
}
//map[current_x+current_y*pitch]+=__logf(prob/(1-prob));
if (d<=range+0.1f && d<=6.4f)
{
float pr=map[current_x+current_y*pitch];
if(pr==-1.0f)
pr=0.5f;
map[current_x+current_y*pitch]=1.0f-1.0f/(1.0f+prob/(1.0f-prob)*pr/(1.0f-pr));
//printf("-------------------------------------------------------------------updating map\n");
}
}
//if(threadIdx.x==0)
//{
// range=scan_gpu[blockIdx.x];
// theta_b=theta+blockIdx.x*M_PI/359-M_PI_2;
// float s;
// float c;
// __sincosf(theta_b, &s, &c);
// //mapW/H is offset, 0.1f is resolution
// x1=(int)floorf(mapW/2+x/resolution);
// y1=(int)floorf(mapH/2+y/resolution);
// //0.1f for wall thickness, if needed, add to range before mul
// x2=(int)floorf(mapW/2+(x+(range+0.1f)*c)/resolution);
// y2=(int)floorf(mapH/2+(y+(range+0.1f)*s)/resolution);
// delta_x=(float)(x2-x1);
// delta_y=(float)(y2-y1);
// /*
// sign_delta_x=1;
// if(delta_x<0)sign_delta_x=-1;
// sign_delta_y=1;
// if(delta_y<0)sign_delta_y=-1;
// */
// sign_delta_x=copysignf(1, delta_x);
// sign_delta_y=copysignf(1, delta_y);
//}
//__syncthreads();
//if(range<range_max)
//{
// int current_x, current_y;
// if(fabs(delta_y)>fabs(delta_x))
// {
// m=delta_x/delta_y;
// current_y=y1+sign_delta_y*threadIdx.x;
// current_x=x1+rintf(m*(current_y-y1));
// //current_x=x1+floorf(0.4999999f+m*(current_y-y1));
// }
// else
// {
// m=delta_y/delta_x;
// current_x=x1+sign_delta_x*threadIdx.x;
// current_y=y1+rintf(m*(current_x-x1));
// //current_y=y1+floorf(0.4999999f+m*(current_x-x1));
// }
// if(current_x>=0 && current_x<mapW && current_y>=0 && current_y<mapH)
// {
// //0.1f because going from grid (10cm cell) to meters
// float d=hypotf(current_x-x1, current_y-y1)*resolution;
// //divide by 100 because rmax is #of cells, ie 500->turn to meters
// //float k=1-(d/rmax)*(d/rmax)/100;
// //float k=1;
// //float k=0.6;
// //float s=0.00001425*range*range;
// //float s=0.4;
// //float s=0.6;
// //float expon=((d-range)/s)*((d-range)/s);
// float prob;
// if(d<range)
// {
// //sensor model
// //prob=0.3+(k/s*__frsqrt_rn(s)+0.2)*__expf(-0.5*expon);
// if(d<1.0f)
// prob=0.45f;
// else
// prob=0.45f+(d-1.0f)/6.4f*(0.5f-0.45f);
// }
// else
// {
// //sensor model
// //prob=0.5+k/s*__frsqrt_rn(s)*__expf(-0.5*expon);
//
// if(d<1.0f)
// prob=0.75f;
// else
// prob=0.75f+(d-1.0f)/6.4f*(0.5f-0.75f);
//
// }
// //map[current_x+current_y*pitch]+=__logf(prob/(1-prob));
//
// if (d<=range+0.1f && d<=6.4f)
// {
// float pr=map[current_x+current_y*pitch];
// if(pr==-1.0f)
// pr=0.5f;
// map[current_x+current_y*pitch]=1.0f-1.0f/(1.0f+prob/(1.0f-prob)*pr/(1.0f-pr));
// //printf("-------------------------------------------------------------------updating map\n");
// }
// }
// else
// {
// //printf("%d %d\n", current_x, current_y);
// }
//}
//else
//{
// //printf("range: %d \n", range);
//}
}
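/* Added note (not in the original source): the cell update above is the binary Bayes filter
   written without log-odds. With inverse-sensor-model probability p = prob and prior pr,
       posterior = 1 - 1 / (1 + (p/(1-p)) * (pr/(1-pr)))
   which is exactly the value stored in map[]; unknown cells (-1.0f) are first reset to the
   uninformative prior 0.5f, and only cells within the beam (d <= range+0.1 and d <= 6.4 m)
   are touched. */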
//__global__ void __launch_bounds__(1024) updateMap(float x, float y, float theta, float* map, float* scan_gpu, size_t pitch, int mapW, int mapH, float rmax){
// __shared__ float scan[360];
// /*first 360 threads load scan*/
// unsigned int scanperthread=360/(blockDim.x*blockDim.y);
// if(scanperthread>1){
// unsigned int ind=(threadIdx.x*blockDim.x+threadIdx.y)*scanperthread;
// unsigned int off;
// for(off=0; off<scanperthread; off++){
// if(ind+off<360)
// scan[ind+off]=scan_gpu[ind+off];
// }
// }
// else{
// unsigned int ind=threadIdx.x*blockDim.x+threadIdx.y;
// if(ind<360){
// scan[ind]=scan_gpu[ind];
// }
// }
// //printf("scan loaded\n");
// __syncthreads();
// float x_local_lu=(blockIdx.x*blockDim.x+threadIdx.x)*1.0/(gridDim.x*blockDim.x)*rmax;
// float y_local_lu=(blockIdx.y*blockDim.y+threadIdx.y)*1.0/(gridDim.y*blockDim.y)*rmax;
// /*to fix: the 10.0 should be s_m_resolution*/
// //float val=tex2D(sensor_model, tex2D(radius, x_local_lu, y_local_lu)*10.0, scan[(int)rint(tex2D(angle, x_local_lu, y_local_lu))]*10.0);
// float val=tex2D(sensor_model, scan[(int)rint(tex2D(angle, x_local_lu, y_local_lu))]*10.0, tex2D(radius, x_local_lu, y_local_lu)*10.0);
// //if(tex2D(radius, x_local_lu, y_local_lu)>scan[(int)rint(tex2D(angle, x_local_lu, y_local_lu))])
// // printf("val:%f\n", val);
// //printf("angle:%d\n", (int)rint(tex2D(angle, x_local_lu, y_local_lu)));
// //printf("val:%f\n", val);
// if (val!=0.5f)
// {
// float x_local=x_local_lu-rmax/2;
// float y_local=rmax/2-y_local_lu;
// x_local=x_local*__cosf(theta)-y_local*__sinf(theta);
// y_local=x_local*__sinf(theta)+y_local*__cosf(theta);
// //int x_map=(int)rint(x_local*cosf(theta)+y_local*sinf(theta)-x+mapW/2.0);
// //int y_map=(int)rint(-x_local*sinf(theta)+y_local*cosf(theta)-y+mapH/2.0);
// int x_map_cell=(int)rint(x_local+x*10.0f+mapW/2.0);
// int y_map_cell=(int)rint(-(y_local+y*10.0f-mapH/2.0));
// /*if(x_map_cell<0 || y_map_cell<0)
// printf("%f %f\n", x_map_cell, y_map_cell);
// */if(x_map_cell<mapH && y_map_cell<mapW ){
// //no size difference between local and global cells, otherwise you'd need to divide by global cell size to get map cell
// //int x_map_cell=(int)rint(x_map);
// //int y_map_cell=(int)rint(y_map);
// if(scan[(int)rint(tex2D(angle, x_local_lu, y_local_lu))]>0.0)
// {
// size_t index=x_map_cell*pitch+y_map_cell;
// //map[index]=0.5f*val+0.5f*map[index];
// map[index]=1-1/(1+map[index]/(1-map[index])*val/(1-val));
// }
// /*
// if(scan[(int)rint(tex2D(angle, x_local_lu, y_local_lu))]>0.0){}
// if(map[index]<0.0f)
// map[index]=val;
// else
// map[index]=0.5f*val+0.5f*map[index];
// */
// }
// }
// __syncthreads();
//}
int main(int argc, char** argv){
float *r;
float *a;
float *s_m;
/*size of the matrix in cells*/
int local_size=500;
//int map_size=1000;
int map_size=1600;
float cell_dim=0.1;
//int map_size_x=1600;
//int map_size_y=880;
//float s_m_resolution=10.0;
//r=(float*)malloc(sizeof(float)*local_size*local_size);
//a=(float*)malloc(sizeof(float)*local_size*local_size);
//s_m=(float*)malloc(sizeof(float)*local_size*local_size*(int)(s_m_resolution*s_m_resolution));
//int loopX=0;
//int loopY=0;
///*initialization of lookups for radius, angle and sensor model*/
//for(loopY=0; loopY<local_size*s_m_resolution; loopY++){
// for(loopX=0; loopX<local_size*s_m_resolution; loopX++){
// if(loopX<local_size && loopY<local_size){
// float x_cell=loopX*cell_dim+cell_dim/2.0f-local_size*cell_dim/2.0f;
// float y_cell=-loopY*cell_dim+cell_dim/2.0f+local_size*cell_dim/2.0f;
// r[loopY*local_size+loopX]=hypotf(x_cell, y_cell);
// a[loopY*local_size+loopX]=(atan2(y_cell, x_cell)+M_PI)/M_PI*180.0f;
// //a[loopY*local_size+loopX]=atan2(y_cell, x_cell)/M_PI*180.0f;
// }
// if (abs(loopX-loopY)<s_m_resolution/2.0){
// s_m[loopY*local_size*((int)s_m_resolution)+loopX]=0.95f;
// }
// else{
// if (loopY<loopX){
// s_m[loopY*local_size*((int)s_m_resolution)+loopX]=0.05f;
// }
// else{
// /*
// float min=(local_size*10<loopX+s_m_resolution?local_size*10:loopX+s_m_resolution);
// if (loopY> min){
// s_m[loopX*local_size*10+loopY]=0.5f;
// }
// */
// s_m[loopY*local_size*((int)s_m_resolution)+loopX]=0.5f;
// }
// }
// }
//}
///*setting filter mode for the textures. It's linear for radius and angle so I get interpolation "for free"*/
////printf("radius 0:%f\n", r[0]);
////printf("angle 0:%f\n", a[0]);
////getchar();
//sensor_model.filterMode=hipFilterModePoint;
//radius.filterMode=hipFilterModeLinear;
//angle.filterMode=hipFilterModeLinear;
/*creating the cudaArrays that will contain the textures*/
hipChannelFormatDesc cf=hipCreateChannelDesc<float>();
/*hipArray *r_gpu;
checkCudaErrors(hipMallocArray(&r_gpu, &cf, local_size, local_size));
checkCudaErrors(hipMemcpyToArray(r_gpu, 0, 0, r, sizeof(float)*local_size*local_size, hipMemcpyHostToDevice));
hipArray *a_gpu;
checkCudaErrors(hipMallocArray(&a_gpu, &cf, local_size, local_size));
checkCudaErrors(hipMemcpyToArray(a_gpu, 0, 0, a, sizeof(float)*local_size*local_size, hipMemcpyHostToDevice));
float *s_m_gpu;
size_t pitch_s;
checkCudaErrors(hipMallocPitch(&s_m_gpu, &pitch_s, local_size*((int)s_m_resolution)*sizeof(float), local_size*((int)s_m_resolution)));
checkCudaErrors(hipMemcpy2D(s_m_gpu, pitch_s, s_m, local_size*((int)s_m_resolution)*sizeof(float), local_size*((int)s_m_resolution)*sizeof(float), local_size*((int)s_m_resolution), hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy2D(s_m, local_size*((int)s_m_resolution)*sizeof(float), s_m_gpu, pitch_s, local_size*((int)s_m_resolution)*sizeof(float), local_size*((int)s_m_resolution), hipMemcpyDeviceToHost));
FILE *s_m_ff;
FILE *rad;
FILE *ang;
s_m_ff=fopen("sensor.dat", "w");
rad=fopen("radius.dat", "w");
ang=fopen("angle.dat", "w");
if(s_m_ff!=NULL){
fwrite(s_m, sizeof(float), local_size*((int)s_m_resolution)*local_size*((int)s_m_resolution), s_m_ff);
}
if(rad!=NULL)
{
fwrite(r, sizeof(float), local_size*local_size, rad);
}
if(ang!=NULL)
{
fwrite(a, sizeof(float), local_size*local_size, ang);
}
fclose(s_m_ff);
fclose(rad);
fclose(ang);
*/
/*map initialization and texture binding*/
int width=map_size;
int height=map_size;
float res=0.025f;
float rmax=50.0f;
float* map;
size_t pitch;
checkCudaErrors(hipMallocPitch(&map,&pitch,width*sizeof(float), height));
checkCudaErrors(hipMemcpyToSymbol(mapW, &width, sizeof(int)));
checkCudaErrors(hipMemcpyToSymbol(mapH, &height, sizeof(int)));
checkCudaErrors(hipMemcpyToSymbol(resolution, &res, sizeof(float)));
checkCudaErrors(hipMemcpyToSymbol(range_max, &rmax, sizeof(float)));
dim3 numThr(32, 32);
dim3 numBlocks(width/numThr.x, height/numThr.y);
hipLaunchKernelGGL(( initMap) , dim3(numBlocks), dim3(numThr), 0, 0, map, width, height, pitch/sizeof(float), 1, 1);
hipError_t err=hipGetLastError();
if (err != hipSuccess)
printf("Error: %s\n", hipGetErrorString(err));
checkCudaErrors(hipDeviceSynchronize());
float *mapsave;
hipError_t status=hipHostMalloc(&mapsave, width*height*sizeof(float));
if(status!=hipSuccess)
printf("error allocating pinned memory\n");
size_t pitchSave=sizeof(float)*width;
checkCudaErrors(hipMemcpy2D(mapsave, pitchSave, map, pitch, width*sizeof(float), height, hipMemcpyDeviceToHost));
FILE *img;
img=fopen("mapinit.dat", "w");
if(img!=NULL){
fwrite(mapsave, sizeof(float), width*height, img);
/*int ptrIndex=0;
for(ptrIndex=0; ptrIndex<width*height; ptrIndex++){
float elem=mapsave[ptrIndex];
fprintf(img, "%f", elem);
if(ptrIndex%width==0 && ptrIndex!=0)
fprintf(img, "\n");
else
fprintf(img, " ");
}*/
}
hipHostFree(mapsave);
fclose(img);
/*checkCudaErrors(hipBindTexture2D(0,sensor_model, s_m_gpu, local_size*((int)s_m_resolution), local_size*((int)s_m_resolution), pitch_s));
checkCudaErrors(hipBindTextureToArray(radius, r_gpu));
checkCudaErrors(hipBindTextureToArray(angle, a_gpu));
*/
/*loading the range readings from file*/
FILE *f;
f=fopen("fr079-sm.log", "r");
float ares=2*M_PI/360.0f;
int numReadings=(int)(M_PI*2/ares);
//float amin=-M_PI;
float amin=0;
float areadmin=0.0f;
int astart=(int)((areadmin-amin)/ares);
float *xs=(float*)malloc(sizeof(float));
float *ys=(float*)malloc(sizeof(float));
float *thetas=(float*)malloc(sizeof(float));
int *numScans=(int*)malloc(sizeof(int));
float **scans=(float**)malloc(sizeof(float*));
int iter=0;
int len=0;
if (f!=NULL){
char *buffer=(char*)malloc(4096*sizeof(char));
int line=0;
while(fgets(buffer, 4096, f)){
line++;
int numElem=-1;
sscanf(buffer, "FLASER %d", &numElem);
if (numElem==-1){
continue;
}
numElem+=11;
char **a;
char **res;
res=new char* [numElem];
for(a=res; (*a=mystrsep(&buffer, " "))!=NULL;){
if(**a!='\0')
if(++a>=&res[numElem])
break;
}
int i, j;
numScans[iter]=atoi(res[1]);
float *readings_f=(float*)malloc(numReadings*sizeof(float));
/*for(j=0; j<astart; j++){
readings_f[j]=-1.0;
}*/
for(i=2; i<2+atoi(res[1]); i++){
sscanf(res[i], "%f", &readings_f[i-2]);
//readings_f[astart+i-2]*=100;
}
float x=(float)atof(res[i]);
//float x=(float)atof(res[i])*10;
xs[iter]=x;
//float y=(float)atof(res[i+1])*10;
float y=(float)atof(res[i+1]);
ys[iter]=y;
float theta=(float)atof(res[i+2]);
thetas[iter]=theta;
scans[iter]=readings_f;
iter++;
float *xs_new=(float*)realloc(xs, (iter+1)*sizeof(float));
float *ys_new=(float*)realloc(ys, (iter+1)*sizeof(float));
float *thetas_new=(float*)realloc(thetas, (iter+1)*sizeof(float));
int *numScans_new=(int*)realloc(numScans, (iter+1)*sizeof(int));
float **scans_new=(float**)realloc(scans, (iter+1)*sizeof(float*));
if (xs_new!=NULL)
xs=xs_new;
else
printf("no xs");
if (ys_new!=NULL)
ys=ys_new;
else
printf("no ys");
if (thetas_new!=NULL)
thetas=thetas_new;
else
printf("no thetas");
if (scans_new!=NULL)
scans=scans_new;
else
printf("no scans");
if(numScans_new!=NULL)
numScans=numScans_new;
else
printf("no numScans");
buffer=(char*)malloc(4096*sizeof(char));
}
xs=(float*)realloc(xs, iter*sizeof(float));
ys=(float*)realloc(ys, iter*sizeof(float));
thetas=(float*)realloc(thetas, iter*sizeof(float));
numScans=(int*)realloc(numScans, iter*sizeof(int));
scans=(float**)realloc(scans, iter*sizeof(float*));
len=iter;
/*int j;
for(j=0; j<iter; j++){
printf("xs:%f\t", xs[j]);
printf("ys:%f\t", ys[j]);
printf("thetas:%f\n", thetas[j]);
int k;
for(k=0; k<numReadings; k++){
float * s=scans[j];
printf("%f\t", s[k]);
}
printf("\n");
}
printf("lines read:%d\n", line);
*/
}
int index;
float tot_time=0.0f;
for(index=0; index<len; index++){
/*taking one range reading at a time*/
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
float* scan=scans[index];
float x_h;
float y_h;
float theta_h;
/*checkCudaErrors(hipHostMalloc(&x_h, sizeof(float)));
checkCudaErrors(hipHostMalloc(&y_h, sizeof(float)));
checkCudaErrors(hipHostMalloc(&theta_h, sizeof(float)));*/
x_h=xs[index];
y_h=ys[index];
theta_h=thetas[index];
printf("position:%f %f %f\n", x_h, y_h, theta_h);
float *scan_gpu;
checkCudaErrors(hipMalloc(&scan_gpu, sizeof(float)*numReadings));
checkCudaErrors(hipMemcpy(scan_gpu, scan, numReadings*sizeof(float), hipMemcpyHostToDevice));
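/* scan_gpu is allocated, filled and freed again on every iteration; since
numReadings never changes, it could presumably be allocated once outside the
loop if this per-scan hipMalloc/hipFree overhead ever became a bottleneck. */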
/*int numTU=32;
int numBU=(int)ceil((float)local_size/numTU);
printf("num blocks:%d\n", numBU);
dim3 numThrU(numTU, numTU);
dim3 numBlU(numBU, numBU);
*/
/*checkCudaErrors(hipMemcpyToSymbol(x, &x_h, sizeof(float)));
checkCudaErrors(hipMemcpyToSymbol(y, &y_h, sizeof(float)));
checkCudaErrors(hipMemcpyToSymbol(theta, &theta_h, sizeof(float)));
*/
//updateMap<<<numBlU, numThrU>>>(x, y, theta*M_PI/180.0f, map, scan_gpu, pitch/sizeof(float), width, height, local_size);
hipLaunchKernelGGL(( updateMapBresenham), dim3(360), dim3(256), 0, 0, map, pitch/sizeof(float),scan_gpu, x_h, y_h, theta_h);
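/* Launch geometry: one block per laser beam (360) and one thread per Bresenham
step along that beam's ray (256). At 0.025 m per cell, 256 steps cover 6.4 m,
which matches the d <= 6.4f cut-off applied inside the kernel. */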
checkCudaErrors(hipFree(scan_gpu));
/*checkCudaErrors(hipHostFree(x_h));
checkCudaErrors(hipHostFree(y_h));
checkCudaErrors(hipHostFree(theta_h));*/
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
tot_time+=time;
hipError_t err=hipGetLastError();
if (err != hipSuccess){
printf("Error: %s\n", hipGetErrorString(err));
return -1;
}
if(index%100==0){
float *mapsave;
/*saving the map every 100 iterations, just for testing purposes*/
hipHostMalloc(&mapsave, width*height*sizeof(float));
size_t pitchSave=sizeof(float)*width;
checkCudaErrors(hipMemcpy2D(mapsave, pitchSave, map, pitch, width*sizeof(float), height, hipMemcpyDeviceToHost));
FILE *img;
char filename[40];
sprintf(filename, "map%d.dat", index);
img=fopen(filename, "wb");
if(img!=NULL){
fwrite(mapsave, sizeof(float), width*height, img);
/*int ptrIndex=0;
for(ptrIndex=0; ptrIndex<width*height; ptrIndex++){
float elem=mapsave[ptrIndex];
fprintf(img, "%f ", elem);
if(ptrIndex%width==0 && ptrIndex!=0)
fprintf(img, "\n");
else
fprintf(img, " ");
}*/
}
hipHostFree(mapsave);
fclose(img);
}
}
/*unbinding textures and cleanup*/
/*checkCudaErrors(hipUnbindTexture(radius));
checkCudaErrors(hipUnbindTexture(angle));
checkCudaErrors(hipUnbindTexture(sensor_model));
checkCudaErrors(hipFreeArray(r_gpu));
checkCudaErrors(hipFreeArray(a_gpu));
checkCudaErrors(hipFree(s_m_gpu));
free(r);
free(a);
free(s_m);
*/
float avg_time=tot_time/len;
printf("avg time:%f\n", avg_time);
getchar();
}
| 69cbb4164fb9115d2ae1ba8a5f703ac8254a9b02.cu | #include <stdlib.h>
#include <stdio.h>
#include "string.h"
#include <stddef.h>
#define _USE_MATH_DEFINES
#include "math.h"
#include "helper_cuda.h"
//texture <float, 2, cudaReadModeElementType> radius;
//texture <float, 2, cudaReadModeElementType> angle;
//texture <float, 2, cudaReadModeElementType> sensor_model;
//__constant__ float x;
//__constant__ float y;
//__constant__ float theta;
__constant__ int mapW;
__constant__ int mapH;
__constant__ float resolution;
__constant__ float range_max;
char* mystrsep(char** stringp, const char* delim)
{
char* start = *stringp;
char* p;
p = (start != NULL) ? strpbrk(start, delim) : NULL;
if (p == NULL)
{
*stringp = NULL;
}
else
{
*p = '\0';
*stringp = p + 1;
}
return start;
}
__global__ void __launch_bounds__(1024) initMap(float* map, int w, int h, size_t pitch, int numX, int numY){
unsigned int idx=blockIdx.x*blockDim.x+threadIdx.x;
unsigned int idy=blockIdx.y*blockDim.y+threadIdx.y;
unsigned int w_th=w/(blockDim.x*gridDim.x);
unsigned int h_th=h/(blockDim.y*gridDim.y);
for(unsigned int i=0; i<h_th; ++i)
{
for(unsigned int j=0; j<w_th; ++j)
{
unsigned int x=idx*w_th+j;
unsigned int y=idy*h_th+i;
if(x<w && y<h)
{
map[x+y*pitch]=-1.0f;
}
}
}
__syncthreads();
}
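/* getCoordsBresenham: thread 0 of each block converts the robot pose and the
block's beam angle (the 360 beams span 180 degrees centred on the robot heading)
into start/end grid cells; every thread then takes one Bresenham step along that
ray and returns its cell in coords[0]/coords[1] and the distance from the robot
in meters in coords[2], or -1 values when the reading is out of range or the
cell falls outside the map. */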
__device__ void getCoordsBresenham(float *coords, float * range, float * x_o, float * y_o, float * theta_o)
{
__shared__ int x1, y1, x2, y2;
__shared__ float delta_x, delta_y, m;
__shared__ int sign_delta_x, sign_delta_y;
__shared__ float theta_b;
if(threadIdx.x==0)
{
theta_b=*theta_o+blockIdx.x*M_PI/359-M_PI_2;
float s;
float c;
__sincosf(theta_b, &s, &c);
//mapW/2, mapH/2 centre the map on the origin; dividing by 'resolution' (meters per cell) converts meters to cells
x1=(int)floorf(mapW/2+*x_o/resolution);
y1=(int)floorf(mapH/2+*y_o/resolution);
//0.1f for wall thickness, if needed, add to range before mul
x2=(int)floorf(mapW/2+(*x_o +(*range+0.1f)*c)/resolution);
y2=(int)floorf(mapH/2+(*y_o +(*range+0.1f)*s)/resolution);
delta_x=(float)(x2-x1);
delta_y=(float)(y2-y1);
/*
sign_delta_x=1;
if(delta_x<0)sign_delta_x=-1;
sign_delta_y=1;
if(delta_y<0)sign_delta_y=-1;
*/
sign_delta_x=copysignf(1, delta_x);
sign_delta_y=copysignf(1, delta_y);
}
__syncthreads();
if(*range<range_max)
{
int current_x, current_y;
if(fabs(delta_y)>fabs(delta_x))
{
m=delta_x/delta_y;
current_y=y1+sign_delta_y*threadIdx.x;
current_x=x1+rintf(m*(current_y-y1));
//current_x=x1+floorf(0.4999999f+m*(current_y-y1));
}
else
{
m=delta_y/delta_x;
current_x=x1+sign_delta_x*threadIdx.x;
current_y=y1+rintf(m*(current_x-x1));
//current_y=y1+floorf(0.4999999f+m*(current_x-x1));
}
coords[0]=current_x;
coords[1]=current_y;
}
else
{
coords[0]=-1;
coords[1]=-1;
}
if(coords[0]>=0 && coords[0]<mapW && coords[1]>=0 && coords[1]<mapH)
{
coords[2]=hypotf(coords[0]-x1, coords[1]-y1)*resolution;
}
else
{
coords[2]=-1;
}
}
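/* updateMapBresenham: one block per beam reads its range, walks the ray with
getCoordsBresenham and applies a simple piecewise-linear inverse sensor model
(cells in front of the hit are treated as free, cells at the hit as occupied),
then fuses the result into the grid with a binary Bayes update in odds form. */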
__global__ void updateMapBresenham(float *map, size_t pitch, float *scan_gpu, float x, float y, float theta){
__shared__ float range;
__shared__ int x1, y1, x2, y2;
__shared__ float delta_x, delta_y, m;
__shared__ int sign_delta_x, sign_delta_y;
__shared__ float theta_b;
float coords[3];
if(threadIdx.x==0)
{
range=scan_gpu[blockIdx.x];
}
getCoordsBresenham(coords, &range, &x, &y, &theta);
//printf("coords:%d %d\n", coords[0], coords[1]);
if(coords[2]>=0)
{
//coords[2] is already the distance in meters (scaled by 'resolution' in getCoordsBresenham)
float d=coords[2];
int current_x=(int)coords[0];
int current_y=(int)coords[1];
//divide by 100 because rmax is #of cells, ie 500->turn to meters
//float k=1-(d/rmax)*(d/rmax)/100;
//float k=1;
//float k=0.6;
//float s=0.00001425*range*range;
//float s=0.4;
//float s=0.6;
//float expon=((d-range)/s)*((d-range)/s);
float prob;
if(d<range)
{
//sensor model
//prob=0.3+(k/s*__frsqrt_rn(s)+0.2)*__expf(-0.5*expon);
if(d<1.0f)
prob=0.45f;
else
prob=0.45f+(d-1.0f)/6.4f*(0.5f-0.45f);
}
else
{
//sensor model
//prob=0.5+k/s*__frsqrt_rn(s)*__expf(-0.5*expon);
if(d<1.0f)
prob=0.75f;
else
prob=0.75f+(d-1.0f)/6.4f*(0.5f-0.75f);
}
//map[current_x+current_y*pitch]+=__logf(prob/(1-prob));
if (d<=range+0.1f && d<=6.4f)
{
float pr=map[current_x+current_y*pitch];
if(pr==-1.0f)
pr=0.5f;
map[current_x+current_y*pitch]=1.0f-1.0f/(1.0f+prob/(1.0f-prob)*pr/(1.0f-pr));
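/* Odds-form Bayes update: with prior p and measurement probability q this
computes 1 - 1/(1 + q/(1-q) * p/(1-p)). Example: p = 0.5, q = 0.75 gives
odds 3 * 1 = 3, i.e. a new cell value of 0.75. */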
//printf("-------------------------------------------------------------------updating map\n");
}
}
//if(threadIdx.x==0)
//{
// range=scan_gpu[blockIdx.x];
// theta_b=theta+blockIdx.x*M_PI/359-M_PI_2;
// float s;
// float c;
// __sincosf(theta_b, &s, &c);
// //mapW/H is offset, 0.1f is resolution
// x1=(int)floorf(mapW/2+x/resolution);
// y1=(int)floorf(mapH/2+y/resolution);
// //0.1f for wall thickness, if needed, add to range before mul
// x2=(int)floorf(mapW/2+(x+(range+0.1f)*c)/resolution);
// y2=(int)floorf(mapH/2+(y+(range+0.1f)*s)/resolution);
// delta_x=(float)(x2-x1);
// delta_y=(float)(y2-y1);
// /*
// sign_delta_x=1;
// if(delta_x<0)sign_delta_x=-1;
// sign_delta_y=1;
// if(delta_y<0)sign_delta_y=-1;
// */
// sign_delta_x=copysignf(1, delta_x);
// sign_delta_y=copysignf(1, delta_y);
//}
//__syncthreads();
//if(range<range_max)
//{
// int current_x, current_y;
// if(fabs(delta_y)>fabs(delta_x))
// {
// m=delta_x/delta_y;
// current_y=y1+sign_delta_y*threadIdx.x;
// current_x=x1+rintf(m*(current_y-y1));
// //current_x=x1+floorf(0.4999999f+m*(current_y-y1));
// }
// else
// {
// m=delta_y/delta_x;
// current_x=x1+sign_delta_x*threadIdx.x;
// current_y=y1+rintf(m*(current_x-x1));
// //current_y=y1+floorf(0.4999999f+m*(current_x-x1));
// }
// if(current_x>=0 && current_x<mapW && current_y>=0 && current_y<mapH)
// {
// //0.1f because going from grid (10cm cell) to meters
// float d=hypotf(current_x-x1, current_y-y1)*resolution;
// //divide by 100 because rmax is #of cells, ie 500->turn to meters
// //float k=1-(d/rmax)*(d/rmax)/100;
// //float k=1;
// //float k=0.6;
// //float s=0.00001425*range*range;
// //float s=0.4;
// //float s=0.6;
// //float expon=((d-range)/s)*((d-range)/s);
// float prob;
// if(d<range)
// {
// //sensor model
// //prob=0.3+(k/s*__frsqrt_rn(s)+0.2)*__expf(-0.5*expon);
// if(d<1.0f)
// prob=0.45f;
// else
// prob=0.45f+(d-1.0f)/6.4f*(0.5f-0.45f);
// }
// else
// {
// //sensor model
// //prob=0.5+k/s*__frsqrt_rn(s)*__expf(-0.5*expon);
//
// if(d<1.0f)
// prob=0.75f;
// else
// prob=0.75f+(d-1.0f)/6.4f*(0.5f-0.75f);
//
// }
// //map[current_x+current_y*pitch]+=__logf(prob/(1-prob));
//
// if (d<=range+0.1f && d<=6.4f)
// {
// float pr=map[current_x+current_y*pitch];
// if(pr==-1.0f)
// pr=0.5f;
// map[current_x+current_y*pitch]=1.0f-1.0f/(1.0f+prob/(1.0f-prob)*pr/(1.0f-pr));
// //printf("-------------------------------------------------------------------updating map\n");
// }
// }
// else
// {
// //printf("%d %d\n", current_x, current_y);
// }
//}
//else
//{
// //printf("range: %d \n", range);
//}
}
//__global__ void __launch_bounds__(1024) updateMap(float x, float y, float theta, float* map, float* scan_gpu, size_t pitch, int mapW, int mapH, float rmax){
// __shared__ float scan[360];
// /*first 360 threads load scan*/
// unsigned int scanperthread=360/(blockDim.x*blockDim.y);
// if(scanperthread>1){
// unsigned int ind=(threadIdx.x*blockDim.x+threadIdx.y)*scanperthread;
// unsigned int off;
// for(off=0; off<scanperthread; off++){
// if(ind+off<360)
// scan[ind+off]=scan_gpu[ind+off];
// }
// }
// else{
// unsigned int ind=threadIdx.x*blockDim.x+threadIdx.y;
// if(ind<360){
// scan[ind]=scan_gpu[ind];
// }
// }
// //printf("scan loaded\n");
// __syncthreads();
// float x_local_lu=(blockIdx.x*blockDim.x+threadIdx.x)*1.0/(gridDim.x*blockDim.x)*rmax;
// float y_local_lu=(blockIdx.y*blockDim.y+threadIdx.y)*1.0/(gridDim.y*blockDim.y)*rmax;
// /*to fix: the 10.0 should be s_m_resolution*/
// //float val=tex2D(sensor_model, tex2D(radius, x_local_lu, y_local_lu)*10.0, scan[(int)rint(tex2D(angle, x_local_lu, y_local_lu))]*10.0);
// float val=tex2D(sensor_model, scan[(int)rint(tex2D(angle, x_local_lu, y_local_lu))]*10.0, tex2D(radius, x_local_lu, y_local_lu)*10.0);
// //if(tex2D(radius, x_local_lu, y_local_lu)>scan[(int)rint(tex2D(angle, x_local_lu, y_local_lu))])
// // printf("val:%f\n", val);
// //printf("angle:%d\n", (int)rint(tex2D(angle, x_local_lu, y_local_lu)));
// //printf("val:%f\n", val);
// if (val!=0.5f)
// {
// float x_local=x_local_lu-rmax/2;
// float y_local=rmax/2-y_local_lu;
// x_local=x_local*__cosf(theta)-y_local*__sinf(theta);
// y_local=x_local*__sinf(theta)+y_local*__cosf(theta);
// //int x_map=(int)rint(x_local*cosf(theta)+y_local*sinf(theta)-x+mapW/2.0);
// //int y_map=(int)rint(-x_local*sinf(theta)+y_local*cosf(theta)-y+mapH/2.0);
// int x_map_cell=(int)rint(x_local+x*10.0f+mapW/2.0);
// int y_map_cell=(int)rint(-(y_local+y*10.0f-mapH/2.0));
// /*if(x_map_cell<0 || y_map_cell<0)
// printf("%f %f\n", x_map_cell, y_map_cell);
// */if(x_map_cell<mapH && y_map_cell<mapW ){
// //no size difference between local and global cells, otherwise you'd need to divide by global cell size to get map cell
// //int x_map_cell=(int)rint(x_map);
// //int y_map_cell=(int)rint(y_map);
// if(scan[(int)rint(tex2D(angle, x_local_lu, y_local_lu))]>0.0)
// {
// size_t index=x_map_cell*pitch+y_map_cell;
// //map[index]=0.5f*val+0.5f*map[index];
// map[index]=1-1/(1+map[index]/(1-map[index])*val/(1-val));
// }
// /*
// if(scan[(int)rint(tex2D(angle, x_local_lu, y_local_lu))]>0.0){}
// if(map[index]<0.0f)
// map[index]=val;
// else
// map[index]=0.5f*val+0.5f*map[index];
// */
// }
// }
// __syncthreads();
//}
int main(int argc, char** argv){
float *r;
float *a;
float *s_m;
/*size of the matrix in cells*/
int local_size=500;
//int map_size=1000;
int map_size=1600;
float cell_dim=0.1;
//int map_size_x=1600;
//int map_size_y=880;
//float s_m_resolution=10.0;
//r=(float*)malloc(sizeof(float)*local_size*local_size);
//a=(float*)malloc(sizeof(float)*local_size*local_size);
//s_m=(float*)malloc(sizeof(float)*local_size*local_size*(int)(s_m_resolution*s_m_resolution));
//int loopX=0;
//int loopY=0;
///*initialization of lookups for radius, angle and sensor model*/
//for(loopY=0; loopY<local_size*s_m_resolution; loopY++){
// for(loopX=0; loopX<local_size*s_m_resolution; loopX++){
// if(loopX<local_size && loopY<local_size){
// float x_cell=loopX*cell_dim+cell_dim/2.0f-local_size*cell_dim/2.0f;
// float y_cell=-loopY*cell_dim+cell_dim/2.0f+local_size*cell_dim/2.0f;
// r[loopY*local_size+loopX]=hypotf(x_cell, y_cell);
// a[loopY*local_size+loopX]=(atan2(y_cell, x_cell)+M_PI)/M_PI*180.0f;
// //a[loopY*local_size+loopX]=atan2(y_cell, x_cell)/M_PI*180.0f;
// }
// if (abs(loopX-loopY)<s_m_resolution/2.0){
// s_m[loopY*local_size*((int)s_m_resolution)+loopX]=0.95f;
// }
// else{
// if (loopY<loopX){
// s_m[loopY*local_size*((int)s_m_resolution)+loopX]=0.05f;
// }
// else{
// /*
// float min=(local_size*10<loopX+s_m_resolution?local_size*10:loopX+s_m_resolution);
// if (loopY> min){
// s_m[loopX*local_size*10+loopY]=0.5f;
// }
// */
// s_m[loopY*local_size*((int)s_m_resolution)+loopX]=0.5f;
// }
// }
// }
//}
///*setting filter mode for the textures. It's linear for radius and angle so I get interpolation "for free"*/
////printf("radius 0:%f\n", r[0]);
////printf("angle 0:%f\n", a[0]);
////getchar();
//sensor_model.filterMode=cudaFilterModePoint;
//radius.filterMode=cudaFilterModeLinear;
//angle.filterMode=cudaFilterModeLinear;
/*creating the cudaArrays that will contain the textures*/
cudaChannelFormatDesc cf=cudaCreateChannelDesc<float>();
/*cudaArray *r_gpu;
checkCudaErrors(cudaMallocArray(&r_gpu, &cf, local_size, local_size));
checkCudaErrors(cudaMemcpyToArray(r_gpu, 0, 0, r, sizeof(float)*local_size*local_size, cudaMemcpyHostToDevice));
cudaArray *a_gpu;
checkCudaErrors(cudaMallocArray(&a_gpu, &cf, local_size, local_size));
checkCudaErrors(cudaMemcpyToArray(a_gpu, 0, 0, a, sizeof(float)*local_size*local_size, cudaMemcpyHostToDevice));
float *s_m_gpu;
size_t pitch_s;
checkCudaErrors(cudaMallocPitch(&s_m_gpu, &pitch_s, local_size*((int)s_m_resolution)*sizeof(float), local_size*((int)s_m_resolution)));
checkCudaErrors(cudaMemcpy2D(s_m_gpu, pitch_s, s_m, local_size*((int)s_m_resolution)*sizeof(float), local_size*((int)s_m_resolution)*sizeof(float), local_size*((int)s_m_resolution), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy2D(s_m, local_size*((int)s_m_resolution)*sizeof(float), s_m_gpu, pitch_s, local_size*((int)s_m_resolution)*sizeof(float), local_size*((int)s_m_resolution), cudaMemcpyDeviceToHost));
FILE *s_m_ff;
FILE *rad;
FILE *ang;
s_m_ff=fopen("sensor.dat", "w");
rad=fopen("radius.dat", "w");
ang=fopen("angle.dat", "w");
if(s_m_ff!=NULL){
fwrite(s_m, sizeof(float), local_size*((int)s_m_resolution)*local_size*((int)s_m_resolution), s_m_ff);
}
if(rad!=NULL)
{
fwrite(r, sizeof(float), local_size*local_size, rad);
}
if(ang!=NULL)
{
fwrite(a, sizeof(float), local_size*local_size, ang);
}
fclose(s_m_ff);
fclose(rad);
fclose(ang);
*/
/*map initialization and texture binding*/
int width=map_size;
int height=map_size;
float res=0.025f;
float rmax=50.0f;
float* map;
size_t pitch;
checkCudaErrors(cudaMallocPitch(&map,&pitch,width*sizeof(float), height));
checkCudaErrors(cudaMemcpyToSymbol(mapW, &width, sizeof(int)));
checkCudaErrors(cudaMemcpyToSymbol(mapH, &height, sizeof(int)));
checkCudaErrors(cudaMemcpyToSymbol(resolution, &res, sizeof(float)));
checkCudaErrors(cudaMemcpyToSymbol(range_max, &rmax, sizeof(float)));
dim3 numThr(32, 32);
dim3 numBlocks(width/numThr.x, height/numThr.y);
initMap <<<numBlocks, numThr>>> (map, width, height, pitch/sizeof(float), 1, 1);
cudaError_t err=cudaGetLastError();
if (err != cudaSuccess)
printf("Error: %s\n", cudaGetErrorString(err));
checkCudaErrors(cudaDeviceSynchronize());
float *mapsave;
cudaError_t status=cudaMallocHost(&mapsave, width*height*sizeof(float));
if(status!=cudaSuccess)
printf("error allocating pinned memory\n");
size_t pitchSave=sizeof(float)*width;
checkCudaErrors(cudaMemcpy2D(mapsave, pitchSave, map, pitch, width*sizeof(float), height, cudaMemcpyDeviceToHost));
FILE *img;
img=fopen("mapinit.dat", "w");
if(img!=NULL){
fwrite(mapsave, sizeof(float), width*height, img);
/*int ptrIndex=0;
for(ptrIndex=0; ptrIndex<width*height; ptrIndex++){
float elem=mapsave[ptrIndex];
fprintf(img, "%f", elem);
if(ptrIndex%width==0 && ptrIndex!=0)
fprintf(img, "\n");
else
fprintf(img, " ");
}*/
}
cudaFreeHost(mapsave);
fclose(img);
/*checkCudaErrors(cudaBindTexture2D(0,sensor_model, s_m_gpu, local_size*((int)s_m_resolution), local_size*((int)s_m_resolution), pitch_s));
checkCudaErrors(cudaBindTextureToArray(radius, r_gpu));
checkCudaErrors(cudaBindTextureToArray(angle, a_gpu));
*/
/*loading the range readings from file*/
FILE *f;
f=fopen("fr079-sm.log", "r");
float ares=2*M_PI/360.0f;
int numReadings=(int)(M_PI*2/ares);
//float amin=-M_PI;
float amin=0;
float areadmin=0.0f;
int astart=(int)((areadmin-amin)/ares);
float *xs=(float*)malloc(sizeof(float));
float *ys=(float*)malloc(sizeof(float));
float *thetas=(float*)malloc(sizeof(float));
int *numScans=(int*)malloc(sizeof(int));
float **scans=(float**)malloc(sizeof(float*));
int iter=0;
int len=0;
if (f!=NULL){
char *buffer=(char*)malloc(4096*sizeof(char));
int line=0;
while(fgets(buffer, 4096, f)){
line++;
int numElem=-1;
sscanf(buffer, "FLASER %d", &numElem);
if (numElem==-1){
continue;
}
numElem+=11;
char **a;
char **res;
res=new char* [numElem];
for(a=res; (*a=mystrsep(&buffer, " "))!=NULL;){
if(**a!='\0')
if(++a>=&res[numElem])
break;
}
int i, j;
numScans[iter]=atoi(res[1]);
float *readings_f=(float*)malloc(numReadings*sizeof(float));
/*for(j=0; j<astart; j++){
readings_f[j]=-1.0;
}*/
for(i=2; i<2+atoi(res[1]); i++){
sscanf(res[i], "%f", &readings_f[i-2]);
//readings_f[astart+i-2]*=100;
}
float x=(float)atof(res[i]);
//float x=(float)atof(res[i])*10;
xs[iter]=x;
//float y=(float)atof(res[i+1])*10;
float y=(float)atof(res[i+1]);
ys[iter]=y;
float theta=(float)atof(res[i+2]);
thetas[iter]=theta;
scans[iter]=readings_f;
iter++;
float *xs_new=(float*)realloc(xs, (iter+1)*sizeof(float));
float *ys_new=(float*)realloc(ys, (iter+1)*sizeof(float));
float *thetas_new=(float*)realloc(thetas, (iter+1)*sizeof(float));
int *numScans_new=(int*)realloc(numScans, (iter+1)*sizeof(int));
float **scans_new=(float**)realloc(scans, (iter+1)*sizeof(float*));
if (xs_new!=NULL)
xs=xs_new;
else
printf("no xs");
if (ys_new!=NULL)
ys=ys_new;
else
printf("no ys");
if (thetas_new!=NULL)
thetas=thetas_new;
else
printf("no thetas");
if (scans_new!=NULL)
scans=scans_new;
else
printf("no scans");
if(numScans_new!=NULL)
numScans=numScans_new;
else
printf("no numScans");
buffer=(char*)malloc(4096*sizeof(char));
}
xs=(float*)realloc(xs, iter*sizeof(float));
ys=(float*)realloc(ys, iter*sizeof(float));
thetas=(float*)realloc(thetas, iter*sizeof(float));
numScans=(int*)realloc(numScans, iter*sizeof(int));
scans=(float**)realloc(scans, iter*sizeof(float*));
len=iter;
/*int j;
for(j=0; j<iter; j++){
printf("xs:%f\t", xs[j]);
printf("ys:%f\t", ys[j]);
printf("thetas:%f\n", thetas[j]);
int k;
for(k=0; k<numReadings; k++){
float * s=scans[j];
printf("%f\t", s[k]);
}
printf("\n");
}
printf("lines read:%d\n", line);
*/
}
int index;
float tot_time=0.0f;
for(index=0; index<len; index++){
/*taking one range reading at a time*/
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
float* scan=scans[index];
float x_h;
float y_h;
float theta_h;
/*checkCudaErrors(cudaMallocHost(&x_h, sizeof(float)));
checkCudaErrors(cudaMallocHost(&y_h, sizeof(float)));
checkCudaErrors(cudaMallocHost(&theta_h, sizeof(float)));*/
x_h=xs[index];
y_h=ys[index];
theta_h=thetas[index];
printf("position:%f %f %f\n", x_h, y_h, theta_h);
float *scan_gpu;
checkCudaErrors(cudaMalloc(&scan_gpu, sizeof(float)*numReadings));
checkCudaErrors(cudaMemcpy(scan_gpu, scan, numReadings*sizeof(float), cudaMemcpyHostToDevice));
/*int numTU=32;
int numBU=(int)ceil((float)local_size/numTU);
printf("num blocks:%d\n", numBU);
dim3 numThrU(numTU, numTU);
dim3 numBlU(numBU, numBU);
*/
/*checkCudaErrors(cudaMemcpyToSymbol(x, &x_h, sizeof(float)));
checkCudaErrors(cudaMemcpyToSymbol(y, &y_h, sizeof(float)));
checkCudaErrors(cudaMemcpyToSymbol(theta, &theta_h, sizeof(float)));
*/
//updateMap<<<numBlU, numThrU>>>(x, y, theta*M_PI/180.0f, map, scan_gpu, pitch/sizeof(float), width, height, local_size);
updateMapBresenham<<<360, 256>>>(map, pitch/sizeof(float),scan_gpu, x_h, y_h, theta_h);
checkCudaErrors(cudaFree(scan_gpu));
/*checkCudaErrors(cudaFreeHost(x_h));
checkCudaErrors(cudaFreeHost(y_h));
checkCudaErrors(cudaFreeHost(theta_h));*/
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
tot_time+=time;
cudaError_t err=cudaGetLastError();
if (err != cudaSuccess){
printf("Error: %s\n", cudaGetErrorString(err));
return -1;
}
if(index%100==0){
float *mapsave;
/*saving the map every 100 iterations, just for testing purposes*/
cudaMallocHost(&mapsave, width*height*sizeof(float));
size_t pitchSave=sizeof(float)*width;
checkCudaErrors(cudaMemcpy2D(mapsave, pitchSave, map, pitch, width*sizeof(float), height, cudaMemcpyDeviceToHost));
FILE *img;
char filename[40];
sprintf(filename, "map%d.dat", index);
img=fopen(filename, "wb");
if(img!=NULL){
fwrite(mapsave, sizeof(float), width*height, img);
/*int ptrIndex=0;
for(ptrIndex=0; ptrIndex<width*height; ptrIndex++){
float elem=mapsave[ptrIndex];
fprintf(img, "%f ", elem);
if(ptrIndex%width==0 && ptrIndex!=0)
fprintf(img, "\n");
else
fprintf(img, " ");
}*/
}
cudaFreeHost(mapsave);
fclose(img);
}
}
/*unbinding textures and cleanup*/
/*checkCudaErrors(cudaUnbindTexture(radius));
checkCudaErrors(cudaUnbindTexture(angle));
checkCudaErrors(cudaUnbindTexture(sensor_model));
checkCudaErrors(cudaFreeArray(r_gpu));
checkCudaErrors(cudaFreeArray(a_gpu));
checkCudaErrors(cudaFree(s_m_gpu));
free(r);
free(a);
free(s_m);
*/
float avg_time=tot_time/len;
printf("avg time:%f\n", avg_time);
getchar();
}
|
120ac2ac64892af1071c821ce50ee4cc7f8d6cfc.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix addition: C = alpha*A + beta*B, where alpha and beta are two scalars.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <fstream>
// includes, project
//#include <cutil.h>
// includes, kernels
#include "matrixadd_kernel.cu"
#include "matrixadd.h"
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold( float*, const float*, const float, const float*, const float, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
bool CompareResults(float* A, float* B, int elements, float eps, float * error);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void MakeN(Matrix* M, Matrix* N);
void MatrixAddOnDevice(const Matrix M, const float alpha, const Matrix N, const float beta, Matrix P, float * inc, float * exc);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv) {
// Matrices for the program
Matrix M;
Matrix N;
Matrix P;
// Number of elements in the solution matrix
// Assuming square matrices, so the sizes of M, N and P are equal
unsigned int size_elements = WP * HP;
int errorM = 0;
srand(2012);
if(argc != 2)
{
printf("Error Usage ./problem2 u\n");
}
int u=atoi(argv[1]);
char filename[100]="problem2.inp";
// Check command line for input matrix files
if(u==0)
{
// No inputs provided
// Allocate and initialize the matrices
M = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 1);
N = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 1);
P = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
}
else
{
// Inputs provided
// Allocate and read source matrices from disk
M = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
N = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
P = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
errorM = ReadFile(&M, filename);
MakeN(&M, &N);
// check for read errors
if(errorM != size_elements)
{
printf("Error reading input files %d\n", errorM);
return 1;
}
}
// alpha*M + beta*N on the device
float alpha = 1.f;
float beta = 1.f;
//time the operation
float inclusiveTime, exclusiveTime,norm=0;
MatrixAddOnDevice(M, alpha, N, beta, P,&inclusiveTime,&exclusiveTime);
// compute the matrix addition on the CPU for comparison
Matrix reference = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
hipError_t error;
hipEvent_t start;
error = hipEventCreate(&start);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
hipEvent_t stop;
error = hipEventCreate(&stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = hipEventRecord(start, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
computeGold(reference.elements, M.elements, alpha, N.elements, beta, HM, WM);
// Record the stop event
error = hipEventRecord(stop, NULL);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = hipEventSynchronize(stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = hipEventElapsedTime(&msecTotal, start, stop);
if (error != hipSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error));
exit(EXIT_FAILURE);
}
// check if the device result is equivalent to the expected solution
bool res = CompareResults(reference.elements, P.elements,
size_elements, 0.0001f,&norm);
if(res==0)printf("Test failed\n"); // This should not be printed in the correct implementation
printf("%f\n%f\n%f\n%f\n",sqrt(norm),msecTotal,inclusiveTime, exclusiveTime);
// Free host matrices
free(M.elements);
M.elements = NULL;
free(N.elements);
N.elements = NULL;
free(P.elements);
P.elements = NULL;
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! wrapper around the device implementation
////////////////////////////////////////////////////////////////////////////////
void MatrixAddOnDevice(const Matrix M, const float alpha, const Matrix N, const float beta, Matrix P,float * inc,float *exc)
// ADD YOUR CODE HERE
{
hipEvent_t startEvent_inc, stopEvent_inc, startEvent_exc, stopEvent_exc;
hipEventCreate(&startEvent_inc);
hipEventCreate(&stopEvent_inc);
hipEventCreate(&startEvent_exc);
hipEventCreate(&stopEvent_exc);
float elapsedTime_inc, elapsedTime_exc;
hipEventRecord(startEvent_inc,0); // starting timing for inclusive
//Allocate device matrices
// copy matrices to device
hipEventRecord(startEvent_exc,0); // starting timing for exclusive
//launch kernel
hipEventRecord(stopEvent_exc,0); // ending timing for exclusive
hipEventSynchronize(stopEvent_exc);
hipEventElapsedTime(&elapsedTime_exc, startEvent_exc, stopEvent_exc);
// Read P from the device
hipEventRecord(stopEvent_inc,0); //ending timing for inclusive
hipEventSynchronize(stopEvent_inc);
hipEventElapsedTime(&elapsedTime_inc, startEvent_inc, stopEvent_inc);
*inc = elapsedTime_inc;
*exc = elapsedTime_exc;
}
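/* The body above is intentionally left as a skeleton (it is the exercise); a
minimal sketch of the missing steps, assuming matrixadd_kernel.cu defines a
kernel such as MatrixAddKernel(Md, alpha, Nd, beta, Pd) (that name and
signature are assumptions, not part of this file), would be:

    Matrix Md = AllocateDeviceMatrix(M);
    Matrix Nd = AllocateDeviceMatrix(N);
    Matrix Pd = AllocateDeviceMatrix(P);
    CopyToDeviceMatrix(Md, M);
    CopyToDeviceMatrix(Nd, N);
    dim3 block(16, 16);
    dim3 grid((P.width + 15) / 16, (P.height + 15) / 16);
    hipLaunchKernelGGL(MatrixAddKernel, grid, block, 0, 0, Md, alpha, Nd, beta, Pd);
    CopyFromDeviceMatrix(P, Pd);
    hipFree(Md.elements); hipFree(Nd.elements); hipFree(Pd.elements);

with the allocations/copies placed between the inclusive and exclusive event
records so the two timings keep their intended meaning. */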
// Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
hipError_t error;
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
error = hipMalloc((void**)&Mdevice.elements, size);
if (error != hipSuccess)
{
printf("hipMalloc returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
return Mdevice;
}
// Allocate a matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
Matrix AllocateMatrix(int height, int width, int init)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < M.height * M.width; i++)
{
M.elements[i] = (init == 0) ? (0.0f) : (rand() / (float)RAND_MAX);
}
return M;
}
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
hipMemcpy(Mdevice.elements, Mhost.elements, size,
hipMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
hipMemcpy(Mhost.elements, Mdevice.elements, size,
hipMemcpyDeviceToHost);
}
//compare the data stored in two arrays on the host
bool CompareResults(float* A, float* B, int elements, float eps,float * error)
{
for(unsigned int i = 0; i < elements; i++){
float temp = sqrt((A[i]-B[i])*(A[i]-B[i]));
*error+=temp;
if(temp>eps){
return false;
}
}
return true;
}
// Read a 16x16 floating point matrix in from file
int ReadFile(Matrix* M, char* file_name)
{
unsigned int data_read = MATRIX_SIZE*MATRIX_SIZE;
std::ifstream ifile(file_name);
for(unsigned int i = 0; i < data_read; i++){
ifile>>M->elements[i];
}
ifile.close();
return data_read;
}
// Fill N from M: N[i] = 1 / (0.2 + M[i])
void MakeN(Matrix* M, Matrix* N)
{
unsigned int data_read = MATRIX_SIZE*MATRIX_SIZE;
for(unsigned int i = 0; i < data_read; i++){
N->elements[i]=1.f/(0.2f+M->elements[i]);
}
}
// Write a 16x16 floating point matrix to file
void WriteFile(Matrix M, char* file_name)
{
std::ofstream ofile(file_name);
for(unsigned int i = 0; i < M.width*M.height; i++){
ofile<<M.elements[i];
}
ofile.close();
}
| 120ac2ac64892af1071c821ce50ee4cc7f8d6cfc.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix addition: C = alpha*A + beta*B, where alpha and beta are two scalars.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <fstream>
// includes, project
//#include <cutil.h>
// includes, kernels
#include "matrixadd_kernel.cu"
#include "matrixadd.h"
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold( float*, const float*, const float, const float*, const float, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
bool CompareResults(float* A, float* B, int elements, float eps, float * error);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void MakeN(Matrix* M, Matrix* N);
void MatrixAddOnDevice(const Matrix M, const float alpha, const Matrix N, const float beta, Matrix P, float * inc, float * exc);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv) {
// Matrices for the program
Matrix M;
Matrix N;
Matrix P;
// Number of elements in the solution matrix
// Assuming square matrices, so the sizes of M, N and P are equal
unsigned int size_elements = WP * HP;
int errorM = 0;
srand(2012);
if(argc != 2)
{
printf("Error Usage ./problem2 u\n");
}
int u=atoi(argv[1]);
char filename[100]="problem2.inp";
// Check command line for input matrix files
if(u==0)
{
// No inputs provided
// Allocate and initialize the matrices
M = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 1);
N = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 1);
P = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
}
else
{
// Inputs provided
// Allocate and read source matrices from disk
M = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
N = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
P = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
errorM = ReadFile(&M, filename);
MakeN(&M, &N);
// check for read errors
if(errorM != size_elements)
{
printf("Error reading input files %d\n", errorM);
return 1;
}
}
// alpha*M + beta*N on the device
float alpha = 1.f;
float beta = 1.f;
//time the operation
float inclusiveTime, exclusiveTime,norm=0;
MatrixAddOnDevice(M, alpha, N, beta, P,&inclusiveTime,&exclusiveTime);
// compute the matrix addition on the CPU for comparison
Matrix reference = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
cudaError_t error;
cudaEvent_t start;
error = cudaEventCreate(&start);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
cudaEvent_t stop;
error = cudaEventCreate(&stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Record the start event
error = cudaEventRecord(start, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
computeGold(reference.elements, M.elements, alpha, N.elements, beta, HM, WM);
// Record the stop event
error = cudaEventRecord(stop, NULL);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// Wait for the stop event to complete
error = cudaEventSynchronize(stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
float msecTotal = 0.0f;
error = cudaEventElapsedTime(&msecTotal, start, stop);
if (error != cudaSuccess)
{
fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
exit(EXIT_FAILURE);
}
// check if the device result is equivalent to the expected solution
bool res = CompareResults(reference.elements, P.elements,
size_elements, 0.0001f,&norm);
if(res==0)printf("Test failed\n"); // This should not be printed in the correct implementation
printf("%f\n%f\n%f\n%f\n",sqrt(norm),msecTotal,inclusiveTime, exclusiveTime);
// Free host matrices
free(M.elements);
M.elements = NULL;
free(N.elements);
N.elements = NULL;
free(P.elements);
P.elements = NULL;
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! wrapper around the device implementation
////////////////////////////////////////////////////////////////////////////////
void MatrixAddOnDevice(const Matrix M, const float alpha, const Matrix N, const float beta, Matrix P,float * inc,float *exc)
// ADD YOUR CODE HERE
{
cudaEvent_t startEvent_inc, stopEvent_inc, startEvent_exc, stopEvent_exc;
cudaEventCreate(&startEvent_inc);
cudaEventCreate(&stopEvent_inc);
cudaEventCreate(&startEvent_exc);
cudaEventCreate(&stopEvent_exc);
float elapsedTime_inc, elapsedTime_exc;
cudaEventRecord(startEvent_inc,0); // starting timing for inclusive
//Allocate device matrices
// copy matrices to device
cudaEventRecord(startEvent_exc,0); // starting timing for exclusive
//launch kernel
cudaEventRecord(stopEvent_exc,0); // ending timing for exclusive
cudaEventSynchronize(stopEvent_exc);
cudaEventElapsedTime(&elapsedTime_exc, startEvent_exc, stopEvent_exc);
// Read P from the device
cudaEventRecord(stopEvent_inc,0); //ending timing for inclusive
cudaEventSynchronize(stopEvent_inc);
cudaEventElapsedTime(&elapsedTime_inc, startEvent_inc, stopEvent_inc);
*inc = elapsedTime_inc;
*exc = elapsedTime_exc;
}
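/* Timing convention used by this wrapper: the "inclusive" events are meant to
bracket the host<->device copies as well as the kernel, while the "exclusive"
events bracket only the kernel launch, so inc >= exc once the skeleton above is
filled in. */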
// Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
cudaError_t error;
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
error = cudaMalloc((void**)&Mdevice.elements, size);
if (error != cudaSuccess)
{
printf("cudaMalloc returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
return Mdevice;
}
// Allocate a matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
Matrix AllocateMatrix(int height, int width, int init)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < M.height * M.width; i++)
{
M.elements[i] = (init == 0) ? (0.0f) : (rand() / (float)RAND_MAX);
}
return M;
}
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
cudaMemcpy(Mdevice.elements, Mhost.elements, size,
cudaMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
cudaMemcpy(Mhost.elements, Mdevice.elements, size,
cudaMemcpyDeviceToHost);
}
//compare the data stored in two arrays on the host
bool CompareResults(float* A, float* B, int elements, float eps,float * error)
{
for(unsigned int i = 0; i < elements; i++){
float temp = sqrt((A[i]-B[i])*(A[i]-B[i]));
*error+=temp;
if(temp>eps){
return false;
}
}
return true;
}
// Read a 16x16 floating point matrix in from file
int ReadFile(Matrix* M, char* file_name)
{
unsigned int data_read = MATRIX_SIZE*MATRIX_SIZE;
std::ifstream ifile(file_name);
for(unsigned int i = 0; i < data_read; i++){
ifile>>M->elements[i];
}
ifile.close();
return data_read;
}
// Fill N from M: N[i] = 1 / (0.2 + M[i])
void MakeN(Matrix* M, Matrix* N)
{
unsigned int data_read = MATRIX_SIZE*MATRIX_SIZE;
for(unsigned int i = 0; i < data_read; i++){
N->elements[i]=1.f/(0.2f+M->elements[i]);
}
}
// Write a 16x16 floating point matrix to file
void WriteFile(Matrix M, char* file_name)
{
std::ofstream ofile(file_name);
for(unsigned int i = 0; i < M.width*M.height; i++){
ofile<<M.elements[i];
}
ofile.close();
}
|
f807428bc95800f7e2d2354df0d32a9360d39e94.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
//This file will run nkernel many kernels concurrently and each
// of them will sleep for kernel_time ms. These two numbers can
// be passed in as parameters, currently just list the two integers
// in the command line with nkernels first then kernel_time.
//This file is intended to be used for measuring the overhead in creating
// kernels and using GPGPUs
// This is a kernel that does no real work but runs at least for a specified number of clocks
__global__ void clock_block(int kernel_time, int clockRate)
{
int finish_clock;
int start_time;
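// deviceProp.clockRate is reported in kHz, so clockRate clock ticks is roughly
// one millisecond of busy-waiting per loop iteration; the 'wrapped' flag below
// guards against the per-multiprocessor clock() counter overflowing during the wait.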
for(int temp=0; temp<kernel_time; temp++){
start_time = clock();
finish_clock = start_time + clockRate;
bool wrapped = finish_clock < start_time;
while( clock() < finish_clock || wrapped) wrapped = clock()>0 && wrapped;
}
}
int main(int argc, char **argv)
{
//Default values
int nkernels = 16; // number of concurrent kernels
int nstreams = nkernels + 1; // use one more stream than concurrent kernels
int kernel_time = 2500; // time the kernel should run in ms
int cuda_device = 0;
if( argc>2 ){
nkernels = atoi(argv[1]); //could be used to pass in parameters
kernel_time = atoi(argv[2]);
}
//Getting device information, because we need clock_rate later
hipDeviceProp_t deviceProp;
hipGetDevice(&cuda_device);
hipGetDeviceProperties(&deviceProp, cuda_device);
// allocate and initialize an array of stream handles
hipStream_t *streams = (hipStream_t*) malloc(nstreams * sizeof(hipStream_t));
for(int i = 1; i < nstreams; i++)
hipStreamCreate(&(streams[i]));
//////////////////////////////////////////////////////////////////////
int clockRate = deviceProp.clockRate;
//I am starting this at i=1 because the default stream is 0.
for( int i=1; i<nkernels+1; ++i)
{
//printf("starting kernel: %d\n", i);
hipLaunchKernelGGL(( clock_block), dim3(1),dim3(1),1,streams[i], kernel_time, clockRate);
}
//Find any errors that the gpu kernels had
hipError_t cuda_error = hipDeviceSynchronize();
if(cuda_error==hipSuccess){
//printf( " Running the concurrentKernels was a success\n");
}else{
printf("CUDA Error: %s\n", hipGetErrorString(cuda_error));
return 1;
//if(cuda_error==hipErrorLaunchTimeOut ){
// printf( " A thread was stopped for reaching time limit\n" );
//}else{
// printf( " An error happened while running the wait\n" );
//}
}
// release resources
for(int i = 1; i < nstreams; i++)
hipStreamDestroy(streams[i]);
free(streams);
return 0;
}
| f807428bc95800f7e2d2354df0d32a9360d39e94.cu | #include <stdio.h>
//This file will run nkernel many kernels concurrently and each
// of them will sleep for kernel_time ms. These two numbers can
// be passed in as parameters, currently just list the two integers
// in the command line with nkernels first then kernel_time.
//This file is intended to be used for measuring the overhead in creating
// kernels and using GPGPUs
// This is a kernel that does no real work but runs at least for a specified number of clocks
__global__ void clock_block(int kernel_time, int clockRate)
{
int finish_clock;
int start_time;
for(int temp=0; temp<kernel_time; temp++){
start_time = clock();
finish_clock = start_time + clockRate;
bool wrapped = finish_clock < start_time;
while( clock() < finish_clock || wrapped) wrapped = clock()>0 && wrapped;
}
}
int main(int argc, char **argv)
{
//Default values
int nkernels = 16; // number of concurrent kernels
int nstreams = nkernels + 1; // use one more stream than concurrent kernels
int kernel_time = 2500; // time the kernel should run in ms
int cuda_device = 0;
if( argc>2 ){
nkernels = atoi(argv[1]); //could be used to pass in parameters
kernel_time = atoi(argv[2]);
}
//Getting device information, because we need clock_rate later
cudaDeviceProp deviceProp;
cudaGetDevice(&cuda_device);
cudaGetDeviceProperties(&deviceProp, cuda_device);
// allocate and initialize an array of stream handles
cudaStream_t *streams = (cudaStream_t*) malloc(nstreams * sizeof(cudaStream_t));
for(int i = 1; i < nstreams; i++)
cudaStreamCreate(&(streams[i]));
//////////////////////////////////////////////////////////////////////
int clockRate = deviceProp.clockRate;
//I am starting this at i=1 because the default stream is 0.
for( int i=1; i<nkernels+1; ++i)
{
//printf("starting kernel: %d\n", i);
clock_block<<<1,1,1,streams[i]>>>(kernel_time, clockRate);
}
//Find any errors that the gpu kernels had
cudaError cuda_error = cudaDeviceSynchronize();
if(cuda_error==cudaSuccess){
//printf( " Running the concurrentKernels was a success\n");
}else{
printf("CUDA Error: %s\n", cudaGetErrorString(cuda_error));
return 1;
//if(cuda_error==cudaErrorLaunchTimeout ){
// printf( " A thread was stopped for reaching time limit\n" );
//}else{
// printf( " An error happened while running the wait\n" );
//}
}
// release resources
for(int i = 1; i < nstreams; i++)
cudaStreamDestroy(streams[i]);
free(streams);
return 0;
}
|
ddd4b33da28866fcafbb41b0652cb564436c32f0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void ReduceHKernelSimple(const uint8_t *src, float *dst, int width, int height)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < width) {
float sum = 0;
for (int y = 0; y < height; ++y) {
sum += src[x + y * width];
}
dst[x] = sum;
}
} | ddd4b33da28866fcafbb41b0652cb564436c32f0.cu | #include "includes.h"
__global__ void ReduceHKernelSimple(const uint8_t *src, float *dst, int width, int height)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x < width) {
float sum = 0;
for (int y = 0; y < height; ++y) {
sum += src[x + y * width];
}
dst[x] = sum;
}
} |
d8fa2effa4df5812fe541eb0ed51c72f37691824.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/platform/float16.h"
#define GLOG_NO_ABBREVIATED_SEVERITIES // msvc conflict logging with windows.h
#include <glog/logging.h>
#include <gtest/gtest.h>
#include <bitset>
#include <iostream>
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/platform/enforce.h"
#define ARITHMETIC_KERNEL(op_type, sign) \
__global__ void op_type(const half* in1, const half* in2, half* out) { \
out[0] = in1[0] sign in2[0]; \
}
#define COMPOUND_KERNEL(op_type, sign) \
__global__ void op_type(half* in1, const half* in2) { in1[0] sign in2[0]; }
#define COMPARISON_KERNEL(op_type, sign) \
__global__ void op_type(const half* in1, const half* in2, bool* out) { \
out[0] = in1[0] sign in2[0]; \
}
#define ARITHMETIC_KERNEL_LAUNCH(op_type) \
void Test##op_type(float v_in1, float v_in2, float v_out) { \
LOG(INFO) << "Test " << #op_type << " on GPU!"; \
half *in1, *in2, *out; \
half *d_in1, *d_in2, *d_out; \
int size = sizeof(half); \
hipMalloc(reinterpret_cast<void**>(&d_in1), size); \
hipMalloc(reinterpret_cast<void**>(&d_in2), size); \
hipMalloc(reinterpret_cast<void**>(&d_out), size); \
in1 = reinterpret_cast<half*>(malloc(size)); \
in2 = reinterpret_cast<half*>(malloc(size)); \
out = reinterpret_cast<half*>(malloc(size)); \
in1[0] = half(float16(v_in1)); \
in2[0] = half(float16(v_in2)); \
hipMemcpy(d_in1, in1, size, hipMemcpyHostToDevice); \
hipMemcpy(d_in2, in2, size, hipMemcpyHostToDevice); \
hipLaunchKernelGGL(( op_type), dim3(1), dim3(1), 0, 0, d_in1, d_in2, d_out); \
hipMemcpy(out, d_out, size, hipMemcpyDeviceToHost); \
EXPECT_EQ(static_cast<float>(float16(out[0])), v_out); \
free(in1); \
free(in2); \
free(out); \
hipFree(d_in1); \
hipFree(d_in2); \
hipFree(d_out); \
}
#define COMPOUND_KERNEL_LAUNCH(op_type) \
void Test##op_type(float v_in1, float v_in2, float v_out) { \
LOG(INFO) << "Test " << #op_type << " on GPU!"; \
half *in1, *in2; \
half *d_in1, *d_in2; \
int size = sizeof(half); \
hipMalloc(reinterpret_cast<void**>(&d_in1), size); \
hipMalloc(reinterpret_cast<void**>(&d_in2), size); \
in1 = reinterpret_cast<half*>(malloc(size)); \
in2 = reinterpret_cast<half*>(malloc(size)); \
in1[0] = half(float16(v_in1)); \
in2[0] = half(float16(v_in2)); \
hipMemcpy(d_in1, in1, size, hipMemcpyHostToDevice); \
hipMemcpy(d_in2, in2, size, hipMemcpyHostToDevice); \
hipLaunchKernelGGL(( op_type), dim3(1), dim3(1), 0, 0, d_in1, d_in2); \
hipMemcpy(in1, d_in1, size, hipMemcpyDeviceToHost); \
EXPECT_EQ(static_cast<float>(float16(in1[0])), v_out); \
free(in1); \
free(in2); \
hipFree(d_in1); \
hipFree(d_in2); \
}
#define COMPARISON_KERNEL_LAUNCH(op_type) \
void Test##op_type(float v_in1, float v_in2, bool v_out) { \
LOG(INFO) << "Test " << #op_type << " on GPU!"; \
half *in1, *in2; \
half *d_in1, *d_in2; \
bool *out, *d_out; \
int size = sizeof(half); \
hipMalloc(reinterpret_cast<void**>(&d_in1), size); \
hipMalloc(reinterpret_cast<void**>(&d_in2), size); \
hipMalloc(reinterpret_cast<void**>(&d_out), 1); \
in1 = reinterpret_cast<half*>(malloc(size)); \
in2 = reinterpret_cast<half*>(malloc(size)); \
out = reinterpret_cast<bool*>(malloc(1)); \
in1[0] = half(float16(v_in1)); \
in2[0] = half(float16(v_in2)); \
hipMemcpy(d_in1, in1, size, hipMemcpyHostToDevice); \
hipMemcpy(d_in2, in2, size, hipMemcpyHostToDevice); \
hipLaunchKernelGGL(( op_type), dim3(1), dim3(1), 0, 0, d_in1, d_in2, d_out); \
hipMemcpy(out, d_out, 1, hipMemcpyDeviceToHost); \
EXPECT_EQ(out[0], v_out); \
free(in1); \
free(in2); \
free(out); \
hipFree(d_in1); \
hipFree(d_in2); \
hipFree(d_out); \
}
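/* Each *_KERNEL_LAUNCH macro expands to a Test<Op> host helper that copies two
scalar half values to the device, runs the corresponding one-thread kernel,
copies the result back and compares it (as float or bool) against the expected
value, so every operator overload is exercised on the device itself. */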
#ifdef PADDLE_CUDA_FP16
namespace paddle {
namespace platform {
#if TORCH_HIP_VERSION < 9000
ARITHMETIC_KERNEL(Add, +)
ARITHMETIC_KERNEL(Sub, -)
ARITHMETIC_KERNEL(Mul, *)
ARITHMETIC_KERNEL(Div, /)
ARITHMETIC_KERNEL_LAUNCH(Add)
ARITHMETIC_KERNEL_LAUNCH(Sub)
ARITHMETIC_KERNEL_LAUNCH(Mul)
ARITHMETIC_KERNEL_LAUNCH(Div)
// Negative sign kernel
__global__ void Neg(half* in) { in[0] = -in[0]; }
void TestNeg(float v_in, float v_out) {
LOG(INFO) << "Test Neg on GPU!";
half *in, *d_in;
int size = sizeof(half);
hipMalloc(reinterpret_cast<void**>(&d_in), size);
in = reinterpret_cast<half*>(malloc(size));
in[0] = half(float16(v_in));
hipMemcpy(d_in, in, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( Neg), dim3(1), dim3(1), 0, 0, d_in);
hipMemcpy(in, d_in, size, hipMemcpyDeviceToHost);
EXPECT_EQ(static_cast<float>(float16(in[0])), v_out);
free(in);
hipFree(d_in);
}
COMPOUND_KERNEL(AddAssign, +=)
COMPOUND_KERNEL(SubAssign, -=)
COMPOUND_KERNEL(MulAssign, *=)
COMPOUND_KERNEL(DivAssign, /=)
COMPOUND_KERNEL_LAUNCH(AddAssign)
COMPOUND_KERNEL_LAUNCH(SubAssign)
COMPOUND_KERNEL_LAUNCH(MulAssign)
COMPOUND_KERNEL_LAUNCH(DivAssign)
COMPARISON_KERNEL(Equal, ==)
COMPARISON_KERNEL(NotEqual, !=)
COMPARISON_KERNEL(Less, <)
COMPARISON_KERNEL(LessEqual, <=)
COMPARISON_KERNEL(Greater, >)
COMPARISON_KERNEL(GreaterEqual, >=)
COMPARISON_KERNEL_LAUNCH(Equal)
COMPARISON_KERNEL_LAUNCH(NotEqual)
COMPARISON_KERNEL_LAUNCH(Less)
COMPARISON_KERNEL_LAUNCH(LessEqual)
COMPARISON_KERNEL_LAUNCH(Greater)
COMPARISON_KERNEL_LAUNCH(GreaterEqual)
TEST(float16, arithmetic_on_gpu) {
TestAdd(1, 2, 3);
TestSub(2, 1, 1);
TestMul(2, 3, 6);
TestDiv(6, 2, 3);
TestNeg(1, -1);
}
TEST(float16, compound_on_gpu) {
TestAddAssign(1, 2, 3);
TestSubAssign(2, 1, 1);
TestMulAssign(2, 3, 6);
TestDivAssign(6, 2, 3);
}
TEST(float16, comparision_on_gpu) {
TestEqual(1, 1, true);
TestEqual(1, 2, false);
TestNotEqual(2, 3, true);
TestNotEqual(2, 2, false);
TestLess(3, 4, true);
TestLess(3, 3, false);
TestLessEqual(3, 3, true);
TestLessEqual(3, 2, false);
TestGreater(4, 3, true);
TestGreater(4, 4, false);
TestGreaterEqual(4, 4, true);
TestGreaterEqual(4, 5, false);
}
#endif // TORCH_HIP_VERSION
TEST(float16, conversion_on_gpu) {
// Explicit conversion to and from cuda half
EXPECT_EQ(float16(half(float16(1.0f))).x, 0x3c00);
EXPECT_EQ(float16(half(float16(0.5f))).x, 0x3800);
EXPECT_EQ(float16(half(float16(0.33333f))).x, 0x3555);
EXPECT_EQ(float16(half(float16(0.0f))).x, 0x0000);
EXPECT_EQ(float16(half(float16(-0.0f))).x, 0x8000);
EXPECT_EQ(float16(half(float16(65504.0f))).x, 0x7bff);
EXPECT_EQ(float16(half(float16(65536.0f))).x, 0x7c00);
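// The expected .x values are raw IEEE-754 binary16 encodings (1 sign, 5 exponent,
// 10 mantissa bits): 0x3c00 = 1.0, 0x3800 = 0.5, 0x7bff = 65504 (largest finite
// half) and 0x7c00 = +infinity, which is why 65536 rounds up to the infinity pattern.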
// Assignment operator
float16 v_assign;
v_assign = half(float16(1.0f));
EXPECT_EQ(v_assign.x, 0x3c00);
}
TEST(float16, lod_tensor_on_gpu) {
framework::LoDTensor src_tensor;
framework::LoDTensor gpu_tensor;
framework::LoDTensor dst_tensor;
float16* src_ptr = src_tensor.mutable_data<float16>(
framework::make_ddim({2, 2}), CPUPlace());
float16 arr[4] = {float16(1.0f), float16(0.5f), float16(0.33333f),
float16(0.0f)};
memcpy(src_ptr, arr, 4 * sizeof(float16));
// CPU LoDTensor to GPU LoDTensor
CUDAPlace gpu_place(0);
CUDADeviceContext gpu_ctx(gpu_place);
framework::TensorCopy(src_tensor, gpu_place, gpu_ctx, &gpu_tensor);
// GPU LoDTensor to CPU LoDTensor
framework::TensorCopy(gpu_tensor, CPUPlace(), gpu_ctx, &dst_tensor);
// Sync before comparing LoDTensors
gpu_ctx.Wait();
const float16* dst_ptr = dst_tensor.data<float16>();
ASSERT_NE(src_ptr, dst_ptr);
for (size_t i = 0; i < 4; ++i) {
EXPECT_EQ(src_ptr[i].x, dst_ptr[i].x);
}
}
template <typename T>
struct Functor {
bool operator()(const T& val) {
return std::type_index(typeid(T)) ==
std::type_index(typeid(platform::float16));
}
};
TEST(float16, typeid) {
// the framework heavily used typeid hash
Functor<float16> functor;
float16 a = float16(.0f);
Functor<int> functor2;
int b(0);
// compile time assert
PADDLE_ENFORCE_EQ(functor(a), true);
PADDLE_ENFORCE_EQ(functor2(b), false);
}
// GPU test
TEST(float16, isinf) {
float16 a;
a.x = 0x7c00;
float16 b = float16(INFINITY);
// underflow to 0
float16 native_a(5e-40f);
EXPECT_EQ(std::isinf(a), true);
EXPECT_EQ(std::isinf(b), true);
#ifndef _WIN32
// overflow to inf
float16 native_b(5e40f);
EXPECT_EQ(std::isinf(native_b), true);
#endif
EXPECT_EQ(native_a, float16(0));
}
TEST(float16, isnan) {
float16 a;
a.x = 0x7fff;
float16 b = float16(NAN);
float16 c = float16(5e40);
// inf * +-0 will get a nan
float16 d = c * float16(0);
EXPECT_EQ(std::isnan(a), true);
EXPECT_EQ(std::isnan(b), true);
EXPECT_EQ(std::isnan(d), true);
}
TEST(float16, cast) {
float16 a;
a.x = 0x0070;
auto b = a;
{
// change semantic, keep the same value
float16 c = reinterpret_cast<float16&>(reinterpret_cast<unsigned&>(b));
EXPECT_EQ(b, c);
}
{
// use uint32 low 16 bit store float16
uint32_t c = reinterpret_cast<uint32_t&>(b);
float16 d;
d.x = c;
EXPECT_EQ(b, d);
}
}
} // namespace platform
} // namespace paddle
#endif // PADDLE_CUDA_FP16
| d8fa2effa4df5812fe541eb0ed51c72f37691824.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/platform/float16.h"
#define GLOG_NO_ABBREVIATED_SEVERITIES // msvc conflict logging with windows.h
#include <glog/logging.h>
#include <gtest/gtest.h>
#include <bitset>
#include <iostream>
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/platform/enforce.h"
#define ARITHMETIC_KERNEL(op_type, sign) \
__global__ void op_type(const half* in1, const half* in2, half* out) { \
out[0] = in1[0] sign in2[0]; \
}
#define COMPOUND_KERNEL(op_type, sign) \
__global__ void op_type(half* in1, const half* in2) { in1[0] sign in2[0]; }
#define COMPARISON_KERNEL(op_type, sign) \
__global__ void op_type(const half* in1, const half* in2, bool* out) { \
out[0] = in1[0] sign in2[0]; \
}
#define ARITHMETIC_KERNEL_LAUNCH(op_type) \
void Test##op_type(float v_in1, float v_in2, float v_out) { \
LOG(INFO) << "Test " << #op_type << " on GPU!"; \
half *in1, *in2, *out; \
half *d_in1, *d_in2, *d_out; \
int size = sizeof(half); \
cudaMalloc(reinterpret_cast<void**>(&d_in1), size); \
cudaMalloc(reinterpret_cast<void**>(&d_in2), size); \
cudaMalloc(reinterpret_cast<void**>(&d_out), size); \
in1 = reinterpret_cast<half*>(malloc(size)); \
in2 = reinterpret_cast<half*>(malloc(size)); \
out = reinterpret_cast<half*>(malloc(size)); \
in1[0] = half(float16(v_in1)); \
in2[0] = half(float16(v_in2)); \
cudaMemcpy(d_in1, in1, size, cudaMemcpyHostToDevice); \
cudaMemcpy(d_in2, in2, size, cudaMemcpyHostToDevice); \
op_type<<<1, 1>>>(d_in1, d_in2, d_out); \
cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost); \
EXPECT_EQ(static_cast<float>(float16(out[0])), v_out); \
free(in1); \
free(in2); \
free(out); \
cudaFree(d_in1); \
cudaFree(d_in2); \
cudaFree(d_out); \
}
#define COMPOUND_KERNEL_LAUNCH(op_type) \
void Test##op_type(float v_in1, float v_in2, float v_out) { \
LOG(INFO) << "Test " << #op_type << " on GPU!"; \
half *in1, *in2; \
half *d_in1, *d_in2; \
int size = sizeof(half); \
cudaMalloc(reinterpret_cast<void**>(&d_in1), size); \
cudaMalloc(reinterpret_cast<void**>(&d_in2), size); \
in1 = reinterpret_cast<half*>(malloc(size)); \
in2 = reinterpret_cast<half*>(malloc(size)); \
in1[0] = half(float16(v_in1)); \
in2[0] = half(float16(v_in2)); \
cudaMemcpy(d_in1, in1, size, cudaMemcpyHostToDevice); \
cudaMemcpy(d_in2, in2, size, cudaMemcpyHostToDevice); \
op_type<<<1, 1>>>(d_in1, d_in2); \
cudaMemcpy(in1, d_in1, size, cudaMemcpyDeviceToHost); \
EXPECT_EQ(static_cast<float>(float16(in1[0])), v_out); \
free(in1); \
free(in2); \
cudaFree(d_in1); \
cudaFree(d_in2); \
}
#define COMPARISON_KERNEL_LAUNCH(op_type) \
void Test##op_type(float v_in1, float v_in2, bool v_out) { \
LOG(INFO) << "Test " << #op_type << " on GPU!"; \
half *in1, *in2; \
half *d_in1, *d_in2; \
bool *out, *d_out; \
int size = sizeof(half); \
cudaMalloc(reinterpret_cast<void**>(&d_in1), size); \
cudaMalloc(reinterpret_cast<void**>(&d_in2), size); \
cudaMalloc(reinterpret_cast<void**>(&d_out), 1); \
in1 = reinterpret_cast<half*>(malloc(size)); \
in2 = reinterpret_cast<half*>(malloc(size)); \
out = reinterpret_cast<bool*>(malloc(1)); \
in1[0] = half(float16(v_in1)); \
in2[0] = half(float16(v_in2)); \
cudaMemcpy(d_in1, in1, size, cudaMemcpyHostToDevice); \
cudaMemcpy(d_in2, in2, size, cudaMemcpyHostToDevice); \
op_type<<<1, 1>>>(d_in1, d_in2, d_out); \
cudaMemcpy(out, d_out, 1, cudaMemcpyDeviceToHost); \
EXPECT_EQ(out[0], v_out); \
free(in1); \
free(in2); \
free(out); \
cudaFree(d_in1); \
cudaFree(d_in2); \
cudaFree(d_out); \
}
#ifdef PADDLE_CUDA_FP16
namespace paddle {
namespace platform {
#if CUDA_VERSION < 9000
ARITHMETIC_KERNEL(Add, +)
ARITHMETIC_KERNEL(Sub, -)
ARITHMETIC_KERNEL(Mul, *)
ARITHMETIC_KERNEL(Div, /)
ARITHMETIC_KERNEL_LAUNCH(Add)
ARITHMETIC_KERNEL_LAUNCH(Sub)
ARITHMETIC_KERNEL_LAUNCH(Mul)
ARITHMETIC_KERNEL_LAUNCH(Div)
// Negative sign kernel
__global__ void Neg(half* in) { in[0] = -in[0]; }
void TestNeg(float v_in, float v_out) {
LOG(INFO) << "Test Neg on GPU!";
half *in, *d_in;
int size = sizeof(half);
cudaMalloc(reinterpret_cast<void**>(&d_in), size);
in = reinterpret_cast<half*>(malloc(size));
in[0] = half(float16(v_in));
cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice);
Neg<<<1, 1>>>(d_in);
cudaMemcpy(in, d_in, size, cudaMemcpyDeviceToHost);
EXPECT_EQ(static_cast<float>(float16(in[0])), v_out);
free(in);
cudaFree(d_in);
}
COMPOUND_KERNEL(AddAssign, +=)
COMPOUND_KERNEL(SubAssign, -=)
COMPOUND_KERNEL(MulAssign, *=)
COMPOUND_KERNEL(DivAssign, /=)
COMPOUND_KERNEL_LAUNCH(AddAssign)
COMPOUND_KERNEL_LAUNCH(SubAssign)
COMPOUND_KERNEL_LAUNCH(MulAssign)
COMPOUND_KERNEL_LAUNCH(DivAssign)
COMPARISON_KERNEL(Equal, ==)
COMPARISON_KERNEL(NotEqual, !=)
COMPARISON_KERNEL(Less, <)
COMPARISON_KERNEL(LessEqual, <=)
COMPARISON_KERNEL(Greater, >)
COMPARISON_KERNEL(GreaterEqual, >=)
COMPARISON_KERNEL_LAUNCH(Equal)
COMPARISON_KERNEL_LAUNCH(NotEqual)
COMPARISON_KERNEL_LAUNCH(Less)
COMPARISON_KERNEL_LAUNCH(LessEqual)
COMPARISON_KERNEL_LAUNCH(Greater)
COMPARISON_KERNEL_LAUNCH(GreaterEqual)
TEST(float16, arithmetic_on_gpu) {
TestAdd(1, 2, 3);
TestSub(2, 1, 1);
TestMul(2, 3, 6);
TestDiv(6, 2, 3);
TestNeg(1, -1);
}
TEST(float16, compound_on_gpu) {
TestAddAssign(1, 2, 3);
TestSubAssign(2, 1, 1);
TestMulAssign(2, 3, 6);
TestDivAssign(6, 2, 3);
}
TEST(float16, comparision_on_gpu) {
TestEqual(1, 1, true);
TestEqual(1, 2, false);
TestNotEqual(2, 3, true);
TestNotEqual(2, 2, false);
TestLess(3, 4, true);
TestLess(3, 3, false);
TestLessEqual(3, 3, true);
TestLessEqual(3, 2, false);
TestGreater(4, 3, true);
TestGreater(4, 4, false);
TestGreaterEqual(4, 4, true);
TestGreaterEqual(4, 5, false);
}
#endif // CUDA_VERSION
TEST(float16, conversion_on_gpu) {
// Explicit conversion to and from cuda half
EXPECT_EQ(float16(half(float16(1.0f))).x, 0x3c00);
EXPECT_EQ(float16(half(float16(0.5f))).x, 0x3800);
EXPECT_EQ(float16(half(float16(0.33333f))).x, 0x3555);
EXPECT_EQ(float16(half(float16(0.0f))).x, 0x0000);
EXPECT_EQ(float16(half(float16(-0.0f))).x, 0x8000);
EXPECT_EQ(float16(half(float16(65504.0f))).x, 0x7bff);
EXPECT_EQ(float16(half(float16(65536.0f))).x, 0x7c00);
// Assignment operator
float16 v_assign;
v_assign = half(float16(1.0f));
EXPECT_EQ(v_assign.x, 0x3c00);
}
TEST(float16, lod_tensor_on_gpu) {
framework::LoDTensor src_tensor;
framework::LoDTensor gpu_tensor;
framework::LoDTensor dst_tensor;
float16* src_ptr = src_tensor.mutable_data<float16>(
framework::make_ddim({2, 2}), CPUPlace());
float16 arr[4] = {float16(1.0f), float16(0.5f), float16(0.33333f),
float16(0.0f)};
memcpy(src_ptr, arr, 4 * sizeof(float16));
// CPU LoDTensor to GPU LoDTensor
CUDAPlace gpu_place(0);
CUDADeviceContext gpu_ctx(gpu_place);
framework::TensorCopy(src_tensor, gpu_place, gpu_ctx, &gpu_tensor);
// GPU LoDTensor to CPU LoDTensor
framework::TensorCopy(gpu_tensor, CPUPlace(), gpu_ctx, &dst_tensor);
// Sync before comparing LoDTensors
gpu_ctx.Wait();
const float16* dst_ptr = dst_tensor.data<float16>();
ASSERT_NE(src_ptr, dst_ptr);
for (size_t i = 0; i < 4; ++i) {
EXPECT_EQ(src_ptr[i].x, dst_ptr[i].x);
}
}
template <typename T>
struct Functor {
bool operator()(const T& val) {
return std::type_index(typeid(T)) ==
std::type_index(typeid(platform::float16));
}
};
TEST(float16, typeid) {
// the framework heavily used typeid hash
Functor<float16> functor;
float16 a = float16(.0f);
Functor<int> functor2;
int b(0);
// compile time assert
PADDLE_ENFORCE_EQ(functor(a), true);
PADDLE_ENFORCE_EQ(functor2(b), false);
}
// GPU test
TEST(float16, isinf) {
float16 a;
a.x = 0x7c00;
float16 b = float16(INFINITY);
// underflow to 0
float16 native_a(5e-40f);
EXPECT_EQ(std::isinf(a), true);
EXPECT_EQ(std::isinf(b), true);
#ifndef _WIN32
// overflow to inf
float16 native_b(5e40f);
EXPECT_EQ(std::isinf(native_b), true);
#endif
EXPECT_EQ(native_a, float16(0));
}
TEST(float16, isnan) {
float16 a;
a.x = 0x7fff;
float16 b = float16(NAN);
float16 c = float16(5e40);
// inf * +-0 will get a nan
float16 d = c * float16(0);
EXPECT_EQ(std::isnan(a), true);
EXPECT_EQ(std::isnan(b), true);
EXPECT_EQ(std::isnan(d), true);
}
TEST(float16, cast) {
float16 a;
a.x = 0x0070;
auto b = a;
{
// change semantic, keep the same value
float16 c = reinterpret_cast<float16&>(reinterpret_cast<unsigned&>(b));
EXPECT_EQ(b, c);
}
{
// use uint32 low 16 bit store float16
uint32_t c = reinterpret_cast<uint32_t&>(b);
float16 d;
d.x = c;
EXPECT_EQ(b, d);
}
}
} // namespace platform
} // namespace paddle
#endif // PADDLE_CUDA_FP16
|
eb628066658fb6243592613c622b8d06102c63ef.hip | // !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/native/sparse/SparseStubs.h>
#include <ATen/native/sparse/SparseBinaryOpIntersectionCommon.h>
#include <ATen/native/hip/Loops.cuh>
namespace at {
namespace native {
namespace {
template <typename func_t>
struct CUDAKernelLauncher {
static void launch(TensorIteratorBase& iter, const func_t& f) {
gpu_kernel(iter, f);
}
};
struct MulOp {
static Tensor apply(const Tensor& a, const Tensor& b) {
return a.mul(b);
}
};
void mul_sparse_sparse_out_cuda_kernel(
Tensor& result,
const Tensor& x,
const Tensor& y) {
_sparse_binary_op_intersection_kernel_out<CUDAKernelLauncher, MulOp>(
result, x, y
);
}
}
REGISTER_CUDA_DISPATCH(mul_sparse_sparse_out_stub, &mul_sparse_sparse_out_cuda_kernel);
}}
| eb628066658fb6243592613c622b8d06102c63ef.cu | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/native/sparse/SparseStubs.h>
#include <ATen/native/sparse/SparseBinaryOpIntersectionCommon.h>
#include <ATen/native/cuda/Loops.cuh>
namespace at {
namespace native {
namespace {
template <typename func_t>
struct CUDAKernelLauncher {
static void launch(TensorIteratorBase& iter, const func_t& f) {
gpu_kernel(iter, f);
}
};
struct MulOp {
static Tensor apply(const Tensor& a, const Tensor& b) {
return a.mul(b);
}
};
void mul_sparse_sparse_out_cuda_kernel(
Tensor& result,
const Tensor& x,
const Tensor& y) {
_sparse_binary_op_intersection_kernel_out<CUDAKernelLauncher, MulOp>(
result, x, y
);
}
}
REGISTER_CUDA_DISPATCH(mul_sparse_sparse_out_stub, &mul_sparse_sparse_out_cuda_kernel);
}}
|
ec9ba3c44065631a49294159247ab8127279bccc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void add(int *a, int *b, int *c) {
*c = *a + *b;
}
int main(void) {
int a, b, c; // host copies of a, b, c
int *d_a, *d_b, *d_c; // device copies of a, b, c
int size = sizeof(int);
// Allocate space for device copies of a, b, c
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
// Setup input values
a = 2;
b = 7;
// Copy inputs to device
hipMemcpy(d_a, &a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, &b, size, hipMemcpyHostToDevice);
// Launch add() kernel on GPU
hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, d_a, d_b, d_c);
// Copy result back to host
hipMemcpy(&c, d_c, size, hipMemcpyDeviceToHost);
printf("%d + %d is %d\n", a,b,c);
//printf("%d + %d is %d\n", *d_a,*d_b,*d_c);
// Cleanup
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
}
| ec9ba3c44065631a49294159247ab8127279bccc.cu | #include <stdio.h>
__global__ void add(int *a, int *b, int *c) {
*c = *a + *b;
}
int main(void) {
int a, b, c; // host copies of a, b, c
int *d_a, *d_b, *d_c; // device copies of a, b, c
int size = sizeof(int);
// Allocate space for device copies of a, b, c
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
// Setup input values
a = 2;
b = 7;
// Copy inputs to device
cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);
// Launch add() kernel on GPU
add<<<1,1>>>(d_a, d_b, d_c);
// Copy result back to host
cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);
printf("%d + %d is %d\n", a,b,c);
//printf("%d + %d is %d\n", *d_a,*d_b,*d_c);
// Cleanup
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
|
126ff7c0a83d5e97552ca91be04a5c0e3d9668af.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
__global__ void add(int a, int b, int *c) {
*c = a + b;
}
int main(void) {
int c;
int *dev_c;
hipMalloc((void**)&dev_c, sizeof(int));
hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, 2, 7, dev_c);
hipMemcpy(&c,
dev_c,
sizeof(int),
hipMemcpyDeviceToHost);
printf("2 + 7 = %d\n", c);
hipFree(dev_c);
return 0;
}
| 126ff7c0a83d5e97552ca91be04a5c0e3d9668af.cu | #include <iostream>
__global__ void add(int a, int b, int *c) {
*c = a + b;
}
int main(void) {
int c;
int *dev_c;
cudaMalloc((void**)&dev_c, sizeof(int));
add<<<1, 1>>>(2, 7, dev_c);
cudaMemcpy(&c,
dev_c,
sizeof(int),
cudaMemcpyDeviceToHost);
printf("2 + 7 = %d\n", c);
cudaFree(dev_c);
return 0;
}
|